Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

FMWK-399 Add support for remove artifacts flag #146

Merged
merged 14 commits into from
Sep 19, 2024
2 changes: 1 addition & 1 deletion cmd/asbackup/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -176,7 +176,7 @@ func run(cmd *cobra.Command, _ []string) error {
if err != nil {
return err
}
// Run app.

return asb.Run(cmd.Context())
}

Expand Down
4 changes: 1 addition & 3 deletions cmd/asbackup/readme.md
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,7 @@ Backup Flags:

--parallel-nodes Specifies how to perform scan. If set to true, we launch parallel workers for nodes;
otherwise workers run in parallel for partitions.
--remove-artifacts Remove existing backup file (-o) or files (-d) without performing a backup.

Compression Flags:
-z, --compress string Enables compressing of backup files using the specified compression algorithm.
Expand Down Expand Up @@ -146,9 +147,6 @@ Azure Flags:

## Unsupported flags
```
--remove-artifacts Remove existing backup file (-o) or files (-d) without performing a backup.
This option is mutually exclusive to --continue and --estimate.

--continue Resumes an interrupted/failed backup from where it was left off, given the .state file
that was generated from the interrupted/failed run.

Expand Down
22 changes: 15 additions & 7 deletions cmd/internal/app/asbackup.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,6 @@ type ASBackup struct {
writer backup.Writer
}

//nolint:dupl // Code is very similar as NewASRestore but different.
func NewASBackup(
ctx context.Context,
clientConfig *client.AerospikeConfig,
Expand All @@ -52,6 +51,16 @@ func NewASBackup(
return nil, err
}

writer, err := getWriter(ctx, backupParams, commonParams, awsS3, gcpStorage, azureBlob)
if err != nil {
return nil, fmt.Errorf("failed to create backup writer: %w", err)
}

if backupParams.RemoveArtifacts {
// We clean the folder on initialization.
return nil, nil
}

aerospikeClient, err := newAerospikeClient(clientConfig)
if err != nil {
return nil, fmt.Errorf("failed to create aerospike client: %w", err)
Expand All @@ -73,11 +82,6 @@ func NewASBackup(
return nil, fmt.Errorf("failed to create backup client: %w", err)
}

writer, err := getWriter(ctx, backupParams, commonParams, awsS3, gcpStorage, azureBlob)
if err != nil {
return nil, fmt.Errorf("failed to create backup writer: %w", err)
}

return &ASBackup{
backupClient: backupClient,
backupConfig: backupConfig,
Expand All @@ -86,6 +90,10 @@ func NewASBackup(
}

func (b *ASBackup) Run(ctx context.Context) error {
if b == nil {
return nil
}

h, err := b.backupClient.Backup(ctx, b.backupConfig, b.writer)
if err != nil {
return fmt.Errorf("failed to start backup: %w", err)
Expand Down Expand Up @@ -116,7 +124,7 @@ func getWriter(
case azureBlob.ContainerName != "":
return newAzureWriter(ctx, azureBlob, backupParams, commonParams)
default:
return newLocalWriter(backupParams, commonParams)
return newLocalWriter(ctx, backupParams, commonParams)
}
}

Expand Down
15 changes: 9 additions & 6 deletions cmd/internal/app/asrestore.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,6 @@ type ASRestore struct {
reader backup.StreamingReader
}

//nolint:dupl // Code is very similar as NewASBackup but different.
func NewASRestore(
ctx context.Context,
clientConfig *client.AerospikeConfig,
Expand All @@ -52,6 +51,11 @@ func NewASRestore(
return nil, err
}

reader, err := getReader(ctx, restoreParams, commonParams, awsS3, gcpStorage, azureBlob)
if err != nil {
return nil, fmt.Errorf("failed to create backup reader: %w", err)
}

aerospikeClient, err := newAerospikeClient(clientConfig)
if err != nil {
return nil, fmt.Errorf("failed to create aerospike client: %w", err)
Expand All @@ -73,11 +77,6 @@ func NewASRestore(
return nil, fmt.Errorf("failed to create backup client: %w", err)
}

reader, err := getReader(ctx, restoreParams, commonParams, awsS3, gcpStorage, azureBlob)
if err != nil {
return nil, fmt.Errorf("failed to create backup reader: %w", err)
}

return &ASRestore{
backupClient: backupClient,
restoreConfig: restoreConfig,
Expand All @@ -86,6 +85,10 @@ func NewASRestore(
}

func (r *ASRestore) Run(ctx context.Context) error {
if r == nil {
return nil
}

h, err := r.backupClient.Restore(ctx, r.restoreConfig, r.reader)
if err != nil {
return fmt.Errorf("failed to start restore: %w", err)
Expand Down
5 changes: 4 additions & 1 deletion cmd/internal/app/clients_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ import (

"github.com/aerospike/backup-go/cmd/internal/models"
"github.com/aerospike/tools-common-go/client"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
)
Expand Down Expand Up @@ -73,6 +74,8 @@ func TestClients_newAerospikeClient(t *testing.T) {

func TestClients_newS3Client(t *testing.T) {
t.Parallel()
err := createAwsCredentials()
assert.NoError(t, err)

cfg := &models.AwsS3{
Region: testS3Region,
Expand All @@ -81,7 +84,7 @@ func TestClients_newS3Client(t *testing.T) {
}

ctx := context.Background()
_, err := newS3Client(ctx, cfg)
_, err = newS3Client(ctx, cfg)
require.NoError(t, err)
}

Expand Down
25 changes: 18 additions & 7 deletions cmd/internal/app/writers.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ import (
"github.com/aerospike/backup-go/io/local"
)

func newLocalWriter(b *models.Backup, c *models.Common) (backup.Writer, error) {
func newLocalWriter(ctx context.Context, b *models.Backup, c *models.Common) (backup.Writer, error) {
var opts []local.Opt

if c.Directory != "" && b.OutputFile == "" {
Expand All @@ -38,11 +38,13 @@ func newLocalWriter(b *models.Backup, c *models.Common) (backup.Writer, error) {
opts = append(opts, local.WithFile(b.OutputFile))
}

if b.RemoveFiles {
if b.ShouldClearTarget() {
opts = append(opts, local.WithRemoveFiles())
}

return local.NewWriter(opts...)
opts = append(opts, local.WithValidator(asb.NewValidator()))

return local.NewWriter(ctx, opts...)
}

func newS3Writer(
Expand Down Expand Up @@ -70,10 +72,12 @@ func newS3Writer(
opts = append(opts, s3.WithFile(path))
}

if b.RemoveFiles {
if b.ShouldClearTarget() {
opts = append(opts, s3.WithRemoveFiles())
}

opts = append(opts, s3.WithValidator(asb.NewValidator()))

return s3.NewWriter(ctx, client, bucketName, opts...)
}

Expand All @@ -98,10 +102,12 @@ func newGcpWriter(
opts = append(opts, storage.WithFile(b.OutputFile))
}

if b.RemoveFiles {
if b.ShouldClearTarget() {
opts = append(opts, storage.WithRemoveFiles())
}

opts = append(opts, storage.WithValidator(asb.NewValidator()))

return storage.NewWriter(ctx, client, g.BucketName, opts...)
}

Expand All @@ -126,10 +132,12 @@ func newAzureWriter(
opts = append(opts, blob.WithFile(b.OutputFile))
}

if b.RemoveFiles {
if b.ShouldClearTarget() {
opts = append(opts, blob.WithRemoveFiles())
}

opts = append(opts, blob.WithValidator(asb.NewValidator()))

return blob.NewWriter(ctx, client, a.ContainerName, opts...)
}

Expand All @@ -138,10 +146,13 @@ func newAzureWriter(
// getBucketFromPath splits a storage path of the form "bucket/rest/of/path"
// into the bucket name and the remaining object path. A string without a
// separator is treated as a bare bucket name, and an empty remainder is
// normalized to "/" so callers always receive a usable prefix.
func getBucketFromPath(path string) (bucket, cleanPath string) {
	parts := strings.Split(path, "/")
	if len(parts) < 2 {
		// No "/" present: the whole string is the bucket, root prefix.
		return path, "/"
	}

	cleanPath = strings.Join(parts[1:], "/")
	if cleanPath == "" {
		// Trailing slash only (e.g. "bucket/"): normalize to root.
		cleanPath = "/"
	}

	return parts[0], cleanPath
}
66 changes: 46 additions & 20 deletions cmd/internal/app/writers_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,22 +47,50 @@ const (
)

func TestGetBucketFromPath(t *testing.T) {
t.Parallel()
tests := []struct {
input string
expectedBucket string
expectedPath string
name string
path string
wantBucket string
wantCleanPath string
}{
{input: "bucket/path/to/file", expectedBucket: "bucket", expectedPath: "path/to/file"},
{input: "single-part", expectedBucket: "", expectedPath: "single-part"},
{input: "bucket/", expectedBucket: "bucket", expectedPath: ""},
{input: "", expectedBucket: "", expectedPath: ""},
{
name: "Single part path",
path: "bucketname",
wantBucket: "bucketname",
wantCleanPath: "/",
},
{
name: "Path with bucket and folder",
path: "bucketname/folder",
wantBucket: "bucketname",
wantCleanPath: "folder",
},
{
name: "Path with multiple folders",
path: "bucketname/folder/subfolder",
wantBucket: "bucketname",
wantCleanPath: "folder/subfolder",
},
{
name: "Path with trailing slash",
path: "bucketname/",
wantBucket: "bucketname",
wantCleanPath: "/",
},
{
name: "Empty path",
path: "",
wantBucket: "",
wantCleanPath: "/",
},
}

for _, test := range tests {
bucket, path := getBucketFromPath(test.input)
assert.Equal(t, test.expectedBucket, bucket)
assert.Equal(t, test.expectedPath, path)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
bucket, cleanPath := getBucketFromPath(tt.path)
assert.Equal(t, tt.wantBucket, bucket)
assert.Equal(t, tt.wantCleanPath, cleanPath)
})
}
}

Expand All @@ -74,8 +102,8 @@ func TestNewLocalWriter(t *testing.T) {
c := &models.Common{
Directory: t.TempDir(),
}

writer, err := newLocalWriter(b, c)
ctx := context.Background()
writer, err := newLocalWriter(ctx, b, c)
assert.NoError(t, err)
assert.NotNil(t, writer)
assert.Equal(t, testLocalType, writer.GetType())
Expand All @@ -85,13 +113,13 @@ func TestNewLocalWriter(t *testing.T) {
}
c = &models.Common{}

writer, err = newLocalWriter(b, c)
writer, err = newLocalWriter(ctx, b, c)
assert.NoError(t, err)
assert.NotNil(t, writer)
assert.Equal(t, testLocalType, writer.GetType())

b = &models.Backup{}
writer, err = newLocalWriter(b, c)
writer, err = newLocalWriter(ctx, b, c)
assert.Error(t, err)
assert.Nil(t, writer)
}
Expand Down Expand Up @@ -188,8 +216,7 @@ func TestGcpWriter(t *testing.T) {
assert.Equal(t, testGcpType, writer.GetType())

b = &models.Backup{
OutputFile: t.TempDir() + testFileName,
RemoveFiles: true,
OutputFile: t.TempDir() + testFileName,
}
c = &models.Common{}

Expand Down Expand Up @@ -243,8 +270,7 @@ func TestAzureWriter(t *testing.T) {
assert.Equal(t, testAzureType, writer.GetType())

b = &models.Backup{
OutputFile: t.TempDir() + testFileName,
RemoveFiles: true,
OutputFile: t.TempDir() + testFileName,
}
c = &models.Common{}

Expand Down
5 changes: 5 additions & 0 deletions cmd/internal/flags/backup.go
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,11 @@ func (f *Backup) NewFlagSet() *pflag.FlagSet {
false,
"Specifies how to perform scan. If set to true, we launch parallel workers for nodes;\n"+
"otherwise workers run in parallel for partitions.")
// After implementing --continue and --estimate add this line here:
// "This option is mutually exclusive to --continue and --estimate."
flagSet.BoolVar(&f.RemoveArtifacts, "remove-artifacts",
false,
"Remove existing backup file (-o) or files (-d) without performing a backup.")

return flagSet
}
Expand Down
5 changes: 4 additions & 1 deletion cmd/internal/flags/backup_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,8 @@ func TestBackup_NewFlagSet(t *testing.T) {
"--no-bins",
"--sleep-between-retries", "10",
"--filter-exp", "encoded-filter-exp",
"--parallel-nodes", "true",
"--parallel-nodes",
"--remove-artifacts",
}

err := flagSet.Parse(args)
Expand All @@ -54,6 +55,7 @@ func TestBackup_NewFlagSet(t *testing.T) {
assert.Equal(t, 10, result.SleepBetweenRetries, "The sleep-between-retries flag should be parsed correctly")
assert.Equal(t, "encoded-filter-exp", result.FilterExpression, "The filter-exp flag should be parsed correctly")
assert.Equal(t, true, result.ParallelNodes, "The parallel-nodes flag should be parsed correctly")
assert.Equal(t, true, result.RemoveArtifacts, "The remove-artifacts flag should be parsed correctly")
}

func TestBackup_NewFlagSet_DefaultValues(t *testing.T) {
Expand All @@ -77,4 +79,5 @@ func TestBackup_NewFlagSet_DefaultValues(t *testing.T) {
assert.Equal(t, 5, result.SleepBetweenRetries, "The default value for sleep-between-retries should be 5")
assert.Equal(t, "", result.FilterExpression, "The default value for filter-exp should be an empty string")
assert.Equal(t, false, result.ParallelNodes, "The default value for parallel-nodes should be false")
assert.Equal(t, false, result.RemoveArtifacts, "The default value for remove-artifacts should be false")
}
6 changes: 6 additions & 0 deletions cmd/internal/models/backup.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,4 +26,10 @@ type Backup struct {
SleepBetweenRetries int
FilterExpression string
ParallelNodes bool
RemoveArtifacts bool
}

// ShouldClearTarget reports whether the backup target should be cleaned
// before writing, i.e. when either RemoveFiles or RemoveArtifacts is set.
func (b *Backup) ShouldClearTarget() bool {
	return b.RemoveFiles || b.RemoveArtifacts
}
Loading
Loading