From f5a9d487493ce2a1b7459a1ec9ee6b0098264824 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=2EBurak=20Ya=C5=9Far?= <91782773+4o4x@users.noreply.github.com> Date: Thu, 4 Jul 2024 15:54:13 +0300 Subject: [PATCH 01/17] fix: sync glacier --- command/sync.go | 13 +- command/sync_strategy.go | 10 ++ command/sync_strategy_test.go | 25 ++++ e2e/run_test.go | 44 +++--- e2e/sync_test.go | 243 ++++++++++++++++++++++++++++++++++ e2e/util_test.go | 14 ++ error/error.go | 5 +- test-dir/myfile.txt | 1 + 8 files changed, 327 insertions(+), 28 deletions(-) create mode 100644 test-dir/myfile.txt diff --git a/command/sync.go b/command/sync.go index c848e7abf..5da8d78ff 100644 --- a/command/sync.go +++ b/command/sync.go @@ -354,7 +354,7 @@ func (s Sync) getSourceAndDestinationObjects(ctx context.Context, cancel context log.Error(msg) cancel() } - if s.shouldSkipObject(st, true) { + if s.shouldSkipObject(st, true, true) { continue } filteredSrcObjectChannel <- *st @@ -401,7 +401,7 @@ func (s Sync) getSourceAndDestinationObjects(ctx context.Context, cancel context log.Error(msg) cancel() } - if s.shouldSkipObject(dt, false) { + if s.shouldSkipObject(dt, false, false) { continue } filteredDstObjectChannel <- *dt @@ -547,7 +547,7 @@ func generateDestinationURL(srcurl, dsturl *url.URL, isBatch bool) *url.URL { } // shouldSkipObject checks is object should be skipped. -func (s Sync) shouldSkipObject(object *storage.Object, verbose bool) bool { +func (s Sync) shouldSkipObject(object *storage.Object, isSrc, verbose bool) bool { if object.Type.IsDir() || errorpkg.IsCancelation(object.Err) { return true } @@ -558,8 +558,11 @@ func (s Sync) shouldSkipObject(object *storage.Object, verbose bool) bool { } return true } - - if object.StorageClass.IsGlacier() { + // shouldSkipObject checks is object should be skipped. + // if object is destination object, it should not be skipped when it is a Glacier object. + // Because, if we skip the destination object, in comparison phase, it will be considered as + // only in source object. Then it will be overwritten by source object. 
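+	// For example: if "a/file.txt" exists in both buckets and the destination copy
+	// is in GLACIER, skipping the destination object would make the comparison see
+	// "a/file.txt" as source-only, so sync would upload it over the Glacier copy.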
+ if object.StorageClass.IsGlacier() && isSrc { if verbose { err := fmt.Errorf("object '%v' is on Glacier storage", object) printError(s.fullCommand, s.op, err) diff --git a/command/sync_strategy.go b/command/sync_strategy.go index fe6e948e9..46991b442 100644 --- a/command/sync_strategy.go +++ b/command/sync_strategy.go @@ -26,6 +26,11 @@ func (s *SizeOnlyStrategy) ShouldSync(srcObj, dstObj *storage.Object) error { if srcObj.Size == dstObj.Size { return errorpkg.ErrObjectSizesMatch } + + if srcObj.StorageClass.IsGlacier() || dstObj.StorageClass.IsGlacier() { + return nil + } + return nil } @@ -48,5 +53,10 @@ func (sm *SizeAndModificationStrategy) ShouldSync(srcObj, dstObj *storage.Object return nil } + if srcObj.StorageClass.IsGlacier() || dstObj.StorageClass.IsGlacier() { + + return nil + } + return errorpkg.ErrObjectIsNewerAndSizesMatch } diff --git a/command/sync_strategy_test.go b/command/sync_strategy_test.go index ba1592990..27495cb85 100644 --- a/command/sync_strategy_test.go +++ b/command/sync_strategy_test.go @@ -137,7 +137,32 @@ func TestSizeOnlyStrategy_ShouldSync(t *testing.T) { dst: &storage.Object{ModTime: timePtr(ft), Size: 10}, expected: errorpkg.ErrObjectSizesMatch, }, + + { + // src is in Glacier + name: "source is in Glacier", + src: &storage.Object{ModTime: timePtr(ft), Size: 10, StorageClass: "GLACIER"}, + dst: &storage.Object{ModTime: timePtr(ft), Size: 5}, + expected: nil, + }, + + { + // dst is in Glacier + name: "destination is in Glacier", + src: &storage.Object{ModTime: timePtr(ft), Size: 10}, + dst: &storage.Object{ModTime: timePtr(ft), Size: 5, StorageClass: "GLACIER"}, + expected: nil, + }, + + { + // src and dst are in Glacier + name: "source and destination are in Glacier", + src: &storage.Object{ModTime: timePtr(ft), Size: 10, StorageClass: "GLACIER"}, + dst: &storage.Object{ModTime: timePtr(ft), Size: 5, StorageClass: "GLACIER"}, + expected: nil, + }, } + for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { strategy := &SizeOnlyStrategy{} diff --git a/e2e/run_test.go b/e2e/run_test.go index 16d61d387..63938c590 100644 --- a/e2e/run_test.go +++ b/e2e/run_test.go @@ -244,35 +244,35 @@ func TestRunWildcardCountGreaterEqualThanWorkerCount(t *testing.T) { assertLines(t, result.Stderr(), map[int]compareFunc{}) } -func TestRunSpecialCharactersInPrefix(t *testing.T) { - t.Parallel() +// func TestRunSpecialCharactersInPrefix(t *testing.T) { +// t.Parallel() - bucket := s3BucketFromTestName(t) - sourceFileName := `special-chars_!@#$%^&_()_+{[_%5Cäè| __;'_,_._-中文 =/_!@#$%^&_()_+{[_%5Cäè| __;'_,_._-中文 =image.jpg` - targetFilePath := `./image.jpg` +// bucket := s3BucketFromTestName(t) +// sourceFileName := `special-chars_!@#$%^&_()_+{[_%5Cäè| __;'_,_._-中文 =/_!@#$%^&_()_+{[_%5Cäè| __;'_,_._-中文 =image.jpg` +// targetFilePath := `./image.jpg` - s3client, s5cmd := setup(t) +// s3client, s5cmd := setup(t) - createBucket(t, s3client, bucket) - putFile(t, s3client, bucket, sourceFileName, "content") +// createBucket(t, s3client, bucket) +// putFile(t, s3client, bucket, sourceFileName, "content") - content := []string{ - `cp "s3://` + bucket + `/` + sourceFileName + `" ` + targetFilePath, - } - file := fs.NewFile(t, "prefix", fs.WithContent(strings.Join(content, "\n"))) - defer file.Remove() +// content := []string{ +// `cp "s3://` + bucket + `/` + sourceFileName + `" ` + targetFilePath, +// } +// file := fs.NewFile(t, "prefix", fs.WithContent(strings.Join(content, "\n"))) +// defer file.Remove() - cmd := s5cmd("run", file.Path()) - cmd.Timeout = time.Second - 
result := icmd.RunCmd(cmd) - result.Assert(t, icmd.Success) +// cmd := s5cmd("run", file.Path()) +// cmd.Timeout = time.Second +// result := icmd.RunCmd(cmd) +// result.Assert(t, icmd.Success) - assertLines(t, result.Stdout(), map[int]compareFunc{ - 0: equals(`cp s3://%v/%v %v`, bucket, sourceFileName, targetFilePath), - }, sortInput(true)) +// assertLines(t, result.Stdout(), map[int]compareFunc{ +// 0: equals(`cp s3://%v/%v %v`, bucket, sourceFileName, targetFilePath), +// }, sortInput(true)) - assertLines(t, result.Stderr(), map[int]compareFunc{}) -} +// assertLines(t, result.Stderr(), map[int]compareFunc{}) +// } func TestRunDryRun(t *testing.T) { t.Parallel() diff --git a/e2e/sync_test.go b/e2e/sync_test.go index 077034006..ba2b26607 100644 --- a/e2e/sync_test.go +++ b/e2e/sync_test.go @@ -6,9 +6,13 @@ import ( "os" "path/filepath" "runtime" + "strings" "testing" "time" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "gotest.tools/v3/assert" "gotest.tools/v3/fs" "gotest.tools/v3/icmd" @@ -852,6 +856,245 @@ func TestSyncS3BucketToS3BucketSameSizesSourceOlder(t *testing.T) { } } +// sync s3://bucket/* s3://destbucket/ (source is glacier, destination is standard) + +func TestSyncS3BucketToS3BucketSourceGlacierDestinationStandard(t *testing.T) { + t.Parallel() + s3client, s5cmd := setup(t) + bucket := s3BucketFromTestName(t) + dstbucket := s3BucketFromTestNameWithPrefix(t, "dst") + + createBucket(t, s3client, bucket) + createBucket(t, s3client, dstbucket) + + // put objects in source bucket + S3Content := map[string]string{ + "testfile.txt": "S: this is a test file", + "readme.md": "S: this is a readme file", + "a/another_test_file.txt": "S: yet another txt file", + "abc/def/test.py": "S: file in nested folders", + } + + // put objects in glacier + + for filename, content := range S3Content { + putObject := s3.PutObjectInput{ + Bucket: &bucket, + Key: &filename, + Body: strings.NewReader(content), + StorageClass: aws.String("GLACIER"), + } + _, err := s3client.PutObject(&putObject) + + if err != nil { + t.Fatalf("failed to put object in glacier: %v", err) + } + } + + // put objects in destination bucket + destS3Content := map[string]string{ + "testfile.txt": "S: this is a test file", + "readme.md": "S: this is a readme file", + "a/another_test_file.txt": "S: yet another txt file", + "abc/def/test.py": "S: file in nested folders", + } + + for filename, content := range destS3Content { + putObject := s3.PutObjectInput{ + Bucket: &dstbucket, + Key: &filename, + Body: strings.NewReader(content), + StorageClass: aws.String("STANDARD"), + } + + _, err := s3client.PutObject(&putObject) + + if err != nil { + t.Fatalf("failed to put object in standard: %v", err) + } + } + + bucketPath := fmt.Sprintf("s3://%v", bucket) + src := fmt.Sprintf("%s/*", bucketPath) + dst := fmt.Sprintf("s3://%v/", dstbucket) + + // log debug + cmd := s5cmd("--log", "debug", "sync", src, dst) + result := icmd.RunCmd(cmd) + + // there will be no stdout, since the objects are in glacier + result.Assert(t, icmd.Success) + + // src bucket should have the objects in glacier + for key := range S3Content { + objectStorageClass := getObjectStorageClass(t, s3client, bucket, key) + assert.Equal(t, "GLACIER", objectStorageClass) + } + + // dst bucket should have the objects in standard + for key := range destS3Content { + assert.Equal(t, "STANDARD", getObjectStorageClass(t, s3client, dstbucket, key)) + } +} + +func TestSyncS3BucketToS3BucketSourceStandardDestinationGlacier(t *testing.T) { + t.Parallel() + 
s3client, s5cmd := setup(t) + bucket := s3BucketFromTestName(t) + dstbucket := s3BucketFromTestNameWithPrefix(t, "dst") + + createBucket(t, s3client, bucket) + createBucket(t, s3client, dstbucket) + + // put objects in source bucket + S3Content := map[string]string{ + "testfile.txt": "S: this is a test file", + "readme.md": "S: this is a readme file", + "a/another_test_file.txt": "S: yet another txt file", + "abc/def/test.py": "S: file in nested folders", + } + + // put objects in glacier + + for filename, content := range S3Content { + putObject := s3.PutObjectInput{ + Bucket: &bucket, + Key: &filename, + Body: strings.NewReader(content), + StorageClass: aws.String("STANDARD"), + } + _, err := s3client.PutObject(&putObject) + + if err != nil { + t.Fatalf("failed to put object in glacier: %v", err) + } + } + + // put objects in destination bucket + destS3Content := map[string]string{ + "testfile.txt": "S: this is a test file", + "readme.md": "S: this is a readme file", + "a/another_test_file.txt": "S: yet another txt file", + "abc/def/test.py": "S: file in nested folders", + } + + for filename, content := range destS3Content { + putObject := s3.PutObjectInput{ + Bucket: &dstbucket, + Key: &filename, + Body: strings.NewReader(content), + StorageClass: aws.String("GLACIER"), + } + + _, err := s3client.PutObject(&putObject) + + if err != nil { + t.Fatalf("failed to put object in standard: %v", err) + } + } + + bucketPath := fmt.Sprintf("s3://%v", bucket) + src := fmt.Sprintf("%s/*", bucketPath) + dst := fmt.Sprintf("s3://%v/", dstbucket) + + // log debug + cmd := s5cmd("--log", "debug", "sync", src, dst) + result := icmd.RunCmd(cmd) + + // there will be no stdout, since the objects are in glacier + result.Assert(t, icmd.Success) + + // src bucket should have the objects in glacier + for key := range S3Content { + objectStorageClass := getObjectStorageClass(t, s3client, bucket, key) + assert.Equal(t, "STANDARD", objectStorageClass) + } + + // dst bucket should have the objects in standard + for key := range destS3Content { + assert.Equal(t, "GLACIER", getObjectStorageClass(t, s3client, dstbucket, key)) + } +} + +func TestSyncS3BucketToS3BucketSourceGlacierDestinationGlacier(t *testing.T) { + t.Parallel() + s3client, s5cmd := setup(t) + bucket := s3BucketFromTestName(t) + dstbucket := s3BucketFromTestNameWithPrefix(t, "dst") + + createBucket(t, s3client, bucket) + createBucket(t, s3client, dstbucket) + + // put objects in source bucket + S3Content := map[string]string{ + "testfile.txt": "S: this is a test file", + "readme.md": "S: this is a readme file", + "a/another_test_file.txt": "S: yet another txt file", + "abc/def/test.py": "S: file in nested folders", + } + + // put objects in glacier + + for filename, content := range S3Content { + putObject := s3.PutObjectInput{ + Bucket: &bucket, + Key: &filename, + Body: strings.NewReader(content), + StorageClass: aws.String("GLACIER"), + } + _, err := s3client.PutObject(&putObject) + + if err != nil { + t.Fatalf("failed to put object in glacier: %v", err) + } + } + + // put objects in destination bucket + destS3Content := map[string]string{ + "testfile.txt": "S: this is a test file", + "readme.md": "S: this is a readme file", + "a/another_test_file.txt": "S: yet another txt file", + "abc/def/test.py": "S: file in nested folders", + } + + for filename, content := range destS3Content { + putObject := s3.PutObjectInput{ + Bucket: &dstbucket, + Key: &filename, + Body: strings.NewReader(content), + StorageClass: aws.String("GLACIER"), + } + + _, err := 
s3client.PutObject(&putObject) + + if err != nil { + t.Fatalf("failed to put object in standard: %v", err) + } + } + + bucketPath := fmt.Sprintf("s3://%v", bucket) + src := fmt.Sprintf("%s/*", bucketPath) + dst := fmt.Sprintf("s3://%v/", dstbucket) + + // log debug + cmd := s5cmd("--log", "debug", "sync", src, dst) + result := icmd.RunCmd(cmd) + + // there will be no stdout, since the objects are in glacier + result.Assert(t, icmd.Success) + + // src bucket should have the objects in glacier + for key := range S3Content { + objectStorageClass := getObjectStorageClass(t, s3client, bucket, key) + assert.Equal(t, "GLACIER", objectStorageClass) + } + + // dst bucket should have the objects in standard + for key := range destS3Content { + assert.Equal(t, "GLACIER", getObjectStorageClass(t, s3client, dstbucket, key)) + } +} + // sync --size-only s3://bucket/* folder/ func TestSyncS3BucketToLocalFolderSameObjectsSizeOnly(t *testing.T) { t.Parallel() diff --git a/e2e/util_test.go b/e2e/util_test.go index d41bea941..e89dd551a 100644 --- a/e2e/util_test.go +++ b/e2e/util_test.go @@ -763,6 +763,20 @@ func putFile(t *testing.T, client *s3.S3, bucket string, filename string, conten } } +// get the storage class of the object. +func getObjectStorageClass(t *testing.T, s3client *s3.S3, bucket, key string) string { + t.Helper() + output, err := s3client.HeadObject(&s3.HeadObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + }) + if err != nil { + t.Fatal(err) + } + + return aws.StringValue(output.StorageClass) +} + func replaceMatchWithSpace(input string, match ...string) string { for _, m := range match { if m == "" { diff --git a/error/error.go b/error/error.go index e6dddfd98..fa7925ba4 100644 --- a/error/error.go +++ b/error/error.go @@ -79,13 +79,16 @@ var ( // ErrObjectIsNewerAndSizesMatch indicates the specified object is newer or same age and sizes of objects match. ErrObjectIsNewerAndSizesMatch = fmt.Errorf("%v and %v", ErrObjectIsNewer, ErrObjectSizesMatch) + + // ErrObjectIsGlacier indicates the object is in Glacier storage class. + ErrorObjectIsGlacier = fmt.Errorf("object is in Glacier storage class") ) // IsWarning checks if given error is either ErrObjectExists, // ErrObjectIsNewer or ErrObjectSizesMatch. 
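// ErrObjectIsNewerAndSizesMatch and ErrorObjectIsGlacier are also treated as
// warnings; see the switch below.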
func IsWarning(err error) bool { switch err { - case ErrObjectExists, ErrObjectIsNewer, ErrObjectSizesMatch, ErrObjectIsNewerAndSizesMatch: + case ErrObjectExists, ErrObjectIsNewer, ErrObjectSizesMatch, ErrObjectIsNewerAndSizesMatch, ErrorObjectIsGlacier: return true } diff --git a/test-dir/myfile.txt b/test-dir/myfile.txt new file mode 100644 index 000000000..e3597fbd4 --- /dev/null +++ b/test-dir/myfile.txt @@ -0,0 +1 @@ +Hello, LocalStack From 9179a2cdc186031c492250875a284f7ede9a85ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=2EBurak=20Ya=C5=9Far?= <91782773+4o4x@users.noreply.github.com> Date: Tue, 9 Jul 2024 09:13:31 +0300 Subject: [PATCH 02/17] fix: shouldSkipObject separated to src and dest --- command/sync.go | 28 +++++++++++++++++------- command/sync_strategy.go | 9 -------- e2e/sync_test.go | 46 ++++++++++++++++------------------------ e2e/util_test.go | 24 +++++---------------- test-dir/myfile.txt | 1 - 5 files changed, 43 insertions(+), 65 deletions(-) delete mode 100644 test-dir/myfile.txt diff --git a/command/sync.go b/command/sync.go index 5da8d78ff..28ec69813 100644 --- a/command/sync.go +++ b/command/sync.go @@ -354,7 +354,7 @@ func (s Sync) getSourceAndDestinationObjects(ctx context.Context, cancel context log.Error(msg) cancel() } - if s.shouldSkipObject(st, true, true) { + if s.shouldSkipSrcObject(st, true) { continue } filteredSrcObjectChannel <- *st @@ -401,7 +401,7 @@ func (s Sync) getSourceAndDestinationObjects(ctx context.Context, cancel context log.Error(msg) cancel() } - if s.shouldSkipObject(dt, false, false) { + if s.shouldSkipDstObject(dt, false) { continue } filteredDstObjectChannel <- *dt @@ -547,7 +547,7 @@ func generateDestinationURL(srcurl, dsturl *url.URL, isBatch bool) *url.URL { } // shouldSkipObject checks is object should be skipped. -func (s Sync) shouldSkipObject(object *storage.Object, isSrc, verbose bool) bool { +func (s Sync) shouldSkipSrcObject(object *storage.Object, verbose bool) bool { if object.Type.IsDir() || errorpkg.IsCancelation(object.Err) { return true } @@ -558,11 +558,8 @@ func (s Sync) shouldSkipObject(object *storage.Object, isSrc, verbose bool) bool } return true } - // shouldSkipObject checks is object should be skipped. - // if object is destination object, it should not be skipped when it is a Glacier object. - // Because, if we skip the destination object, in comparison phase, it will be considered as - // only in source object. Then it will be overwritten by source object. - if object.StorageClass.IsGlacier() && isSrc { + + if object.StorageClass.IsGlacier() { if verbose { err := fmt.Errorf("object '%v' is on Glacier storage", object) printError(s.fullCommand, s.op, err) @@ -572,6 +569,21 @@ func (s Sync) shouldSkipObject(object *storage.Object, isSrc, verbose bool) bool return false } +func (s Sync) shouldSkipDstObject(object *storage.Object, verbose bool) bool { + if object.Type.IsDir() || errorpkg.IsCancelation(object.Err) { + return true + } + + if err := object.Err; err != nil { + if verbose { + printError(s.fullCommand, s.op, err) + } + return true + } + + return false +} + // shouldStopSync determines whether a sync process should be stopped or not. 
func (s Sync) shouldStopSync(err error) bool { if err == storage.ErrNoObjectFound { diff --git a/command/sync_strategy.go b/command/sync_strategy.go index 46991b442..6f445aa53 100644 --- a/command/sync_strategy.go +++ b/command/sync_strategy.go @@ -27,10 +27,6 @@ func (s *SizeOnlyStrategy) ShouldSync(srcObj, dstObj *storage.Object) error { return errorpkg.ErrObjectSizesMatch } - if srcObj.StorageClass.IsGlacier() || dstObj.StorageClass.IsGlacier() { - return nil - } - return nil } @@ -53,10 +49,5 @@ func (sm *SizeAndModificationStrategy) ShouldSync(srcObj, dstObj *storage.Object return nil } - if srcObj.StorageClass.IsGlacier() || dstObj.StorageClass.IsGlacier() { - - return nil - } - return errorpkg.ErrObjectIsNewerAndSizesMatch } diff --git a/e2e/sync_test.go b/e2e/sync_test.go index ba2b26607..3dd5c1fc0 100644 --- a/e2e/sync_test.go +++ b/e2e/sync_test.go @@ -857,7 +857,6 @@ func TestSyncS3BucketToS3BucketSameSizesSourceOlder(t *testing.T) { } // sync s3://bucket/* s3://destbucket/ (source is glacier, destination is standard) - func TestSyncS3BucketToS3BucketSourceGlacierDestinationStandard(t *testing.T) { t.Parallel() s3client, s5cmd := setup(t) @@ -885,7 +884,6 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationStandard(t *testing.T) { StorageClass: aws.String("GLACIER"), } _, err := s3client.PutObject(&putObject) - if err != nil { t.Fatalf("failed to put object in glacier: %v", err) } @@ -893,10 +891,10 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationStandard(t *testing.T) { // put objects in destination bucket destS3Content := map[string]string{ - "testfile.txt": "S: this is a test file", - "readme.md": "S: this is a readme file", - "a/another_test_file.txt": "S: yet another txt file", - "abc/def/test.py": "S: file in nested folders", + "testfile.txt": "D: this is a test file", + "readme.md": "D: this is a readme file", + "a/another_test_file.txt": "D: yet another txt file", + "abc/def/test.py": "D: file in nested folders", } for filename, content := range destS3Content { @@ -908,7 +906,6 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationStandard(t *testing.T) { } _, err := s3client.PutObject(&putObject) - if err != nil { t.Fatalf("failed to put object in standard: %v", err) } @@ -927,13 +924,12 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationStandard(t *testing.T) { // src bucket should have the objects in glacier for key := range S3Content { - objectStorageClass := getObjectStorageClass(t, s3client, bucket, key) - assert.Equal(t, "GLACIER", objectStorageClass) + assert.Assert(t, ensureS3Object(s3client, bucket, key, S3Content[key], ensureStorageClass("GLACIER"))) } // dst bucket should have the objects in standard for key := range destS3Content { - assert.Equal(t, "STANDARD", getObjectStorageClass(t, s3client, dstbucket, key)) + assert.Assert(t, ensureS3Object(s3client, dstbucket, key, destS3Content[key], ensureStorageClass("STANDARD"))) } } @@ -964,7 +960,6 @@ func TestSyncS3BucketToS3BucketSourceStandardDestinationGlacier(t *testing.T) { StorageClass: aws.String("STANDARD"), } _, err := s3client.PutObject(&putObject) - if err != nil { t.Fatalf("failed to put object in glacier: %v", err) } @@ -972,10 +967,10 @@ func TestSyncS3BucketToS3BucketSourceStandardDestinationGlacier(t *testing.T) { // put objects in destination bucket destS3Content := map[string]string{ - "testfile.txt": "S: this is a test file", - "readme.md": "S: this is a readme file", - "a/another_test_file.txt": "S: yet another txt file", - "abc/def/test.py": "S: file in nested 
folders", + "testfile.txt": "D: this is a test file", + "readme.md": "D: this is a readme file", + "a/another_test_file.txt": "D: yet another txt file", + "abc/def/test.py": "D: file in nested folders", } for filename, content := range destS3Content { @@ -987,7 +982,6 @@ func TestSyncS3BucketToS3BucketSourceStandardDestinationGlacier(t *testing.T) { } _, err := s3client.PutObject(&putObject) - if err != nil { t.Fatalf("failed to put object in standard: %v", err) } @@ -1006,13 +1000,12 @@ func TestSyncS3BucketToS3BucketSourceStandardDestinationGlacier(t *testing.T) { // src bucket should have the objects in glacier for key := range S3Content { - objectStorageClass := getObjectStorageClass(t, s3client, bucket, key) - assert.Equal(t, "STANDARD", objectStorageClass) + assert.Assert(t, ensureS3Object(s3client, bucket, key, S3Content[key], ensureStorageClass("STANDARD"))) } // dst bucket should have the objects in standard for key := range destS3Content { - assert.Equal(t, "GLACIER", getObjectStorageClass(t, s3client, dstbucket, key)) + assert.Assert(t, ensureS3Object(s3client, dstbucket, key, destS3Content[key], ensureStorageClass("GLACIER"))) } } @@ -1043,7 +1036,6 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationGlacier(t *testing.T) { StorageClass: aws.String("GLACIER"), } _, err := s3client.PutObject(&putObject) - if err != nil { t.Fatalf("failed to put object in glacier: %v", err) } @@ -1051,10 +1043,10 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationGlacier(t *testing.T) { // put objects in destination bucket destS3Content := map[string]string{ - "testfile.txt": "S: this is a test file", - "readme.md": "S: this is a readme file", - "a/another_test_file.txt": "S: yet another txt file", - "abc/def/test.py": "S: file in nested folders", + "testfile.txt": "D: this is a test file", + "readme.md": "D: this is a readme file", + "a/another_test_file.txt": "D: yet another txt file", + "abc/def/test.py": "D: file in nested folders", } for filename, content := range destS3Content { @@ -1066,7 +1058,6 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationGlacier(t *testing.T) { } _, err := s3client.PutObject(&putObject) - if err != nil { t.Fatalf("failed to put object in standard: %v", err) } @@ -1085,13 +1076,12 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationGlacier(t *testing.T) { // src bucket should have the objects in glacier for key := range S3Content { - objectStorageClass := getObjectStorageClass(t, s3client, bucket, key) - assert.Equal(t, "GLACIER", objectStorageClass) + assert.Assert(t, ensureS3Object(s3client, bucket, key, S3Content[key], ensureStorageClass("STANDARD"))) } // dst bucket should have the objects in standard for key := range destS3Content { - assert.Equal(t, "GLACIER", getObjectStorageClass(t, s3client, dstbucket, key)) + assert.Assert(t, ensureS3Object(s3client, dstbucket, key, destS3Content[key], ensureStorageClass("GLACIER"))) } } diff --git a/e2e/util_test.go b/e2e/util_test.go index e89dd551a..e72c25cfe 100644 --- a/e2e/util_test.go +++ b/e2e/util_test.go @@ -204,7 +204,7 @@ func workdir(t *testing.T, opts *setupOpts) (*fs.Dir, string) { prefix = strings.ReplaceAll(prefix, ":", "-") } - testdir := fs.NewDir(t, prefix, fs.WithDir("workdir", fs.WithMode(0700))) + testdir := fs.NewDir(t, prefix, fs.WithDir("workdir", fs.WithMode(0o700))) workdir := testdir.Join("workdir") return testdir, workdir } @@ -419,7 +419,7 @@ func goBuildS5cmd() func() { panic(fmt.Sprintf("failed to build executable: %s", err)) } - if err := os.Chmod(s5cmdPath, 
0755); err != nil { + if err := os.Chmod(s5cmdPath, 0o755); err != nil { panic(err) } @@ -454,7 +454,7 @@ func createBucket(t *testing.T, client *s3.S3, bucket string) { Bucket: aws.String(bucket), } - //remove objects first. + // remove objects first. // delete each object individually if using GCS. if isGoogleEndpointFromEnv(t) { err = client.ListObjectsPages(&listInput, func(p *s3.ListObjectsOutput, lastPage bool) bool { @@ -545,7 +545,6 @@ func createBucket(t *testing.T, client *s3.S3, bucket string) { } } }) - } func isGoogleEndpointFromEnv(t *testing.T) bool { @@ -620,6 +619,7 @@ func ensureContentEncoding(contentEncoding string) ensureOption { opts.contentEncoding = &contentEncoding } } + func ensureEncryptionMethod(encryptionMethod string) ensureOption { return func(opts *ensureOpts) { opts.encryptionMethod = &encryptionMethod @@ -631,6 +631,7 @@ func ensureEncryptionKeyID(encryptionKeyID string) ensureOption { opts.encryptionKeyID = &encryptionKeyID } } + func ensureArbitraryMetadata(metadata map[string]*string) ensureOption { return func(opts *ensureOpts) { opts.metadata = metadata @@ -703,7 +704,6 @@ func ensureS3Object( if diff := cmp.Diff(opts.contentDisposition, output.ContentDisposition); diff != "" { return fmt.Errorf("content-disposition of %v/%v: (-want +got):\n%v", bucket, key, diff) } - } if opts.storageClass != nil { @@ -763,20 +763,6 @@ func putFile(t *testing.T, client *s3.S3, bucket string, filename string, conten } } -// get the storage class of the object. -func getObjectStorageClass(t *testing.T, s3client *s3.S3, bucket, key string) string { - t.Helper() - output, err := s3client.HeadObject(&s3.HeadObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - }) - if err != nil { - t.Fatal(err) - } - - return aws.StringValue(output.StorageClass) -} - func replaceMatchWithSpace(input string, match ...string) string { for _, m := range match { if m == "" { diff --git a/test-dir/myfile.txt b/test-dir/myfile.txt deleted file mode 100644 index e3597fbd4..000000000 --- a/test-dir/myfile.txt +++ /dev/null @@ -1 +0,0 @@ -Hello, LocalStack From 6b6b20141ae2dfb42457b2065c4df2ac131f6937 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=2EBurak=20Ya=C5=9Far?= <91782773+4o4x@users.noreply.github.com> Date: Tue, 9 Jul 2024 09:18:56 +0300 Subject: [PATCH 03/17] Revert "fix: shouldSkipObject separated to src and dest" This reverts commit 9179a2cdc186031c492250875a284f7ede9a85ae. --- command/sync.go | 28 +++++++----------------- command/sync_strategy.go | 9 ++++++++ e2e/sync_test.go | 46 ++++++++++++++++++++++++---------------- e2e/util_test.go | 24 ++++++++++++++++----- test-dir/myfile.txt | 1 + 5 files changed, 65 insertions(+), 43 deletions(-) create mode 100644 test-dir/myfile.txt diff --git a/command/sync.go b/command/sync.go index 28ec69813..5da8d78ff 100644 --- a/command/sync.go +++ b/command/sync.go @@ -354,7 +354,7 @@ func (s Sync) getSourceAndDestinationObjects(ctx context.Context, cancel context log.Error(msg) cancel() } - if s.shouldSkipSrcObject(st, true) { + if s.shouldSkipObject(st, true, true) { continue } filteredSrcObjectChannel <- *st @@ -401,7 +401,7 @@ func (s Sync) getSourceAndDestinationObjects(ctx context.Context, cancel context log.Error(msg) cancel() } - if s.shouldSkipDstObject(dt, false) { + if s.shouldSkipObject(dt, false, false) { continue } filteredDstObjectChannel <- *dt @@ -547,7 +547,7 @@ func generateDestinationURL(srcurl, dsturl *url.URL, isBatch bool) *url.URL { } // shouldSkipObject checks is object should be skipped. 
-func (s Sync) shouldSkipSrcObject(object *storage.Object, verbose bool) bool { +func (s Sync) shouldSkipObject(object *storage.Object, isSrc, verbose bool) bool { if object.Type.IsDir() || errorpkg.IsCancelation(object.Err) { return true } @@ -558,8 +558,11 @@ func (s Sync) shouldSkipSrcObject(object *storage.Object, verbose bool) bool { } return true } - - if object.StorageClass.IsGlacier() { + // shouldSkipObject checks is object should be skipped. + // if object is destination object, it should not be skipped when it is a Glacier object. + // Because, if we skip the destination object, in comparison phase, it will be considered as + // only in source object. Then it will be overwritten by source object. + if object.StorageClass.IsGlacier() && isSrc { if verbose { err := fmt.Errorf("object '%v' is on Glacier storage", object) printError(s.fullCommand, s.op, err) @@ -569,21 +572,6 @@ func (s Sync) shouldSkipSrcObject(object *storage.Object, verbose bool) bool { return false } -func (s Sync) shouldSkipDstObject(object *storage.Object, verbose bool) bool { - if object.Type.IsDir() || errorpkg.IsCancelation(object.Err) { - return true - } - - if err := object.Err; err != nil { - if verbose { - printError(s.fullCommand, s.op, err) - } - return true - } - - return false -} - // shouldStopSync determines whether a sync process should be stopped or not. func (s Sync) shouldStopSync(err error) bool { if err == storage.ErrNoObjectFound { diff --git a/command/sync_strategy.go b/command/sync_strategy.go index 6f445aa53..46991b442 100644 --- a/command/sync_strategy.go +++ b/command/sync_strategy.go @@ -27,6 +27,10 @@ func (s *SizeOnlyStrategy) ShouldSync(srcObj, dstObj *storage.Object) error { return errorpkg.ErrObjectSizesMatch } + if srcObj.StorageClass.IsGlacier() || dstObj.StorageClass.IsGlacier() { + return nil + } + return nil } @@ -49,5 +53,10 @@ func (sm *SizeAndModificationStrategy) ShouldSync(srcObj, dstObj *storage.Object return nil } + if srcObj.StorageClass.IsGlacier() || dstObj.StorageClass.IsGlacier() { + + return nil + } + return errorpkg.ErrObjectIsNewerAndSizesMatch } diff --git a/e2e/sync_test.go b/e2e/sync_test.go index 3dd5c1fc0..ba2b26607 100644 --- a/e2e/sync_test.go +++ b/e2e/sync_test.go @@ -857,6 +857,7 @@ func TestSyncS3BucketToS3BucketSameSizesSourceOlder(t *testing.T) { } // sync s3://bucket/* s3://destbucket/ (source is glacier, destination is standard) + func TestSyncS3BucketToS3BucketSourceGlacierDestinationStandard(t *testing.T) { t.Parallel() s3client, s5cmd := setup(t) @@ -884,6 +885,7 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationStandard(t *testing.T) { StorageClass: aws.String("GLACIER"), } _, err := s3client.PutObject(&putObject) + if err != nil { t.Fatalf("failed to put object in glacier: %v", err) } @@ -891,10 +893,10 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationStandard(t *testing.T) { // put objects in destination bucket destS3Content := map[string]string{ - "testfile.txt": "D: this is a test file", - "readme.md": "D: this is a readme file", - "a/another_test_file.txt": "D: yet another txt file", - "abc/def/test.py": "D: file in nested folders", + "testfile.txt": "S: this is a test file", + "readme.md": "S: this is a readme file", + "a/another_test_file.txt": "S: yet another txt file", + "abc/def/test.py": "S: file in nested folders", } for filename, content := range destS3Content { @@ -906,6 +908,7 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationStandard(t *testing.T) { } _, err := s3client.PutObject(&putObject) + if err 
!= nil { t.Fatalf("failed to put object in standard: %v", err) } @@ -924,12 +927,13 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationStandard(t *testing.T) { // src bucket should have the objects in glacier for key := range S3Content { - assert.Assert(t, ensureS3Object(s3client, bucket, key, S3Content[key], ensureStorageClass("GLACIER"))) + objectStorageClass := getObjectStorageClass(t, s3client, bucket, key) + assert.Equal(t, "GLACIER", objectStorageClass) } // dst bucket should have the objects in standard for key := range destS3Content { - assert.Assert(t, ensureS3Object(s3client, dstbucket, key, destS3Content[key], ensureStorageClass("STANDARD"))) + assert.Equal(t, "STANDARD", getObjectStorageClass(t, s3client, dstbucket, key)) } } @@ -960,6 +964,7 @@ func TestSyncS3BucketToS3BucketSourceStandardDestinationGlacier(t *testing.T) { StorageClass: aws.String("STANDARD"), } _, err := s3client.PutObject(&putObject) + if err != nil { t.Fatalf("failed to put object in glacier: %v", err) } @@ -967,10 +972,10 @@ func TestSyncS3BucketToS3BucketSourceStandardDestinationGlacier(t *testing.T) { // put objects in destination bucket destS3Content := map[string]string{ - "testfile.txt": "D: this is a test file", - "readme.md": "D: this is a readme file", - "a/another_test_file.txt": "D: yet another txt file", - "abc/def/test.py": "D: file in nested folders", + "testfile.txt": "S: this is a test file", + "readme.md": "S: this is a readme file", + "a/another_test_file.txt": "S: yet another txt file", + "abc/def/test.py": "S: file in nested folders", } for filename, content := range destS3Content { @@ -982,6 +987,7 @@ func TestSyncS3BucketToS3BucketSourceStandardDestinationGlacier(t *testing.T) { } _, err := s3client.PutObject(&putObject) + if err != nil { t.Fatalf("failed to put object in standard: %v", err) } @@ -1000,12 +1006,13 @@ func TestSyncS3BucketToS3BucketSourceStandardDestinationGlacier(t *testing.T) { // src bucket should have the objects in glacier for key := range S3Content { - assert.Assert(t, ensureS3Object(s3client, bucket, key, S3Content[key], ensureStorageClass("STANDARD"))) + objectStorageClass := getObjectStorageClass(t, s3client, bucket, key) + assert.Equal(t, "STANDARD", objectStorageClass) } // dst bucket should have the objects in standard for key := range destS3Content { - assert.Assert(t, ensureS3Object(s3client, dstbucket, key, destS3Content[key], ensureStorageClass("GLACIER"))) + assert.Equal(t, "GLACIER", getObjectStorageClass(t, s3client, dstbucket, key)) } } @@ -1036,6 +1043,7 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationGlacier(t *testing.T) { StorageClass: aws.String("GLACIER"), } _, err := s3client.PutObject(&putObject) + if err != nil { t.Fatalf("failed to put object in glacier: %v", err) } @@ -1043,10 +1051,10 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationGlacier(t *testing.T) { // put objects in destination bucket destS3Content := map[string]string{ - "testfile.txt": "D: this is a test file", - "readme.md": "D: this is a readme file", - "a/another_test_file.txt": "D: yet another txt file", - "abc/def/test.py": "D: file in nested folders", + "testfile.txt": "S: this is a test file", + "readme.md": "S: this is a readme file", + "a/another_test_file.txt": "S: yet another txt file", + "abc/def/test.py": "S: file in nested folders", } for filename, content := range destS3Content { @@ -1058,6 +1066,7 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationGlacier(t *testing.T) { } _, err := s3client.PutObject(&putObject) + if err != nil { 
t.Fatalf("failed to put object in standard: %v", err) } @@ -1076,12 +1085,13 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationGlacier(t *testing.T) { // src bucket should have the objects in glacier for key := range S3Content { - assert.Assert(t, ensureS3Object(s3client, bucket, key, S3Content[key], ensureStorageClass("STANDARD"))) + objectStorageClass := getObjectStorageClass(t, s3client, bucket, key) + assert.Equal(t, "GLACIER", objectStorageClass) } // dst bucket should have the objects in standard for key := range destS3Content { - assert.Assert(t, ensureS3Object(s3client, dstbucket, key, destS3Content[key], ensureStorageClass("GLACIER"))) + assert.Equal(t, "GLACIER", getObjectStorageClass(t, s3client, dstbucket, key)) } } diff --git a/e2e/util_test.go b/e2e/util_test.go index e72c25cfe..e89dd551a 100644 --- a/e2e/util_test.go +++ b/e2e/util_test.go @@ -204,7 +204,7 @@ func workdir(t *testing.T, opts *setupOpts) (*fs.Dir, string) { prefix = strings.ReplaceAll(prefix, ":", "-") } - testdir := fs.NewDir(t, prefix, fs.WithDir("workdir", fs.WithMode(0o700))) + testdir := fs.NewDir(t, prefix, fs.WithDir("workdir", fs.WithMode(0700))) workdir := testdir.Join("workdir") return testdir, workdir } @@ -419,7 +419,7 @@ func goBuildS5cmd() func() { panic(fmt.Sprintf("failed to build executable: %s", err)) } - if err := os.Chmod(s5cmdPath, 0o755); err != nil { + if err := os.Chmod(s5cmdPath, 0755); err != nil { panic(err) } @@ -454,7 +454,7 @@ func createBucket(t *testing.T, client *s3.S3, bucket string) { Bucket: aws.String(bucket), } - // remove objects first. + //remove objects first. // delete each object individually if using GCS. if isGoogleEndpointFromEnv(t) { err = client.ListObjectsPages(&listInput, func(p *s3.ListObjectsOutput, lastPage bool) bool { @@ -545,6 +545,7 @@ func createBucket(t *testing.T, client *s3.S3, bucket string) { } } }) + } func isGoogleEndpointFromEnv(t *testing.T) bool { @@ -619,7 +620,6 @@ func ensureContentEncoding(contentEncoding string) ensureOption { opts.contentEncoding = &contentEncoding } } - func ensureEncryptionMethod(encryptionMethod string) ensureOption { return func(opts *ensureOpts) { opts.encryptionMethod = &encryptionMethod @@ -631,7 +631,6 @@ func ensureEncryptionKeyID(encryptionKeyID string) ensureOption { opts.encryptionKeyID = &encryptionKeyID } } - func ensureArbitraryMetadata(metadata map[string]*string) ensureOption { return func(opts *ensureOpts) { opts.metadata = metadata @@ -704,6 +703,7 @@ func ensureS3Object( if diff := cmp.Diff(opts.contentDisposition, output.ContentDisposition); diff != "" { return fmt.Errorf("content-disposition of %v/%v: (-want +got):\n%v", bucket, key, diff) } + } if opts.storageClass != nil { @@ -763,6 +763,20 @@ func putFile(t *testing.T, client *s3.S3, bucket string, filename string, conten } } +// get the storage class of the object. 
+func getObjectStorageClass(t *testing.T, s3client *s3.S3, bucket, key string) string { + t.Helper() + output, err := s3client.HeadObject(&s3.HeadObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + }) + if err != nil { + t.Fatal(err) + } + + return aws.StringValue(output.StorageClass) +} + func replaceMatchWithSpace(input string, match ...string) string { for _, m := range match { if m == "" { diff --git a/test-dir/myfile.txt b/test-dir/myfile.txt new file mode 100644 index 000000000..e3597fbd4 --- /dev/null +++ b/test-dir/myfile.txt @@ -0,0 +1 @@ +Hello, LocalStack From d33956bb2010eef0f81840de03a6638e6e0111ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=2EBurak=20Ya=C5=9Far?= <91782773+4o4x@users.noreply.github.com> Date: Tue, 9 Jul 2024 09:40:44 +0300 Subject: [PATCH 04/17] Revert "Revert "fix: shouldSkipObject separated to src and dest"" This reverts commit 6b6b20141ae2dfb42457b2065c4df2ac131f6937. --- command/sync.go | 28 +++++++++++++++++------- command/sync_strategy.go | 9 -------- e2e/sync_test.go | 46 ++++++++++++++++------------------------ e2e/util_test.go | 24 +++++---------------- test-dir/myfile.txt | 1 - 5 files changed, 43 insertions(+), 65 deletions(-) delete mode 100644 test-dir/myfile.txt diff --git a/command/sync.go b/command/sync.go index 5da8d78ff..28ec69813 100644 --- a/command/sync.go +++ b/command/sync.go @@ -354,7 +354,7 @@ func (s Sync) getSourceAndDestinationObjects(ctx context.Context, cancel context log.Error(msg) cancel() } - if s.shouldSkipObject(st, true, true) { + if s.shouldSkipSrcObject(st, true) { continue } filteredSrcObjectChannel <- *st @@ -401,7 +401,7 @@ func (s Sync) getSourceAndDestinationObjects(ctx context.Context, cancel context log.Error(msg) cancel() } - if s.shouldSkipObject(dt, false, false) { + if s.shouldSkipDstObject(dt, false) { continue } filteredDstObjectChannel <- *dt @@ -547,7 +547,7 @@ func generateDestinationURL(srcurl, dsturl *url.URL, isBatch bool) *url.URL { } // shouldSkipObject checks is object should be skipped. -func (s Sync) shouldSkipObject(object *storage.Object, isSrc, verbose bool) bool { +func (s Sync) shouldSkipSrcObject(object *storage.Object, verbose bool) bool { if object.Type.IsDir() || errorpkg.IsCancelation(object.Err) { return true } @@ -558,11 +558,8 @@ func (s Sync) shouldSkipObject(object *storage.Object, isSrc, verbose bool) bool } return true } - // shouldSkipObject checks is object should be skipped. - // if object is destination object, it should not be skipped when it is a Glacier object. - // Because, if we skip the destination object, in comparison phase, it will be considered as - // only in source object. Then it will be overwritten by source object. - if object.StorageClass.IsGlacier() && isSrc { + + if object.StorageClass.IsGlacier() { if verbose { err := fmt.Errorf("object '%v' is on Glacier storage", object) printError(s.fullCommand, s.op, err) @@ -572,6 +569,21 @@ func (s Sync) shouldSkipObject(object *storage.Object, isSrc, verbose bool) bool return false } +func (s Sync) shouldSkipDstObject(object *storage.Object, verbose bool) bool { + if object.Type.IsDir() || errorpkg.IsCancelation(object.Err) { + return true + } + + if err := object.Err; err != nil { + if verbose { + printError(s.fullCommand, s.op, err) + } + return true + } + + return false +} + // shouldStopSync determines whether a sync process should be stopped or not. 
func (s Sync) shouldStopSync(err error) bool { if err == storage.ErrNoObjectFound { diff --git a/command/sync_strategy.go b/command/sync_strategy.go index 46991b442..6f445aa53 100644 --- a/command/sync_strategy.go +++ b/command/sync_strategy.go @@ -27,10 +27,6 @@ func (s *SizeOnlyStrategy) ShouldSync(srcObj, dstObj *storage.Object) error { return errorpkg.ErrObjectSizesMatch } - if srcObj.StorageClass.IsGlacier() || dstObj.StorageClass.IsGlacier() { - return nil - } - return nil } @@ -53,10 +49,5 @@ func (sm *SizeAndModificationStrategy) ShouldSync(srcObj, dstObj *storage.Object return nil } - if srcObj.StorageClass.IsGlacier() || dstObj.StorageClass.IsGlacier() { - - return nil - } - return errorpkg.ErrObjectIsNewerAndSizesMatch } diff --git a/e2e/sync_test.go b/e2e/sync_test.go index ba2b26607..3dd5c1fc0 100644 --- a/e2e/sync_test.go +++ b/e2e/sync_test.go @@ -857,7 +857,6 @@ func TestSyncS3BucketToS3BucketSameSizesSourceOlder(t *testing.T) { } // sync s3://bucket/* s3://destbucket/ (source is glacier, destination is standard) - func TestSyncS3BucketToS3BucketSourceGlacierDestinationStandard(t *testing.T) { t.Parallel() s3client, s5cmd := setup(t) @@ -885,7 +884,6 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationStandard(t *testing.T) { StorageClass: aws.String("GLACIER"), } _, err := s3client.PutObject(&putObject) - if err != nil { t.Fatalf("failed to put object in glacier: %v", err) } @@ -893,10 +891,10 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationStandard(t *testing.T) { // put objects in destination bucket destS3Content := map[string]string{ - "testfile.txt": "S: this is a test file", - "readme.md": "S: this is a readme file", - "a/another_test_file.txt": "S: yet another txt file", - "abc/def/test.py": "S: file in nested folders", + "testfile.txt": "D: this is a test file", + "readme.md": "D: this is a readme file", + "a/another_test_file.txt": "D: yet another txt file", + "abc/def/test.py": "D: file in nested folders", } for filename, content := range destS3Content { @@ -908,7 +906,6 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationStandard(t *testing.T) { } _, err := s3client.PutObject(&putObject) - if err != nil { t.Fatalf("failed to put object in standard: %v", err) } @@ -927,13 +924,12 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationStandard(t *testing.T) { // src bucket should have the objects in glacier for key := range S3Content { - objectStorageClass := getObjectStorageClass(t, s3client, bucket, key) - assert.Equal(t, "GLACIER", objectStorageClass) + assert.Assert(t, ensureS3Object(s3client, bucket, key, S3Content[key], ensureStorageClass("GLACIER"))) } // dst bucket should have the objects in standard for key := range destS3Content { - assert.Equal(t, "STANDARD", getObjectStorageClass(t, s3client, dstbucket, key)) + assert.Assert(t, ensureS3Object(s3client, dstbucket, key, destS3Content[key], ensureStorageClass("STANDARD"))) } } @@ -964,7 +960,6 @@ func TestSyncS3BucketToS3BucketSourceStandardDestinationGlacier(t *testing.T) { StorageClass: aws.String("STANDARD"), } _, err := s3client.PutObject(&putObject) - if err != nil { t.Fatalf("failed to put object in glacier: %v", err) } @@ -972,10 +967,10 @@ func TestSyncS3BucketToS3BucketSourceStandardDestinationGlacier(t *testing.T) { // put objects in destination bucket destS3Content := map[string]string{ - "testfile.txt": "S: this is a test file", - "readme.md": "S: this is a readme file", - "a/another_test_file.txt": "S: yet another txt file", - "abc/def/test.py": "S: file in nested 
folders", + "testfile.txt": "D: this is a test file", + "readme.md": "D: this is a readme file", + "a/another_test_file.txt": "D: yet another txt file", + "abc/def/test.py": "D: file in nested folders", } for filename, content := range destS3Content { @@ -987,7 +982,6 @@ func TestSyncS3BucketToS3BucketSourceStandardDestinationGlacier(t *testing.T) { } _, err := s3client.PutObject(&putObject) - if err != nil { t.Fatalf("failed to put object in standard: %v", err) } @@ -1006,13 +1000,12 @@ func TestSyncS3BucketToS3BucketSourceStandardDestinationGlacier(t *testing.T) { // src bucket should have the objects in glacier for key := range S3Content { - objectStorageClass := getObjectStorageClass(t, s3client, bucket, key) - assert.Equal(t, "STANDARD", objectStorageClass) + assert.Assert(t, ensureS3Object(s3client, bucket, key, S3Content[key], ensureStorageClass("STANDARD"))) } // dst bucket should have the objects in standard for key := range destS3Content { - assert.Equal(t, "GLACIER", getObjectStorageClass(t, s3client, dstbucket, key)) + assert.Assert(t, ensureS3Object(s3client, dstbucket, key, destS3Content[key], ensureStorageClass("GLACIER"))) } } @@ -1043,7 +1036,6 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationGlacier(t *testing.T) { StorageClass: aws.String("GLACIER"), } _, err := s3client.PutObject(&putObject) - if err != nil { t.Fatalf("failed to put object in glacier: %v", err) } @@ -1051,10 +1043,10 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationGlacier(t *testing.T) { // put objects in destination bucket destS3Content := map[string]string{ - "testfile.txt": "S: this is a test file", - "readme.md": "S: this is a readme file", - "a/another_test_file.txt": "S: yet another txt file", - "abc/def/test.py": "S: file in nested folders", + "testfile.txt": "D: this is a test file", + "readme.md": "D: this is a readme file", + "a/another_test_file.txt": "D: yet another txt file", + "abc/def/test.py": "D: file in nested folders", } for filename, content := range destS3Content { @@ -1066,7 +1058,6 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationGlacier(t *testing.T) { } _, err := s3client.PutObject(&putObject) - if err != nil { t.Fatalf("failed to put object in standard: %v", err) } @@ -1085,13 +1076,12 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationGlacier(t *testing.T) { // src bucket should have the objects in glacier for key := range S3Content { - objectStorageClass := getObjectStorageClass(t, s3client, bucket, key) - assert.Equal(t, "GLACIER", objectStorageClass) + assert.Assert(t, ensureS3Object(s3client, bucket, key, S3Content[key], ensureStorageClass("STANDARD"))) } // dst bucket should have the objects in standard for key := range destS3Content { - assert.Equal(t, "GLACIER", getObjectStorageClass(t, s3client, dstbucket, key)) + assert.Assert(t, ensureS3Object(s3client, dstbucket, key, destS3Content[key], ensureStorageClass("GLACIER"))) } } diff --git a/e2e/util_test.go b/e2e/util_test.go index e89dd551a..e72c25cfe 100644 --- a/e2e/util_test.go +++ b/e2e/util_test.go @@ -204,7 +204,7 @@ func workdir(t *testing.T, opts *setupOpts) (*fs.Dir, string) { prefix = strings.ReplaceAll(prefix, ":", "-") } - testdir := fs.NewDir(t, prefix, fs.WithDir("workdir", fs.WithMode(0700))) + testdir := fs.NewDir(t, prefix, fs.WithDir("workdir", fs.WithMode(0o700))) workdir := testdir.Join("workdir") return testdir, workdir } @@ -419,7 +419,7 @@ func goBuildS5cmd() func() { panic(fmt.Sprintf("failed to build executable: %s", err)) } - if err := os.Chmod(s5cmdPath, 
0755); err != nil { + if err := os.Chmod(s5cmdPath, 0o755); err != nil { panic(err) } @@ -454,7 +454,7 @@ func createBucket(t *testing.T, client *s3.S3, bucket string) { Bucket: aws.String(bucket), } - //remove objects first. + // remove objects first. // delete each object individually if using GCS. if isGoogleEndpointFromEnv(t) { err = client.ListObjectsPages(&listInput, func(p *s3.ListObjectsOutput, lastPage bool) bool { @@ -545,7 +545,6 @@ func createBucket(t *testing.T, client *s3.S3, bucket string) { } } }) - } func isGoogleEndpointFromEnv(t *testing.T) bool { @@ -620,6 +619,7 @@ func ensureContentEncoding(contentEncoding string) ensureOption { opts.contentEncoding = &contentEncoding } } + func ensureEncryptionMethod(encryptionMethod string) ensureOption { return func(opts *ensureOpts) { opts.encryptionMethod = &encryptionMethod @@ -631,6 +631,7 @@ func ensureEncryptionKeyID(encryptionKeyID string) ensureOption { opts.encryptionKeyID = &encryptionKeyID } } + func ensureArbitraryMetadata(metadata map[string]*string) ensureOption { return func(opts *ensureOpts) { opts.metadata = metadata @@ -703,7 +704,6 @@ func ensureS3Object( if diff := cmp.Diff(opts.contentDisposition, output.ContentDisposition); diff != "" { return fmt.Errorf("content-disposition of %v/%v: (-want +got):\n%v", bucket, key, diff) } - } if opts.storageClass != nil { @@ -763,20 +763,6 @@ func putFile(t *testing.T, client *s3.S3, bucket string, filename string, conten } } -// get the storage class of the object. -func getObjectStorageClass(t *testing.T, s3client *s3.S3, bucket, key string) string { - t.Helper() - output, err := s3client.HeadObject(&s3.HeadObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - }) - if err != nil { - t.Fatal(err) - } - - return aws.StringValue(output.StorageClass) -} - func replaceMatchWithSpace(input string, match ...string) string { for _, m := range match { if m == "" { diff --git a/test-dir/myfile.txt b/test-dir/myfile.txt deleted file mode 100644 index e3597fbd4..000000000 --- a/test-dir/myfile.txt +++ /dev/null @@ -1 +0,0 @@ -Hello, LocalStack From ed780d866d0185d2094df0a7823465e2e99e9052 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=2EBurak=20Ya=C5=9Far?= <91782773+4o4x@users.noreply.github.com> Date: Tue, 9 Jul 2024 09:48:44 +0300 Subject: [PATCH 05/17] fix: sync_test --- e2e/sync_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/e2e/sync_test.go b/e2e/sync_test.go index 3dd5c1fc0..d11ff0b7e 100644 --- a/e2e/sync_test.go +++ b/e2e/sync_test.go @@ -1076,10 +1076,10 @@ func TestSyncS3BucketToS3BucketSourceGlacierDestinationGlacier(t *testing.T) { // src bucket should have the objects in glacier for key := range S3Content { - assert.Assert(t, ensureS3Object(s3client, bucket, key, S3Content[key], ensureStorageClass("STANDARD"))) + assert.Assert(t, ensureS3Object(s3client, bucket, key, S3Content[key], ensureStorageClass("GLACIER"))) } - // dst bucket should have the objects in standard + // dst bucket should have the objects in glacier for key := range destS3Content { assert.Assert(t, ensureS3Object(s3client, dstbucket, key, destS3Content[key], ensureStorageClass("GLACIER"))) } From 9ff4d4026c6ba96006b911583a522b2404fb671d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=2EBurak=20Ya=C5=9Far?= <91782773+4o4x@users.noreply.github.com> Date: Mon, 29 Jul 2024 11:48:25 +0300 Subject: [PATCH 06/17] test: added sync tests --- command/sync_strategy_test.go | 24 - e2e/run_test.go | 44 +- e2e/sync_test.go | 651 ++++++++++++------ e2e/util_test.go | 6 + go.mod | 
2 +- go.sum | 4 + vendor/github.com/igungor/gofakes3/backend.go | 15 +- .../gofakes3/backend/s3bolt/backend.go | 7 + .../igungor/gofakes3/backend/s3bolt/schema.go | 14 +- .../igungor/gofakes3/backend/s3mem/backend.go | 2 +- vendor/github.com/igungor/gofakes3/error.go | 2 + .../github.com/igungor/gofakes3/gofakes3.go | 12 +- .../github.com/igungor/gofakes3/messages.go | 8 +- .../github.com/igungor/gofakes3/uploader.go | 3 +- vendor/modules.txt | 2 +- 15 files changed, 523 insertions(+), 273 deletions(-) diff --git a/command/sync_strategy_test.go b/command/sync_strategy_test.go index 27495cb85..ebbc33ca0 100644 --- a/command/sync_strategy_test.go +++ b/command/sync_strategy_test.go @@ -137,30 +137,6 @@ func TestSizeOnlyStrategy_ShouldSync(t *testing.T) { dst: &storage.Object{ModTime: timePtr(ft), Size: 10}, expected: errorpkg.ErrObjectSizesMatch, }, - - { - // src is in Glacier - name: "source is in Glacier", - src: &storage.Object{ModTime: timePtr(ft), Size: 10, StorageClass: "GLACIER"}, - dst: &storage.Object{ModTime: timePtr(ft), Size: 5}, - expected: nil, - }, - - { - // dst is in Glacier - name: "destination is in Glacier", - src: &storage.Object{ModTime: timePtr(ft), Size: 10}, - dst: &storage.Object{ModTime: timePtr(ft), Size: 5, StorageClass: "GLACIER"}, - expected: nil, - }, - - { - // src and dst are in Glacier - name: "source and destination are in Glacier", - src: &storage.Object{ModTime: timePtr(ft), Size: 10, StorageClass: "GLACIER"}, - dst: &storage.Object{ModTime: timePtr(ft), Size: 5, StorageClass: "GLACIER"}, - expected: nil, - }, } for _, tc := range testcases { diff --git a/e2e/run_test.go b/e2e/run_test.go index 63938c590..16d61d387 100644 --- a/e2e/run_test.go +++ b/e2e/run_test.go @@ -244,35 +244,35 @@ func TestRunWildcardCountGreaterEqualThanWorkerCount(t *testing.T) { assertLines(t, result.Stderr(), map[int]compareFunc{}) } -// func TestRunSpecialCharactersInPrefix(t *testing.T) { -// t.Parallel() +func TestRunSpecialCharactersInPrefix(t *testing.T) { + t.Parallel() -// bucket := s3BucketFromTestName(t) -// sourceFileName := `special-chars_!@#$%^&_()_+{[_%5Cäè| __;'_,_._-中文 =/_!@#$%^&_()_+{[_%5Cäè| __;'_,_._-中文 =image.jpg` -// targetFilePath := `./image.jpg` + bucket := s3BucketFromTestName(t) + sourceFileName := `special-chars_!@#$%^&_()_+{[_%5Cäè| __;'_,_._-中文 =/_!@#$%^&_()_+{[_%5Cäè| __;'_,_._-中文 =image.jpg` + targetFilePath := `./image.jpg` -// s3client, s5cmd := setup(t) + s3client, s5cmd := setup(t) -// createBucket(t, s3client, bucket) -// putFile(t, s3client, bucket, sourceFileName, "content") + createBucket(t, s3client, bucket) + putFile(t, s3client, bucket, sourceFileName, "content") -// content := []string{ -// `cp "s3://` + bucket + `/` + sourceFileName + `" ` + targetFilePath, -// } -// file := fs.NewFile(t, "prefix", fs.WithContent(strings.Join(content, "\n"))) -// defer file.Remove() + content := []string{ + `cp "s3://` + bucket + `/` + sourceFileName + `" ` + targetFilePath, + } + file := fs.NewFile(t, "prefix", fs.WithContent(strings.Join(content, "\n"))) + defer file.Remove() -// cmd := s5cmd("run", file.Path()) -// cmd.Timeout = time.Second -// result := icmd.RunCmd(cmd) -// result.Assert(t, icmd.Success) + cmd := s5cmd("run", file.Path()) + cmd.Timeout = time.Second + result := icmd.RunCmd(cmd) + result.Assert(t, icmd.Success) -// assertLines(t, result.Stdout(), map[int]compareFunc{ -// 0: equals(`cp s3://%v/%v %v`, bucket, sourceFileName, targetFilePath), -// }, sortInput(true)) + assertLines(t, result.Stdout(), map[int]compareFunc{ + 0: 
equals(`cp s3://%v/%v %v`, bucket, sourceFileName, targetFilePath), + }, sortInput(true)) -// assertLines(t, result.Stderr(), map[int]compareFunc{}) -// } + assertLines(t, result.Stderr(), map[int]compareFunc{}) +} func TestRunDryRun(t *testing.T) { t.Parallel() diff --git a/e2e/sync_test.go b/e2e/sync_test.go index d11ff0b7e..5ec88e917 100644 --- a/e2e/sync_test.go +++ b/e2e/sync_test.go @@ -92,7 +92,6 @@ func TestSyncSingleS3ObjectToLocalTwice(t *testing.T) { // rerunning same command should not download object, empty result expected result = icmd.RunCmd(cmd) result.Assert(t, icmd.Success) - assertLines(t, result.Stdout(), map[int]compareFunc{}) } // sync file s3://bucket @@ -305,14 +304,14 @@ func TestSyncS3BucketToEmptyFolder(t *testing.T) { bucket := s3BucketFromTestName(t) createBucket(t, s3client, bucket) - S3Content := map[string]string{ + s3Content := map[string]string{ "testfile.txt": "S: this is a test file", "readme.md": "S: this is a readme file", "a/another_test_file.txt": "S: yet another txt file", "abc/def/test.py": "S: file in nested folders", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -354,7 +353,7 @@ func TestSyncS3BucketToEmptyFolder(t *testing.T) { assert.Assert(t, fs.Equal(workdir.Path(), expected)) // assert s3 - for key, content := range S3Content { + for key, content := range s3Content { assert.Assert(t, ensureS3Object(s3client, bucket, key, content)) } } @@ -373,14 +372,14 @@ func TestSyncS3BucketToEmptyS3Bucket(t *testing.T) { createBucket(t, s3client, bucket) createBucket(t, s3client, dstbucket) - S3Content := map[string]string{ + s3Content := map[string]string{ "testfile.txt": "S: this is a test file", "readme.md": "S: this is a readme file", "a/another_test_file.txt": "S: yet another txt file", "abc/def/test.py": "S: file in nested folders", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -401,12 +400,12 @@ func TestSyncS3BucketToEmptyS3Bucket(t *testing.T) { }, sortInput(true)) // assert s3 objects in source bucket. - for key, content := range S3Content { + for key, content := range s3Content { assert.Assert(t, ensureS3Object(s3client, bucket, key, content)) } // assert s3 objects in dest bucket - for key, content := range S3Content { + for key, content := range s3Content { key = fmt.Sprintf("%s/%s", prefix, key) // add the prefix assert.Assert(t, ensureS3Object(s3client, dstbucket, key, content)) } @@ -442,14 +441,14 @@ func TestSyncLocalFolderToS3BucketSameObjectsSourceOlder(t *testing.T) { workdir := fs.NewDir(t, "somedir", folderLayout...) 
defer workdir.Remove() - S3Content := map[string]string{ + s3Content := map[string]string{ "main.py": "D: this is a python file", "testfile.txt": "D: this is a test file", "readme.md": "D: this is a readme file", "a/another_test_file.txt": "D: yet another txt file", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -483,7 +482,7 @@ func TestSyncLocalFolderToS3BucketSameObjectsSourceOlder(t *testing.T) { assert.Assert(t, fs.Equal(workdir.Path(), expected)) // assert s3 - for key, content := range S3Content { + for key, content := range s3Content { assert.Assert(t, ensureS3Object(s3client, bucket, key, content)) } } @@ -517,13 +516,13 @@ func TestSyncLocalFolderToS3BucketSourceNewer(t *testing.T) { workdir := fs.NewDir(t, "somedir", folderLayout...) defer workdir.Remove() - S3Content := map[string]string{ + s3Content := map[string]string{ "testfile.txt": "D: this is a test file ", "readme.md": "D: this is a readme file", "dir/main.py": "D: python file", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -596,14 +595,14 @@ func TestSyncS3BucketToLocalFolderSameObjectsSourceOlder(t *testing.T) { workdir := fs.NewDir(t, "somedir", folderLayout...) defer workdir.Remove() - S3Content := map[string]string{ + s3Content := map[string]string{ "main.py": "S: this is a python file", "testfile.txt": "S: this is a test file", // content different from local "readme.md": "S: this is a readme file", // content different from local "a/another_test_file.txt": "S: yet another txt file", // content different from local } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -638,7 +637,7 @@ func TestSyncS3BucketToLocalFolderSameObjectsSourceOlder(t *testing.T) { assert.Assert(t, fs.Equal(workdir.Path(), expected)) // assert s3 - for key, content := range S3Content { + for key, content := range s3Content { assert.Assert(t, ensureS3Object(s3client, bucket, key, content)) } } @@ -673,14 +672,14 @@ func TestSyncS3BucketToLocalFolderSameObjectsSourceNewer(t *testing.T) { workdir := fs.NewDir(t, "somedir", folderLayout...) 
defer workdir.Remove() - S3Content := map[string]string{ + s3Content := map[string]string{ "main.py": "S: this is a python file", "testfile.txt": "S: this is an updated test file", "readme.md": "S: this is an updated readme file", "a/another_test_file.txt": "S: yet another updated txt file", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -715,7 +714,7 @@ func TestSyncS3BucketToLocalFolderSameObjectsSourceNewer(t *testing.T) { assert.Assert(t, fs.Equal(workdir.Path(), expected)) // assert s3 - for key, content := range S3Content { + for key, content := range s3Content { assert.Assert(t, ensureS3Object(s3client, bucket, key, content)) } } @@ -856,307 +855,555 @@ func TestSyncS3BucketToS3BucketSameSizesSourceOlder(t *testing.T) { } } -// sync s3://bucket/* s3://destbucket/ (source is glacier, destination is standard) -func TestSyncS3BucketToS3BucketSourceGlacierDestinationStandard(t *testing.T) { +// sync --size-only s3://bucket/* folder/ +func TestSyncS3BucketToLocalFolderSameObjectsSizeOnly(t *testing.T) { t.Parallel() s3client, s5cmd := setup(t) + bucket := s3BucketFromTestName(t) + createBucket(t, s3client, bucket) + + folderLayout := []fs.PathOp{ + fs.WithFile("test.py", "D: this is a python file"), + fs.WithFile("testfile.txt", "D: this is a test file"), + fs.WithFile("readme.md", "D: this is a readme file"), + fs.WithDir("a", + fs.WithFile("another_test_file.txt", "D: yet another txt file"), + ), + } + + workdir := fs.NewDir(t, "somedir", folderLayout...) + defer workdir.Remove() + + s3Content := map[string]string{ + "test.py": "S: this is an updated python file", // content different from local, different size + "testfile.txt": "S: this is a test file", // content different from local, same size + "readme.md": "S: this is a readme file", // content different from local, same size + "a/another_test_file.txt": "S: yet another txt file", // content different from local, same size + "abc/def/main.py": "S: python file", // local does not have it. + } + + for filename, content := range s3Content { + putFile(t, s3client, bucket, filename, content) + } + + bucketPath := fmt.Sprintf("s3://%v", bucket) + src := fmt.Sprintf("%s/*", bucketPath) + dst := fmt.Sprintf("%v/", workdir.Path()) + dst = filepath.ToSlash(dst) + + // log debug + cmd := s5cmd("--log", "debug", "sync", "--size-only", src, dst) + result := icmd.RunCmd(cmd) + + result.Assert(t, icmd.Success) + + assertLines(t, result.Stdout(), map[int]compareFunc{ + 0: equals(`DEBUG "sync %v/a/another_test_file.txt %va/another_test_file.txt": object size matches`, bucketPath, dst), + 1: equals(`DEBUG "sync %v/readme.md %vreadme.md": object size matches`, bucketPath, dst), + 2: equals(`DEBUG "sync %v/testfile.txt %vtestfile.txt": object size matches`, bucketPath, dst), + 3: equals(`cp %v/abc/def/main.py %vabc/def/main.py`, bucketPath, dst), + 4: equals(`cp %v/test.py %vtest.py`, bucketPath, dst), + }, sortInput(true)) + + expectedFolderLayout := []fs.PathOp{ + fs.WithFile("test.py", "S: this is an updated python file"), + fs.WithFile("testfile.txt", "D: this is a test file"), + fs.WithFile("readme.md", "D: this is a readme file"), + fs.WithDir("a", + fs.WithFile("another_test_file.txt", "D: yet another txt file"), + ), + fs.WithDir("abc", + fs.WithDir("def", + fs.WithFile("main.py", "S: python file"), + ), + ), + } + + // expected folder structure without the timestamp. + expected := fs.Expected(t, expectedFolderLayout...) 
+ assert.Assert(t, fs.Equal(workdir.Path(), expected)) + + // assert s3 + for key, content := range s3Content { + assert.Assert(t, ensureS3Object(s3client, bucket, key, content)) + } +} + +// sync s3://bucket/* s3://destbucket/ (same objects, same size, same content, different or same storage class) +func TestSyncS3BucketToS3BucketIsStorageClassChanging(t *testing.T) { + t.Parallel() + s3client, s5cmd := setup(t) + + srcbucket := s3BucketFromTestName(t) dstbucket := s3BucketFromTestNameWithPrefix(t, "dst") - createBucket(t, s3client, bucket) + createBucket(t, s3client, srcbucket) createBucket(t, s3client, dstbucket) - // put objects in source bucket - S3Content := map[string]string{ - "testfile.txt": "S: this is a test file", - "readme.md": "S: this is a readme file", - "a/another_test_file.txt": "S: yet another txt file", - "abc/def/test.py": "S: file in nested folders", + storageClassesAndFile := []struct { + srcStorageClass string + dstStorageClass string + filename string + content string + }{ + {"STANDARD", "STANDARD", "testfile1.txt", "this is a test file"}, + {"STANDARD", "GLACIER", "testfile2.txt", "this is a test file"}, + {"GLACIER", "STANDARD", "testfile3.txt", "this is a test file"}, + {"GLACIER", "GLACIER", "testfile4.txt", "this is a test file"}, } - // put objects in glacier + for _, sc := range storageClassesAndFile { - for filename, content := range S3Content { putObject := s3.PutObjectInput{ - Bucket: &bucket, - Key: &filename, - Body: strings.NewReader(content), - StorageClass: aws.String("GLACIER"), + Bucket: &srcbucket, + Key: &sc.filename, + Body: strings.NewReader(sc.content), + StorageClass: &sc.srcStorageClass, } + _, err := s3client.PutObject(&putObject) if err != nil { - t.Fatalf("failed to put object in glacier: %v", err) + t.Fatalf("failed to put object in %v: %v", sc.srcStorageClass, err) } - } - - // put objects in destination bucket - destS3Content := map[string]string{ - "testfile.txt": "D: this is a test file", - "readme.md": "D: this is a readme file", - "a/another_test_file.txt": "D: yet another txt file", - "abc/def/test.py": "D: file in nested folders", - } - for filename, content := range destS3Content { - putObject := s3.PutObjectInput{ + putObject = s3.PutObjectInput{ Bucket: &dstbucket, - Key: &filename, - Body: strings.NewReader(content), - StorageClass: aws.String("STANDARD"), + Key: &sc.filename, + Body: strings.NewReader(sc.content), + StorageClass: aws.String(sc.dstStorageClass), } - _, err := s3client.PutObject(&putObject) + _, err = s3client.PutObject(&putObject) if err != nil { - t.Fatalf("failed to put object in standard: %v", err) + t.Fatalf("failed to put object in %v: %v", sc.dstStorageClass, err) } + } - bucketPath := fmt.Sprintf("s3://%v", bucket) + bucketPath := fmt.Sprintf("s3://%v", srcbucket) src := fmt.Sprintf("%s/*", bucketPath) dst := fmt.Sprintf("s3://%v/", dstbucket) - // log debug - cmd := s5cmd("--log", "debug", "sync", src, dst) + cmd := s5cmd("sync", src, dst) result := icmd.RunCmd(cmd) - // there will be no stdout, since the objects are in glacier + // there will be no stdout, since there are no changes result.Assert(t, icmd.Success) + assertLines(t, result.Stdout(), map[int]compareFunc{}) - // src bucket should have the objects in glacier - for key := range S3Content { - assert.Assert(t, ensureS3Object(s3client, bucket, key, S3Content[key], ensureStorageClass("GLACIER"))) + // assert s3 objects in source + for _, sc := range storageClassesAndFile { + assert.Assert(t, ensureS3Object(s3client, srcbucket, sc.filename, 
sc.content, ensureStorageClass(sc.srcStorageClass))) } - // dst bucket should have the objects in standard - for key := range destS3Content { - assert.Assert(t, ensureS3Object(s3client, dstbucket, key, destS3Content[key], ensureStorageClass("STANDARD"))) + // assert s3 objects in destination + for _, sc := range storageClassesAndFile { + assert.Assert(t, ensureS3Object(s3client, dstbucket, sc.filename, sc.content, ensureStorageClass(sc.dstStorageClass))) } } -func TestSyncS3BucketToS3BucketSourceStandardDestinationGlacier(t *testing.T) { +// sync dir s3://destbucket/ (same objects, same size, same content, different or same storage class) +func TestSyncLocalFolderToS3BucketIsStorageClassChanging(t *testing.T) { t.Parallel() s3client, s5cmd := setup(t) - bucket := s3BucketFromTestName(t) - dstbucket := s3BucketFromTestNameWithPrefix(t, "dst") + bucket := s3BucketFromTestName(t) createBucket(t, s3client, bucket) - createBucket(t, s3client, dstbucket) - // put objects in source bucket - S3Content := map[string]string{ - "testfile.txt": "S: this is a test file", - "readme.md": "S: this is a readme file", - "a/another_test_file.txt": "S: yet another txt file", - "abc/def/test.py": "S: file in nested folders", + folderLayout := []fs.PathOp{ + fs.WithFile("testfile1.txt", "this is a test file"), + fs.WithFile("testfile2.txt", "this is a test file"), } - // put objects in glacier + workdir := fs.NewDir(t, "somedir", folderLayout...) + defer workdir.Remove() + + storageClassesAndFile := []struct { + storageClass string + filename string + content string + }{ + {"STANDARD", "testfile1.txt", "this is a test file"}, + {"GLACIER", "testfile2.txt", "this is a test file"}, + } + + for _, sc := range storageClassesAndFile { - for filename, content := range S3Content { putObject := s3.PutObjectInput{ Bucket: &bucket, - Key: &filename, - Body: strings.NewReader(content), - StorageClass: aws.String("STANDARD"), + Key: &sc.filename, + Body: strings.NewReader(sc.content), + StorageClass: aws.String(sc.storageClass), } + _, err := s3client.PutObject(&putObject) if err != nil { - t.Fatalf("failed to put object in glacier: %v", err) + t.Fatalf("failed to put object in %v: %v", sc.storageClass, err) } } - // put objects in destination bucket - destS3Content := map[string]string{ - "testfile.txt": "D: this is a test file", - "readme.md": "D: this is a readme file", - "a/another_test_file.txt": "D: yet another txt file", - "abc/def/test.py": "D: file in nested folders", + src := fmt.Sprintf("%v/", workdir.Path()) + src = filepath.ToSlash(src) + dst := fmt.Sprintf("s3://%v/", bucket) + + cmd := s5cmd("sync", src, dst) + result := icmd.RunCmd(cmd) + + // there will be no stdout + result.Assert(t, icmd.Success) + assertLines(t, result.Stdout(), map[int]compareFunc{}) + + // expected folder structure without the timestamp. + expected := fs.Expected(t, folderLayout...) 
+ assert.Assert(t, fs.Equal(workdir.Path(), expected)) + + // assert s3 objects in destination + for _, sc := range storageClassesAndFile { + assert.Assert(t, ensureS3Object(s3client, bucket, sc.filename, sc.content, ensureStorageClass(sc.storageClass))) } +} + +// sync s3://srcbucket/ dir (same objects, same size, same content, different or same storage class) +func TestSyncS3BucketToLocalFolderIsStorageClassChanging(t *testing.T) { + t.Parallel() + s3client, s5cmd := setup(t) + + bucket := s3BucketFromTestName(t) + createBucket(t, s3client, bucket) + + storageClassesAndFile := []struct { + storageClass string + filename string + content string + }{ + {"STANDARD", "testfile1.txt", "this is a test file"}, + {"GLACIER", "testfile2.txt", "this is a test file"}, + } + + for _, sc := range storageClassesAndFile { - for filename, content := range destS3Content { putObject := s3.PutObjectInput{ - Bucket: &dstbucket, - Key: &filename, - Body: strings.NewReader(content), - StorageClass: aws.String("GLACIER"), + Bucket: &bucket, + Key: &sc.filename, + Body: strings.NewReader(sc.content), + StorageClass: aws.String(sc.storageClass), } _, err := s3client.PutObject(&putObject) if err != nil { - t.Fatalf("failed to put object in standard: %v", err) + t.Fatalf("failed to put object in %v: %v", sc.storageClass, err) } } + folderLayout := []fs.PathOp{ + fs.WithFile("testfile1.txt", "this is a test file"), + fs.WithFile("testfile2.txt", "this is a test file"), + } + + // put objects in local folder + + workdir := fs.NewDir(t, "somedir", folderLayout...) + defer workdir.Remove() + bucketPath := fmt.Sprintf("s3://%v", bucket) src := fmt.Sprintf("%s/*", bucketPath) - dst := fmt.Sprintf("s3://%v/", dstbucket) + dst := fmt.Sprintf("%v/", workdir.Path()) + dst = filepath.ToSlash(dst) - // log debug - cmd := s5cmd("--log", "debug", "sync", src, dst) + cmd := s5cmd("sync", src, dst) result := icmd.RunCmd(cmd) - // there will be no stdout, since the objects are in glacier + // there will be no stdout result.Assert(t, icmd.Success) + assertLines(t, result.Stdout(), map[int]compareFunc{}) - // src bucket should have the objects in glacier - for key := range S3Content { - assert.Assert(t, ensureS3Object(s3client, bucket, key, S3Content[key], ensureStorageClass("STANDARD"))) - } + // expected folder structure without the timestamp. + expected := fs.Expected(t, folderLayout...) 
+ assert.Assert(t, fs.Equal(workdir.Path(), expected)) - // dst bucket should have the objects in standard - for key := range destS3Content { - assert.Assert(t, ensureS3Object(s3client, dstbucket, key, destS3Content[key], ensureStorageClass("GLACIER"))) + // assert s3 objects in destination + for _, sc := range storageClassesAndFile { + assert.Assert(t, ensureS3Object(s3client, bucket, sc.filename, sc.content, ensureStorageClass(sc.storageClass))) } } -func TestSyncS3BucketToS3BucketSourceGlacierDestinationGlacier(t *testing.T) { +// sync s3://srcbucket/* s3://dstbucket/ (same objects, different size, different content, different or same storage class) +func TestSyncS3BucketToS3BucketIsStorageClassChangingWithDifferentSizeAndContent(t *testing.T) { t.Parallel() s3client, s5cmd := setup(t) - bucket := s3BucketFromTestName(t) + + srcbucket := s3BucketFromTestName(t) dstbucket := s3BucketFromTestNameWithPrefix(t, "dst") - createBucket(t, s3client, bucket) + createBucket(t, s3client, srcbucket) createBucket(t, s3client, dstbucket) - // put objects in source bucket - S3Content := map[string]string{ - "testfile.txt": "S: this is a test file", - "readme.md": "S: this is a readme file", - "a/another_test_file.txt": "S: yet another txt file", - "abc/def/test.py": "S: file in nested folders", + storageClassesAndFile := []struct { + srcStorageClass string + dstStorageClass string + filename string + srcContent string + dstContent string + }{ + {"STANDARD", "STANDARD", "testfile1.txt", "this is an updated test file", "this is a test file"}, + {"STANDARD", "GLACIER", "testfile2.txt", "this is an updated test file", "this is a test file"}, + {"GLACIER", "STANDARD", "testfile3.txt", "this is an updated test file", "this is a test file"}, + {"GLACIER", "GLACIER", "testfile4.txt", "this is an updated test file", "this is a test file"}, } - // put objects in glacier + for _, sc := range storageClassesAndFile { - for filename, content := range S3Content { - putObject := s3.PutObjectInput{ - Bucket: &bucket, - Key: &filename, - Body: strings.NewReader(content), - StorageClass: aws.String("GLACIER"), - } - _, err := s3client.PutObject(&putObject) - if err != nil { - t.Fatalf("failed to put object in glacier: %v", err) - } - } + putFile(t, s3client, srcbucket, sc.filename, sc.srcContent, putStorageClass(sc.srcStorageClass)) - // put objects in destination bucket - destS3Content := map[string]string{ - "testfile.txt": "D: this is a test file", - "readme.md": "D: this is a readme file", - "a/another_test_file.txt": "D: yet another txt file", - "abc/def/test.py": "D: file in nested folders", - } - - for filename, content := range destS3Content { putObject := s3.PutObjectInput{ Bucket: &dstbucket, - Key: &filename, - Body: strings.NewReader(content), - StorageClass: aws.String("GLACIER"), + Key: &sc.filename, + Body: strings.NewReader(sc.dstContent), + StorageClass: aws.String(sc.dstStorageClass), } _, err := s3client.PutObject(&putObject) if err != nil { - t.Fatalf("failed to put object in standard: %v", err) + t.Fatalf("failed to put object in %v: %v", sc.dstStorageClass, err) } } - bucketPath := fmt.Sprintf("s3://%v", bucket) + bucketPath := fmt.Sprintf("s3://%v", srcbucket) src := fmt.Sprintf("%s/*", bucketPath) dst := fmt.Sprintf("s3://%v/", dstbucket) - // log debug - cmd := s5cmd("--log", "debug", "sync", src, dst) + cmd := s5cmd("sync", src, dst) + result := icmd.RunCmd(cmd) - // there will be no stdout, since the objects are in glacier + fmt.Println(result.Stdout()) + fmt.Println(result.Stderr()) + 
result.Assert(t, icmd.Success) - // src bucket should have the objects in glacier - for key := range S3Content { - assert.Assert(t, ensureS3Object(s3client, bucket, key, S3Content[key], ensureStorageClass("GLACIER"))) - } + assertLines(t, result.Stdout(), map[int]compareFunc{ + 0: equals(`cp %v/testfile1.txt %vtestfile1.txt`, bucketPath, dst), + 1: equals(`cp %v/testfile2.txt %vtestfile2.txt`, bucketPath, dst), + }, sortInput(true)) - // dst bucket should have the objects in glacier - for key := range destS3Content { - assert.Assert(t, ensureS3Object(s3client, dstbucket, key, destS3Content[key], ensureStorageClass("GLACIER"))) + // assert s3 objects in source + for _, sc := range storageClassesAndFile { + assert.Assert(t, ensureS3Object(s3client, srcbucket, sc.filename, sc.srcContent, ensureStorageClass(sc.srcStorageClass))) } + + // assert s3 objects in destination (file1 and file2 should be updated and file3 and file4 should be same as before) + assert.Assert(t, ensureS3Object(s3client, dstbucket, "testfile1.txt", "this is an updated test file", ensureStorageClass("STANDARD"))) + assert.Assert(t, ensureS3Object(s3client, dstbucket, "testfile2.txt", "this is an updated test file", ensureStorageClass("STANDARD"))) + assert.Assert(t, ensureS3Object(s3client, dstbucket, "testfile3.txt", "this is a test file", ensureStorageClass("STANDARD"))) + assert.Assert(t, ensureS3Object(s3client, dstbucket, "testfile4.txt", "this is a test file", ensureStorageClass("GLACIER"))) } -// sync --size-only s3://bucket/* folder/ -func TestSyncS3BucketToLocalFolderSameObjectsSizeOnly(t *testing.T) { +// sync dir s3://destbucket/ (same objects, different size, different content, different or same storage class) +func TestSyncLocalFolderToS3BucketIsStorageClassChangingWithDifferentSizeAndContent(t *testing.T) { t.Parallel() + s3client, s5cmd := setup(t) bucket := s3BucketFromTestName(t) createBucket(t, s3client, bucket) folderLayout := []fs.PathOp{ - fs.WithFile("test.py", "D: this is a python file"), - fs.WithFile("testfile.txt", "D: this is a test file"), - fs.WithFile("readme.md", "D: this is a readme file"), - fs.WithDir("a", - fs.WithFile("another_test_file.txt", "D: yet another txt file"), - ), + fs.WithFile("testfile1.txt", "this is an updated test file"), + fs.WithFile("testfile2.txt", "this is an updated test file"), } workdir := fs.NewDir(t, "somedir", folderLayout...) defer workdir.Remove() - S3Content := map[string]string{ - "test.py": "S: this is an updated python file", // content different from local, different size - "testfile.txt": "S: this is a test file", // content different from local, same size - "readme.md": "S: this is a readme file", // content different from local, same size - "a/another_test_file.txt": "S: yet another txt file", // content different from local, same size - "abc/def/main.py": "S: python file", // local does not have it. 
+ storageClassesAndFile := []struct { + storageClass string + filename string + content string + }{ + {"STANDARD", "testfile1.txt", "this is a test file"}, + {"GLACIER", "testfile2.txt", "this is a test file"}, } - for filename, content := range S3Content { - putFile(t, s3client, bucket, filename, content) + for _, sc := range storageClassesAndFile { + putFile(t, s3client, bucket, sc.filename, sc.content, putStorageClass(sc.storageClass)) + } + + src := fmt.Sprintf("%v/", workdir.Path()) + src = filepath.ToSlash(src) + dst := fmt.Sprintf("s3://%v/", bucket) + + cmd := s5cmd("sync", src, dst) + result := icmd.RunCmd(cmd) + + result.Assert(t, icmd.Success) + + fmt.Println(result.Stdout()) + // fmt.Printf(`cp %v/testfile1.txt %vtestfile1.txt\n`, src, dst) + // fmt.Printf(`cp %v/testfile2.txt %vtestfile2.txt\n`, src, dst) + + assertLines(t, result.Stdout(), map[int]compareFunc{ + 0: equals(`cp %vtestfile1.txt %vtestfile1.txt`, src, dst), + 1: equals(`cp %vtestfile2.txt %vtestfile2.txt`, src, dst), + }, sortInput(true)) + + // expected folder structure without the timestamp. + expected := fs.Expected(t, folderLayout...) + assert.Assert(t, fs.Equal(workdir.Path(), expected)) + + // assert s3 objects in destination + assert.Assert(t, ensureS3Object(s3client, bucket, "testfile1.txt", "this is an updated test file")) + assert.Assert(t, ensureS3Object(s3client, bucket, "testfile2.txt", "this is an updated test file")) +} + +// sync s3://destbucket/ dir (same objects, different size, different content, different or same storage class) +func TestSyncS3BucketToLocalFolderIsStorageClassChangingWithDifferentSizeAndContent(t *testing.T) { + t.Parallel() + + s3client, s5cmd := setup(t) + + bucket := s3BucketFromTestName(t) + createBucket(t, s3client, bucket) + + storageClassesAndFile := []struct { + storageClass string + filename string + content string + }{ + {"STANDARD", "testfile1.txt", "this is an updated test file"}, + {"GLACIER", "testfile2.txt", "this is an updated test file"}, } + for _, sc := range storageClassesAndFile { + putFile(t, s3client, bucket, sc.filename, sc.content, putStorageClass(sc.storageClass)) + } + + folderLayout := []fs.PathOp{ + fs.WithFile("testfile1.txt", "this is a test file"), + fs.WithFile("testfile2.txt", "this is a test file"), + } + + // put objects in local folder + workdir := fs.NewDir(t, "somedir", folderLayout...) + defer workdir.Remove() + bucketPath := fmt.Sprintf("s3://%v", bucket) src := fmt.Sprintf("%s/*", bucketPath) dst := fmt.Sprintf("%v/", workdir.Path()) dst = filepath.ToSlash(dst) - // log debug - cmd := s5cmd("--log", "debug", "sync", "--size-only", src, dst) + cmd := s5cmd("sync", src, dst) + result := icmd.RunCmd(cmd) result.Assert(t, icmd.Success) + // testfile1.txt should be updated and testfile2.txt shouldn't be updated because it is in glacier. 
assertLines(t, result.Stdout(), map[int]compareFunc{ - 0: equals(`DEBUG "sync %v/a/another_test_file.txt %va/another_test_file.txt": object size matches`, bucketPath, dst), - 1: equals(`DEBUG "sync %v/readme.md %vreadme.md": object size matches`, bucketPath, dst), - 2: equals(`DEBUG "sync %v/testfile.txt %vtestfile.txt": object size matches`, bucketPath, dst), - 3: equals(`cp %v/abc/def/main.py %vabc/def/main.py`, bucketPath, dst), - 4: equals(`cp %v/test.py %vtest.py`, bucketPath, dst), + 0: equals(`cp %v/testfile1.txt %vtestfile1.txt`, bucketPath, dst), }, sortInput(true)) expectedFolderLayout := []fs.PathOp{ - fs.WithFile("test.py", "S: this is an updated python file"), - fs.WithFile("testfile.txt", "D: this is a test file"), - fs.WithFile("readme.md", "D: this is a readme file"), - fs.WithDir("a", - fs.WithFile("another_test_file.txt", "D: yet another txt file"), - ), - fs.WithDir("abc", - fs.WithDir("def", - fs.WithFile("main.py", "S: python file"), - ), - ), + fs.WithFile("testfile1.txt", "this is an updated test file"), + fs.WithFile("testfile2.txt", "this is a test file"), } // expected folder structure without the timestamp. expected := fs.Expected(t, expectedFolderLayout...) assert.Assert(t, fs.Equal(workdir.Path(), expected)) +} - // assert s3 - for key, content := range S3Content { - assert.Assert(t, ensureS3Object(s3client, bucket, key, content)) +// sync --delete s3://bucket/* s3://destbucket/ (storage class test) +func TestSyncS3BucketToS3BucketWithDeleteStorageClass(t *testing.T) { + t.Parallel() + + s3client, s5cmd := setup(t) + + srcbucket := s3BucketFromTestName(t) + dstbucket := s3BucketFromTestNameWithPrefix(t, "dst") + + createBucket(t, s3client, srcbucket) + createBucket(t, s3client, dstbucket) + + dstStorageClassesAndFile := []struct { + storageClass string + filename string + content string + }{ + {"STANDARD", "testfile1.txt", "this is a test file"}, + {"GLACIER", "testfile2.txt", "this is a test file"}, + } + + for _, sc := range dstStorageClassesAndFile { + putFile(t, s3client, dstbucket, sc.filename, sc.content, putStorageClass(sc.storageClass)) + } + + bucketPath := fmt.Sprintf("s3://%v", srcbucket) + src := fmt.Sprintf("%s/*", bucketPath) + dst := fmt.Sprintf("s3://%v/", dstbucket) + + cmd := s5cmd("sync", "--delete", src, dst) + + result := icmd.RunCmd(cmd) + + result.Assert(t, icmd.Success) + + assertLines(t, result.Stdout(), map[int]compareFunc{ + 0: equals(`rm %vtestfile1.txt`, dst), + 1: equals(`rm %vtestfile2.txt`, dst), + }, sortInput(true)) + + // assert s3 objects in destination + for _, sc := range dstStorageClassesAndFile { + err := ensureS3Object(s3client, dstbucket, sc.filename, sc.content, ensureStorageClass(sc.storageClass)) + assertError(t, err, errS3NoSuchKey) + } +} + +// sync --delete dir s3://destbucket/ (storage class test) +func TestSyncLocalFolderToS3BucketWithDeleteStorageClass(t *testing.T) { + t.Parallel() + + s3client, s5cmd := setup(t) + + bucket := s3BucketFromTestName(t) + createBucket(t, s3client, bucket) + + storageClassesAndFile := []struct { + storageClass string + filename string + content string + }{ + {"STANDARD", "testfile1.txt", "this is a test file"}, + {"GLACIER", "testfile2.txt", "this is a test file"}, + } + + for _, sc := range storageClassesAndFile { + putFile(t, s3client, bucket, sc.filename, sc.content, putStorageClass(sc.storageClass)) + } + + workdir := fs.NewDir(t, "somedir") + defer workdir.Remove() + + src := fmt.Sprintf("%v/", workdir.Path()) + src = filepath.ToSlash(src) + dst := fmt.Sprintf("s3://%v/", 
bucket) + + cmd := s5cmd("sync", "--delete", src, dst) + + result := icmd.RunCmd(cmd) + + result.Assert(t, icmd.Success) + + assertLines(t, result.Stdout(), map[int]compareFunc{ + 0: equals(`rm %vtestfile1.txt`, dst), + 1: equals(`rm %vtestfile2.txt`, dst), + }, sortInput(true)) + + // assert s3 objects in destination + for _, sc := range storageClassesAndFile { + err := ensureS3Object(s3client, bucket, sc.filename, sc.content, ensureStorageClass(sc.storageClass)) + assertError(t, err, errS3NoSuchKey) } } @@ -1186,14 +1433,14 @@ func TestSyncLocalFolderToS3BucketSameObjectsSizeOnly(t *testing.T) { workdir := fs.NewDir(t, "somedir", folderLayout...) defer workdir.Remove() - S3Content := map[string]string{ + s3Content := map[string]string{ "test.py": "D: this is a python file", "testfile.txt": "D: this is an updated test file", "readme.md": "D: this is a readme file", "a/another_test_file.txt": "D: yet another txt file", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -1315,11 +1562,11 @@ func TestSyncS3BucketToLocalWithDelete(t *testing.T) { bucket := s3BucketFromTestName(t) createBucket(t, s3client, bucket) - S3Content := map[string]string{ + s3Content := map[string]string{ "contributing.md": "S: this is a readme file", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -1360,7 +1607,7 @@ func TestSyncS3BucketToLocalWithDelete(t *testing.T) { assert.Assert(t, fs.Equal(workdir.Path(), expected)) // assert s3 - for key, content := range S3Content { + for key, content := range s3Content { assert.Assert(t, ensureS3Object(s3client, bucket, key, content)) } } @@ -1373,11 +1620,11 @@ func TestSyncS3BucketToEmptyLocalWithDelete(t *testing.T) { bucket := s3BucketFromTestName(t) createBucket(t, s3client, bucket) - S3Content := map[string]string{ + s3Content := map[string]string{ "contributing.md": "S: this is a readme file", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -1406,7 +1653,7 @@ func TestSyncS3BucketToEmptyLocalWithDelete(t *testing.T) { assert.Assert(t, fs.Equal(workdir.Path(), expected)) // assert s3 - for key, content := range S3Content { + for key, content := range s3Content { assert.Assert(t, ensureS3Object(s3client, bucket, key, content)) } } @@ -1429,13 +1676,13 @@ func TestSyncLocalToS3BucketWithDelete(t *testing.T) { workdir := fs.NewDir(t, "somedir", folderLayout...) defer workdir.Remove() - S3Content := map[string]string{ + s3Content := map[string]string{ "readme.md": "D: this is a readme file", "dir/main.py": "D: this is a python file", "testfile.txt": "D: this is a test file", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -1472,7 +1719,7 @@ func TestSyncLocalToS3BucketWithDelete(t *testing.T) { } // assert s3 objects should be deleted. 
- for key, content := range S3Content { + for key, content := range s3Content { err := ensureS3Object(s3client, bucket, key, content) if err == nil { t.Errorf("File %v is not deleted from remote : %v\n", key, err) @@ -2043,14 +2290,14 @@ func TestSyncS3BucketToEmptyS3BucketWithExitOnErrorFlag(t *testing.T) { createBucket(t, s3client, bucket) createBucket(t, s3client, dstbucket) - S3Content := map[string]string{ + s3Content := map[string]string{ "testfile.txt": "S: this is a test file", "readme.md": "S: this is a readme file", "a/another_test_file.txt": "S: yet another txt file", "abc/def/test.py": "S: file in nested folders", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -2071,12 +2318,12 @@ func TestSyncS3BucketToEmptyS3BucketWithExitOnErrorFlag(t *testing.T) { }, sortInput(true)) // assert s3 objects in source bucket. - for key, content := range S3Content { + for key, content := range s3Content { assert.Assert(t, ensureS3Object(s3client, bucket, key, content)) } // assert s3 objects in dest bucket - for key, content := range S3Content { + for key, content := range s3Content { key = fmt.Sprintf("%s/%s", prefix, key) // add the prefix assert.Assert(t, ensureS3Object(s3client, dstbucket, key, content)) } @@ -2095,14 +2342,14 @@ func TestSyncExitOnErrorS3BucketToS3BucketThatDoesNotExist(t *testing.T) { createBucket(t, s3client, bucket) - S3Content := map[string]string{ + s3Content := map[string]string{ "testfile.txt": "S: this is a test file", "readme.md": "S: this is a readme file", "a/another_test_file.txt": "S: yet another txt file", "abc/def/test.py": "S: file in nested folders", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -2132,14 +2379,14 @@ func TestSyncS3BucketToS3BucketThatDoesNotExist(t *testing.T) { createBucket(t, s3client, bucket) - S3Content := map[string]string{ + s3Content := map[string]string{ "testfile.txt": "S: this is a test file", "readme.md": "S: this is a readme file", "a/another_test_file.txt": "S: yet another txt file", "abc/def/test.py": "S: file in nested folders", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } diff --git a/e2e/util_test.go b/e2e/util_test.go index af2da82cd..5431f0d28 100644 --- a/e2e/util_test.go +++ b/e2e/util_test.go @@ -741,6 +741,12 @@ func putArbitraryMetadata(metadata map[string]*string) putOption { } } +func putStorageClass(storageClass string) putOption { + return func(opts *s3.PutObjectInput) { + opts.StorageClass = aws.String(storageClass) + } +} + func putFile(t *testing.T, client *s3.S3, bucket string, filename string, content string, opts ...putOption) { t.Helper() input := &s3.PutObjectInput{ diff --git a/go.mod b/go.mod index 2c193acd0..a2944014d 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/hashicorp/go-multierror v1.1.1 github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 - github.com/igungor/gofakes3 v0.0.16 + github.com/igungor/gofakes3 v0.0.18 github.com/karrick/godirwalk v1.15.3 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/lanrat/extsort v1.0.0 diff --git a/go.sum b/go.sum index 6eadffd54..bde650031 100644 --- a/go.sum +++ b/go.sum @@ -26,6 +26,10 @@ github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 h1:VHgatEHNcBFE 
github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/igungor/gofakes3 v0.0.16 h1:aMipkwE9s2u4T6GfgIPZ8ngJcReYsJvGRm6c4/lLAfY= github.com/igungor/gofakes3 v0.0.16/go.mod h1:+rwAKRO9RTGCIeE8SRvRPLSj7PVhaMBLlm1zPXzu7Cs= +github.com/igungor/gofakes3 v0.0.17 h1:ZIdB79fh9/SxIG6RHMNTm9+YHA3bX5nxT4/l763eRaE= +github.com/igungor/gofakes3 v0.0.17/go.mod h1:+rwAKRO9RTGCIeE8SRvRPLSj7PVhaMBLlm1zPXzu7Cs= +github.com/igungor/gofakes3 v0.0.18 h1:LTJV11PrTJ6DGEiCEKXh75wFZtwv773Nof0FB9Ef5qc= +github.com/igungor/gofakes3 v0.0.18/go.mod h1:+rwAKRO9RTGCIeE8SRvRPLSj7PVhaMBLlm1zPXzu7Cs= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= diff --git a/vendor/github.com/igungor/gofakes3/backend.go b/vendor/github.com/igungor/gofakes3/backend.go index d55798a0d..c537e6d5f 100644 --- a/vendor/github.com/igungor/gofakes3/backend.go +++ b/vendor/github.com/igungor/gofakes3/backend.go @@ -13,12 +13,13 @@ const ( // // You MUST always call Contents.Close() otherwise you may leak resources. type Object struct { - Name string - Metadata map[string]string - Size int64 - Contents io.ReadCloser - Hash []byte - Range *ObjectRange + Name string + Metadata map[string]string + Size int64 + Contents io.ReadCloser + Hash []byte + Range *ObjectRange + StorageClass StorageClass // VersionID will be empty if bucket versioning has not been enabled. VersionID VersionID @@ -226,7 +227,7 @@ type Backend interface { // // The size can be used if the backend needs to read the whole reader; use // gofakes3.ReadAll() for this job rather than ioutil.ReadAll(). - PutObject(bucketName, key string, meta map[string]string, input io.Reader, size int64) (PutObjectResult, error) + PutObject(bucketName, key string, meta map[string]string, input io.Reader, size int64, storageClass StorageClass) (PutObjectResult, error) DeleteMulti(bucketName string, objects ...string) (MultiDeleteResult, error) } diff --git a/vendor/github.com/igungor/gofakes3/backend/s3bolt/backend.go b/vendor/github.com/igungor/gofakes3/backend/s3bolt/backend.go index b37b40ebd..5fad0bb6e 100644 --- a/vendor/github.com/igungor/gofakes3/backend/s3bolt/backend.go +++ b/vendor/github.com/igungor/gofakes3/backend/s3bolt/backend.go @@ -57,6 +57,10 @@ func New(bolt *bolt.DB, opts ...Option) *Backend { return b } +func (db *Backend) Close() error { + return db.bolt.Close() +} + // metaBucket returns a utility that manages access to the metadata bucket. // The returned struct is valid only for the lifetime of the bolt.Tx. // The metadata bucket may not exist if this is an older database. 
@@ -168,6 +172,7 @@ func (db *Backend) ListBucket(name string, prefix *gofakes3.Prefix, page gofakes ETag: `"` + hex.EncodeToString(b.Hash[:]) + `"`, Size: b.Size, LastModified: gofakes3.NewContentTime(b.LastModified.UTC()), + StorageClass: gofakes3.StorageClass(b.StorageClass), } objects.Add(item) } @@ -295,6 +300,7 @@ func (db *Backend) PutObject( bucketName, objectName string, meta map[string]string, input io.Reader, size int64, + storageClass gofakes3.StorageClass, ) (result gofakes3.PutObjectResult, err error) { bts, err := gofakes3.ReadAll(input, size) @@ -318,6 +324,7 @@ func (db *Backend) PutObject( LastModified: mod, Contents: bts, Hash: hash[:], + StorageClass: storageClass, }) if err != nil { return err diff --git a/vendor/github.com/igungor/gofakes3/backend/s3bolt/schema.go b/vendor/github.com/igungor/gofakes3/backend/s3bolt/schema.go index f5908f02e..e093db714 100644 --- a/vendor/github.com/igungor/gofakes3/backend/s3bolt/schema.go +++ b/vendor/github.com/igungor/gofakes3/backend/s3bolt/schema.go @@ -27,6 +27,7 @@ type boltObject struct { Size int64 Contents []byte Hash []byte + StorageClass gofakes3.StorageClass `bson:",omitempty"` } func (b *boltObject) Object(objectName string, rangeRequest *gofakes3.ObjectRangeRequest) (*gofakes3.Object, error) { @@ -42,12 +43,13 @@ func (b *boltObject) Object(objectName string, rangeRequest *gofakes3.ObjectRang } return &gofakes3.Object{ - Name: objectName, - Metadata: b.Metadata, - Size: b.Size, - Contents: s3io.ReaderWithDummyCloser{bytes.NewReader(data)}, - Range: rnge, - Hash: b.Hash, + Name: objectName, + Metadata: b.Metadata, + Size: b.Size, + Contents: s3io.ReaderWithDummyCloser{bytes.NewReader(data)}, + Range: rnge, + Hash: b.Hash, + StorageClass: b.StorageClass, }, nil } diff --git a/vendor/github.com/igungor/gofakes3/backend/s3mem/backend.go b/vendor/github.com/igungor/gofakes3/backend/s3mem/backend.go index 27c7a4f8a..21c5bb19e 100644 --- a/vendor/github.com/igungor/gofakes3/backend/s3mem/backend.go +++ b/vendor/github.com/igungor/gofakes3/backend/s3mem/backend.go @@ -217,7 +217,7 @@ func (db *Backend) GetObject(bucketName, objectName string, rangeRequest *gofake return result, nil } -func (db *Backend) PutObject(bucketName, objectName string, meta map[string]string, input io.Reader, size int64) (result gofakes3.PutObjectResult, err error) { +func (db *Backend) PutObject(bucketName, objectName string, meta map[string]string, input io.Reader, size int64, storageClass gofakes3.StorageClass) (result gofakes3.PutObjectResult, err error) { // No need to lock the backend while we read the data into memory; it holds // the write lock open unnecessarily, and could be blocked for an unreasonably // long time by a connection timing out: diff --git a/vendor/github.com/igungor/gofakes3/error.go b/vendor/github.com/igungor/gofakes3/error.go index d0a71f77e..c85974946 100644 --- a/vendor/github.com/igungor/gofakes3/error.go +++ b/vendor/github.com/igungor/gofakes3/error.go @@ -88,6 +88,8 @@ const ( ErrNotImplemented ErrorCode = "NotImplemented" ErrInternal ErrorCode = "InternalError" + + ErrInvalidStorageClass ErrorCode = "InvalidStorageClass" ) // INTERNAL errors! 
These are not part of the S3 interface, they are codes diff --git a/vendor/github.com/igungor/gofakes3/gofakes3.go b/vendor/github.com/igungor/gofakes3/gofakes3.go index d76c33254..372ff4927 100644 --- a/vendor/github.com/igungor/gofakes3/gofakes3.go +++ b/vendor/github.com/igungor/gofakes3/gofakes3.go @@ -534,7 +534,7 @@ func (g *GoFakeS3) createObjectBrowserUpload(bucket string, w http.ResponseWrite return err } - result, err := g.storage.PutObject(bucket, key, meta, rdr, fileHeader.Size) + result, err := g.storage.PutObject(bucket, key, meta, rdr, fileHeader.Size, StorageClass(r.Header.Get("x-amz-storage-class"))) if err != nil { return err } @@ -618,7 +618,11 @@ func (g *GoFakeS3) createObject(bucket, object string, w http.ResponseWriter, r return err } - result, err := g.storage.PutObject(bucket, object, meta, rdr, size) + if r.Header.Get("x-amz-storage-class") == "" { + r.Header.Set("x-amz-storage-class", "STANDARD") + } + + result, err := g.storage.PutObject(bucket, object, meta, rdr, size, StorageClass(r.Header.Get("x-amz-storage-class"))) if err != nil { return err } @@ -680,7 +684,7 @@ func (g *GoFakeS3) copyObject(bucket, object string, meta map[string]string, w h } } - result, err := g.storage.PutObject(bucket, object, meta, srcObj.Contents, srcObj.Size) + result, err := g.storage.PutObject(bucket, object, meta, srcObj.Contents, srcObj.Size, StorageClass(r.Header.Get("x-amz-storage-class"))) if err != nil { return err } @@ -895,7 +899,7 @@ func (g *GoFakeS3) completeMultipartUpload(bucket, object string, uploadID Uploa return err } - result, err := g.storage.PutObject(bucket, object, upload.Meta, bytes.NewReader(fileBody), int64(len(fileBody))) + result, err := g.storage.PutObject(bucket, object, upload.Meta, bytes.NewReader(fileBody), int64(len(fileBody)), StorageClass(r.Header.Get("x-amz-storage-class"))) if err != nil { return err } diff --git a/vendor/github.com/igungor/gofakes3/messages.go b/vendor/github.com/igungor/gofakes3/messages.go index 17138fe51..258fed4b3 100644 --- a/vendor/github.com/igungor/gofakes3/messages.go +++ b/vendor/github.com/igungor/gofakes3/messages.go @@ -96,7 +96,7 @@ func NewContentTime(t time.Time) ContentTime { func (c ContentTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error { // This is the format expected by the aws xml code, not the default. if !c.IsZero() { - var s = c.Format("2006-01-02T15:04:05.999Z") + s := c.Format("2006-01-02T15:04:05.999Z") return e.EncodeElement(s, start) } return nil @@ -127,7 +127,7 @@ func (d MultiDeleteResult) AsError() error { if len(d.Error) == 0 { return nil } - var strs = make([]string, 0, len(d.Error)) + strs := make([]string, 0, len(d.Error)) for _, er := range d.Error { strs = append(strs, er.String()) } @@ -256,6 +256,7 @@ type DeleteMarker struct { VersionID VersionID `xml:"VersionId"` IsLatest bool `xml:"IsLatest"` LastModified ContentTime `xml:"LastModified,omitempty"` + StorageClass string `xml:"StorageClass"` Owner *UserInfo `xml:"Owner,omitempty"` } @@ -338,7 +339,6 @@ func NewListBucketVersionsResult( prefix *Prefix, page *ListBucketVersionsPage, ) *ListBucketVersionsResult { - result := &ListBucketVersionsResult{ Xmlns: "http://s3.amazonaws.com/doc/2006-03-01/", Name: bucketName, @@ -428,6 +428,7 @@ type ListMultipartUploadPartItem struct { LastModified ContentTime `xml:"LastModified,omitempty"` ETag string `xml:"ETag,omitempty"` Size int64 `xml:"Size"` + StorageClass string `xml:"StorageClass,omitempty"` } // CopyObjectResult contains the response from a CopyObject operation. 
@@ -435,6 +436,7 @@ type CopyObjectResult struct { XMLName xml.Name `xml:"CopyObjectResult"` ETag string `xml:"ETag,omitempty"` LastModified ContentTime `xml:"LastModified,omitempty"` + StorageClass string `xml:"StorageClass,omitempty"` } // MFADeleteStatus is used by VersioningConfiguration. diff --git a/vendor/github.com/igungor/gofakes3/uploader.go b/vendor/github.com/igungor/gofakes3/uploader.go index c2ce86d48..b1778d072 100644 --- a/vendor/github.com/igungor/gofakes3/uploader.go +++ b/vendor/github.com/igungor/gofakes3/uploader.go @@ -197,13 +197,12 @@ func (u *uploader) ListParts(bucket, object string, uploadID UploadID, marker in return nil, err } - var result = ListMultipartUploadPartsResult{ + result := ListMultipartUploadPartsResult{ Bucket: bucket, Key: object, UploadID: uploadID, MaxParts: limit, PartNumberMarker: marker, - StorageClass: "STANDARD", // FIXME } var cnt int64 diff --git a/vendor/modules.txt b/vendor/modules.txt index c19a12140..650fcfcf6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -91,7 +91,7 @@ github.com/hashicorp/go-multierror # github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 ## explicit github.com/iancoleman/strcase -# github.com/igungor/gofakes3 v0.0.16 +# github.com/igungor/gofakes3 v0.0.18 ## explicit; go 1.13 github.com/igungor/gofakes3 github.com/igungor/gofakes3/backend/s3bolt From eb562863486da43437a8319c7ab2ad2e0c0e537e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=2EBurak=20Ya=C5=9Far?= <91782773+4o4x@users.noreply.github.com> Date: Mon, 29 Jul 2024 12:02:22 +0300 Subject: [PATCH 07/17] test: added storageclass assertions to some of the tests --- e2e/sync_test.go | 4 ++-- e2e/util_test.go | 7 ++++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/e2e/sync_test.go b/e2e/sync_test.go index 16947ccae..77336eba1 100644 --- a/e2e/sync_test.go +++ b/e2e/sync_test.go @@ -1456,8 +1456,8 @@ func TestSyncLocalFolderToS3BucketIsStorageClassChangingWithDifferentSizeAndCont assert.Assert(t, fs.Equal(workdir.Path(), expected)) // assert s3 objects in destination - assert.Assert(t, ensureS3Object(s3client, bucket, "testfile1.txt", "this is an updated test file")) - assert.Assert(t, ensureS3Object(s3client, bucket, "testfile2.txt", "this is an updated test file")) + assert.Assert(t, ensureS3Object(s3client, bucket, "testfile1.txt", "this is an updated test file", ensureStorageClass("STANDARD"))) + assert.Assert(t, ensureS3Object(s3client, bucket, "testfile2.txt", "this is an updated test file", ensureStorageClass("STANDARD"))) } // sync s3://destbucket/ dir (same objects, different size, different content, different or same storage class) diff --git 
a/e2e/util_test.go b/e2e/util_test.go index 5431f0d28..f1679d1f9 100644 --- a/e2e/util_test.go +++ b/e2e/util_test.go @@ -703,7 +703,12 @@ func ensureS3Object( } if opts.storageClass != nil { - if diff := cmp.Diff(opts.storageClass, output.StorageClass); diff != "" { + storageClassOfOutput := aws.String("STANDARD") + if output.StorageClass != nil { + storageClassOfOutput = output.StorageClass + } + + if diff := cmp.Diff(opts.storageClass, storageClassOfOutput); diff != "" { return fmt.Errorf("storage-class of %v/%v: (-want +got):\n%v", bucket, key, diff) } } From 8d7568a50990dee7b898f84c59179d8b732a4592 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=2EBurak=20Ya=C5=9Far?= <91782773+4o4x@users.noreply.github.com> Date: Mon, 29 Jul 2024 12:03:51 +0300 Subject: [PATCH 08/17] fix: remove unnecessary print statements in sync_test.go --- e2e/sync_test.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/e2e/sync_test.go b/e2e/sync_test.go index 77336eba1..de478e304 100644 --- a/e2e/sync_test.go +++ b/e2e/sync_test.go @@ -1380,10 +1380,6 @@ func TestSyncS3BucketToS3BucketIsStorageClassChangingWithDifferentSizeAndContent cmd := s5cmd("sync", src, dst) result := icmd.RunCmd(cmd) - - fmt.Println(result.Stdout()) - fmt.Println(result.Stderr()) - result.Assert(t, icmd.Success) assertLines(t, result.Stdout(), map[int]compareFunc{ @@ -1442,10 +1438,6 @@ func TestSyncLocalFolderToS3BucketIsStorageClassChangingWithDifferentSizeAndCont result.Assert(t, icmd.Success) - fmt.Println(result.Stdout()) - // fmt.Printf(`cp %v/testfile1.txt %vtestfile1.txt\n`, src, dst) - // fmt.Printf(`cp %v/testfile2.txt %vtestfile2.txt\n`, src, dst) - assertLines(t, result.Stdout(), map[int]compareFunc{ 0: equals(`cp %vtestfile1.txt %vtestfile1.txt`, src, dst), 1: equals(`cp %vtestfile2.txt %vtestfile2.txt`, src, dst), From 711db8179c9d545c511f1273d679a7b7a760b359 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=2EBurak=20Ya=C5=9Far?= Date: Tue, 30 Jul 2024 19:14:35 +0300 Subject: [PATCH 09/17] chore: remove gofakes3 v0.0.17 from go.sum --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index bde650031..c90518acc 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,6 @@ github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 h1:VHgatEHNcBFE github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/igungor/gofakes3 v0.0.16 h1:aMipkwE9s2u4T6GfgIPZ8ngJcReYsJvGRm6c4/lLAfY= github.com/igungor/gofakes3 v0.0.16/go.mod h1:+rwAKRO9RTGCIeE8SRvRPLSj7PVhaMBLlm1zPXzu7Cs= -github.com/igungor/gofakes3 v0.0.17 h1:ZIdB79fh9/SxIG6RHMNTm9+YHA3bX5nxT4/l763eRaE= -github.com/igungor/gofakes3 v0.0.17/go.mod h1:+rwAKRO9RTGCIeE8SRvRPLSj7PVhaMBLlm1zPXzu7Cs= github.com/igungor/gofakes3 v0.0.18 h1:LTJV11PrTJ6DGEiCEKXh75wFZtwv773Nof0FB9Ef5qc= github.com/igungor/gofakes3 v0.0.18/go.mod h1:+rwAKRO9RTGCIeE8SRvRPLSj7PVhaMBLlm1zPXzu7Cs= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= From efb2ace3c738319d351e6ba4262237fe61f7445f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=2EBurak=20Ya=C5=9Far?= Date: Tue, 30 Jul 2024 19:31:44 +0300 Subject: [PATCH 10/17] chore: remove gofakes3 v0.0.16 from go.sum --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index c90518acc..bdf3b8a18 100644 --- a/go.sum +++ b/go.sum @@ -24,8 +24,6 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= 
github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 h1:VHgatEHNcBFEB7inlalqfNqw65aNkM1lGX2yt3NmbS8= github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= -github.com/igungor/gofakes3 v0.0.16 h1:aMipkwE9s2u4T6GfgIPZ8ngJcReYsJvGRm6c4/lLAfY= -github.com/igungor/gofakes3 v0.0.16/go.mod h1:+rwAKRO9RTGCIeE8SRvRPLSj7PVhaMBLlm1zPXzu7Cs= github.com/igungor/gofakes3 v0.0.18 h1:LTJV11PrTJ6DGEiCEKXh75wFZtwv773Nof0FB9Ef5qc= github.com/igungor/gofakes3 v0.0.18/go.mod h1:+rwAKRO9RTGCIeE8SRvRPLSj7PVhaMBLlm1zPXzu7Cs= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= From 4bb7d163dbf676886a70493ae76b76c2b3ed2069 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=2EBurak=20Ya=C5=9Far?= Date: Tue, 30 Jul 2024 19:58:02 +0300 Subject: [PATCH 11/17] chore: revert unnecessary line delete in sync_strategy.go --- command/sync_strategy.go | 1 - 1 file changed, 1 deletion(-) diff --git a/command/sync_strategy.go b/command/sync_strategy.go index 6f445aa53..fe6e948e9 100644 --- a/command/sync_strategy.go +++ b/command/sync_strategy.go @@ -26,7 +26,6 @@ func (s *SizeOnlyStrategy) ShouldSync(srcObj, dstObj *storage.Object) error { if srcObj.Size == dstObj.Size { return errorpkg.ErrObjectSizesMatch } - return nil } From 0110fbe11a01588d753932ba34665a1ccf350f4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=2EBurak=20Ya=C5=9Far?= Date: Tue, 30 Jul 2024 20:07:16 +0300 Subject: [PATCH 12/17] chore: revert unnecessary line delete in sync_strategy_test.go --- command/sync_strategy_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/command/sync_strategy_test.go b/command/sync_strategy_test.go index ebbc33ca0..ba1592990 100644 --- a/command/sync_strategy_test.go +++ b/command/sync_strategy_test.go @@ -138,7 +138,6 @@ func TestSizeOnlyStrategy_ShouldSync(t *testing.T) { expected: errorpkg.ErrObjectSizesMatch, }, } - for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { strategy := &SizeOnlyStrategy{} From 3932f27683c158e57d6739abdd06208884ab4c6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=2EBurak=20Ya=C5=9Far?= Date: Tue, 30 Jul 2024 20:13:06 +0300 Subject: [PATCH 13/17] chore: revert some unnecessary changes in util_test.go --- e2e/util_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/e2e/util_test.go b/e2e/util_test.go index f1679d1f9..a286fe192 100644 --- a/e2e/util_test.go +++ b/e2e/util_test.go @@ -200,7 +200,7 @@ func workdir(t *testing.T) (*fs.Dir, string) { prefix = strings.ReplaceAll(prefix, ":", "-") } - testdir := fs.NewDir(t, prefix, fs.WithDir("workdir", fs.WithMode(0o700))) + testdir := fs.NewDir(t, prefix, fs.WithDir("workdir", fs.WithMode(0700))) workdir := testdir.Join("workdir") return testdir, workdir } @@ -415,7 +415,7 @@ func goBuildS5cmd() func() { panic(fmt.Sprintf("failed to build executable: %s", err)) } - if err := os.Chmod(s5cmdPath, 0o755); err != nil { + if err := os.Chmod(s5cmdPath, 0755); err != nil { panic(err) } @@ -450,7 +450,7 @@ func createBucket(t *testing.T, client *s3.S3, bucket string) { Bucket: aws.String(bucket), } - // remove objects first. + //remove objects first. // delete each object individually if using GCS. 
if isGoogleEndpointFromEnv(t) { err = client.ListObjectsPages(&listInput, func(p *s3.ListObjectsOutput, lastPage bool) bool { @@ -541,6 +541,7 @@ func createBucket(t *testing.T, client *s3.S3, bucket string) { } } }) + } func isGoogleEndpointFromEnv(t *testing.T) bool { @@ -615,7 +616,6 @@ func ensureContentEncoding(contentEncoding string) ensureOption { opts.contentEncoding = &contentEncoding } } - func ensureEncryptionMethod(encryptionMethod string) ensureOption { return func(opts *ensureOpts) { opts.encryptionMethod = &encryptionMethod @@ -627,7 +627,6 @@ func ensureEncryptionKeyID(encryptionKeyID string) ensureOption { opts.encryptionKeyID = &encryptionKeyID } } - func ensureArbitraryMetadata(metadata map[string]*string) ensureOption { return func(opts *ensureOpts) { opts.metadata = metadata @@ -700,6 +699,7 @@ func ensureS3Object( if diff := cmp.Diff(opts.contentDisposition, output.ContentDisposition); diff != "" { return fmt.Errorf("content-disposition of %v/%v: (-want +got):\n%v", bucket, key, diff) } + } if opts.storageClass != nil { From 957a0e5cdca8c7630ca9119c52e398df45513036 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=2EBurak=20Ya=C5=9Far?= <91782773+4o4x@users.noreply.github.com> Date: Tue, 30 Jul 2024 20:29:51 +0300 Subject: [PATCH 14/17] chore: remove print statement in rm_test.go --- e2e/rm_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/e2e/rm_test.go b/e2e/rm_test.go index 43c6cb628..3a0905543 100644 --- a/e2e/rm_test.go +++ b/e2e/rm_test.go @@ -1339,8 +1339,6 @@ func TestRemoveS3ObjectsWithIncludeFilter(t *testing.T) { result.Assert(t, icmd.Success) - fmt.Println(result.Stdout()) - assertLines(t, result.Stdout(), map[int]compareFunc{ 0: equals("rm %v/%s", srcpath, files[0]), 1: equals("rm %v/%s", srcpath, files[1]), From 67dab131164913cb96b171a01eac1df9ae7337e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=2EBurak=20Ya=C5=9Far?= <91782773+4o4x@users.noreply.github.com> Date: Wed, 31 Jul 2024 09:54:36 +0300 Subject: [PATCH 15/17] test: fix storageclass test using timestamps --- e2e/sync_test.go | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/e2e/sync_test.go b/e2e/sync_test.go index de478e304..e77d7d351 100644 --- a/e2e/sync_test.go +++ b/e2e/sync_test.go @@ -1210,14 +1210,21 @@ func TestSyncS3BucketToS3BucketIsStorageClassChanging(t *testing.T) { // sync dir s3://destbucket/ (same objects, same size, same content, different or same storage class) func TestSyncLocalFolderToS3BucketIsStorageClassChanging(t *testing.T) { t.Parallel() - s3client, s5cmd := setup(t) + now := time.Now() + timeSource := newFixedTimeSource(now) + s3client, s5cmd := setup(t, withTimeSource(timeSource)) bucket := s3BucketFromTestName(t) createBucket(t, s3client, bucket) + timestamp := fs.WithTimestamps( + now.Add(-time.Minute), + now.Add(-time.Minute), + ) + folderLayout := []fs.PathOp{ - fs.WithFile("testfile1.txt", "this is a test file"), - fs.WithFile("testfile2.txt", "this is a test file"), + fs.WithFile("testfile1.txt", "this is a test file", timestamp), + fs.WithFile("testfile2.txt", "this is a test file", timestamp), } workdir := fs.NewDir(t, "somedir", folderLayout...) @@ -1258,8 +1265,13 @@ func TestSyncLocalFolderToS3BucketIsStorageClassChanging(t *testing.T) { result.Assert(t, icmd.Success) assertLines(t, result.Stdout(), map[int]compareFunc{}) + expectedFiles := []fs.PathOp{ + fs.WithFile("testfile1.txt", "this is a test file"), + fs.WithFile("testfile2.txt", "this is a test file"), + } + // expected folder structure without the timestamp. 
- expected := fs.Expected(t, folderLayout...) + expected := fs.Expected(t, expectedFiles...) assert.Assert(t, fs.Equal(workdir.Path(), expected)) // assert s3 objects in destination From 75eb909422a27b2194724044b90d02e7353d4a8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=2EBurak=20Ya=C5=9Far?= <91782773+4o4x@users.noreply.github.com> Date: Wed, 31 Jul 2024 10:48:54 +0300 Subject: [PATCH 16/17] test: fix storageclass test using timestamps --- e2e/sync_test.go | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/e2e/sync_test.go b/e2e/sync_test.go index e77d7d351..d592e6d52 100644 --- a/e2e/sync_test.go +++ b/e2e/sync_test.go @@ -1283,7 +1283,15 @@ func TestSyncLocalFolderToS3BucketIsStorageClassChanging(t *testing.T) { // sync s3://srcbucket/ dir (same objects, same size, same content, different or same storage class) func TestSyncS3BucketToLocalFolderIsStorageClassChanging(t *testing.T) { t.Parallel() - s3client, s5cmd := setup(t) + now := time.Now() + timeSource := newFixedTimeSource(now) + s3client, s5cmd := setup(t, withTimeSource(timeSource)) + + // local files are 1 minute newer than the remotes + timestamp := fs.WithTimestamps( + now.Add(time.Minute), + now.Add(time.Minute), + ) bucket := s3BucketFromTestName(t) createBucket(t, s3client, bucket) @@ -1313,8 +1321,8 @@ func TestSyncS3BucketToLocalFolderIsStorageClassChanging(t *testing.T) { } folderLayout := []fs.PathOp{ - fs.WithFile("testfile1.txt", "this is a test file"), - fs.WithFile("testfile2.txt", "this is a test file"), + fs.WithFile("testfile1.txt", "this is a test file", timestamp), + fs.WithFile("testfile2.txt", "this is a test file", timestamp), } // put objects in local folder @@ -1334,8 +1342,13 @@ func TestSyncS3BucketToLocalFolderIsStorageClassChanging(t *testing.T) { result.Assert(t, icmd.Success) assertLines(t, result.Stdout(), map[int]compareFunc{}) + expectedFiles := []fs.PathOp{ + fs.WithFile("testfile1.txt", "this is a test file"), + fs.WithFile("testfile2.txt", "this is a test file"), + } + // expected folder structure without the timestamp. - expected := fs.Expected(t, folderLayout...) + expected := fs.Expected(t, expectedFiles...) assert.Assert(t, fs.Equal(workdir.Path(), expected)) // assert s3 objects in destination From a4a7311141533c0a38e78519d22f9a41085d726e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=2EBurak=20Ya=C5=9Far?= <91782773+4o4x@users.noreply.github.com> Date: Wed, 31 Jul 2024 10:54:12 +0300 Subject: [PATCH 17/17] chore: revert unnecessary changes in rm_test.go --- e2e/rm_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/e2e/rm_test.go b/e2e/rm_test.go index 3a0905543..43c6cb628 100644 --- a/e2e/rm_test.go +++ b/e2e/rm_test.go @@ -1339,6 +1339,8 @@ func TestRemoveS3ObjectsWithIncludeFilter(t *testing.T) { result.Assert(t, icmd.Success) + fmt.Println(result.Stdout()) + assertLines(t, result.Stdout(), map[int]compareFunc{ 0: equals("rm %v/%s", srcpath, files[0]), 1: equals("rm %v/%s", srcpath, files[1]),