diff --git a/command/sync.go b/command/sync.go index 092003017..6dede5a66 100644 --- a/command/sync.go +++ b/command/sync.go @@ -357,7 +357,7 @@ func (s Sync) getSourceAndDestinationObjects(ctx context.Context, cancel context log.Error(msg) cancel() } - if s.shouldSkipObject(st, true) { + if s.shouldSkipSrcObject(st, true) { continue } filteredSrcObjectChannel <- *st @@ -404,7 +404,7 @@ func (s Sync) getSourceAndDestinationObjects(ctx context.Context, cancel context log.Error(msg) cancel() } - if s.shouldSkipObject(dt, false) { + if s.shouldSkipDstObject(dt, false) { continue } filteredDstObjectChannel <- *dt @@ -550,7 +550,7 @@ func generateDestinationURL(srcurl, dsturl *url.URL, isBatch bool) *url.URL { } -// shouldSkipObject checks is object should be skipped. -func (s Sync) shouldSkipObject(object *storage.Object, verbose bool) bool { +// shouldSkipSrcObject checks whether the source object should be skipped. +func (s Sync) shouldSkipSrcObject(object *storage.Object, verbose bool) bool { if object.Type.IsDir() || errorpkg.IsCancelation(object.Err) { return true } @@ -572,6 +572,22 @@ func (s Sync) shouldSkipObject(object *storage.Object, verbose bool) bool { return false } +// shouldSkipDstObject checks whether the destination object should be skipped. +func (s Sync) shouldSkipDstObject(object *storage.Object, verbose bool) bool { + if object.Type.IsDir() || errorpkg.IsCancelation(object.Err) { + return true + } + + if err := object.Err; err != nil { + if verbose { + printError(s.fullCommand, s.op, err) + } + return true + } + + return false +} + // shouldStopSync determines whether a sync process should be stopped or not. func (s Sync) shouldStopSync(err error) bool { if err == storage.ErrNoObjectFound { diff --git a/e2e/sync_test.go b/e2e/sync_test.go index 7bd8e60da..d592e6d52 100644 --- a/e2e/sync_test.go +++ b/e2e/sync_test.go @@ -6,9 +6,13 @@ import ( "os" "path/filepath" "runtime" + "strings" "testing" "time" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "gotest.tools/v3/assert" "gotest.tools/v3/fs" "gotest.tools/v3/icmd" @@ -88,7 +92,6 @@ func TestSyncSingleS3ObjectToLocalTwice(t *testing.T) { // rerunning same command should not download object, empty result expected result = icmd.RunCmd(cmd) result.Assert(t, icmd.Success) - assertLines(t, result.Stdout(), map[int]compareFunc{}) } // sync s3://bucket/dir/source.go .
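Note on the command/sync.go hunks above: the former shouldSkipObject predicate is split in two so that the source and destination listings can apply different skip rules. The sketch below is a minimal illustrative reduction, not the s5cmd implementation; objectInfo, skipSrc, and skipDst are hypothetical names, and the GLACIER check reflects the behavior this PR's tests assert (GLACIER source objects are never copied).

```go
package main

import (
	"errors"
	"fmt"
)

// objectInfo is a hypothetical stand-in for storage.Object.
type objectInfo struct {
	isDir        bool
	storageClass string
	err          error
}

// skipSrc sketches shouldSkipSrcObject: directories, errored entries, and
// (per this PR's observable behavior) objects stored in GLACIER are dropped,
// since a Glacier object cannot be retrieved as a copy source.
func skipSrc(o objectInfo) bool {
	return o.isDir || o.err != nil || o.storageClass == "GLACIER"
}

// skipDst sketches shouldSkipDstObject: a destination entry only needs to be
// listable to take part in the comparison; its storage class is irrelevant.
func skipDst(o objectInfo) bool {
	return o.isDir || o.err != nil
}

func main() {
	glacier := objectInfo{storageClass: "GLACIER"}
	broken := objectInfo{err: errors.New("listing failed")}
	fmt.Println(skipSrc(glacier), skipDst(glacier)) // true false
	fmt.Println(skipSrc(broken), skipDst(broken))   // true true
}
```

Keeping the destination predicate permissive matters for --delete: an object that exists at the destination must still enter the comparison (and can be removed) even when its storage class would make it unusable as a copy source.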
@@ -505,14 +508,14 @@ func TestSyncS3BucketToEmptyFolder(t *testing.T) { bucket := s3BucketFromTestName(t) createBucket(t, s3client, bucket) - S3Content := map[string]string{ + s3Content := map[string]string{ "testfile.txt": "S: this is a test file", "readme.md": "S: this is a readme file", "a/another_test_file.txt": "S: yet another txt file", "abc/def/test.py": "S: file in nested folders", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -554,7 +557,7 @@ func TestSyncS3BucketToEmptyFolder(t *testing.T) { assert.Assert(t, fs.Equal(workdir.Path(), expected)) // assert s3 - for key, content := range S3Content { + for key, content := range s3Content { assert.Assert(t, ensureS3Object(s3client, bucket, key, content)) } } @@ -573,14 +576,14 @@ func TestSyncS3BucketToEmptyS3Bucket(t *testing.T) { createBucket(t, s3client, bucket) createBucket(t, s3client, dstbucket) - S3Content := map[string]string{ + s3Content := map[string]string{ "testfile.txt": "S: this is a test file", "readme.md": "S: this is a readme file", "a/another_test_file.txt": "S: yet another txt file", "abc/def/test.py": "S: file in nested folders", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -601,12 +604,12 @@ func TestSyncS3BucketToEmptyS3Bucket(t *testing.T) { }, sortInput(true)) // assert s3 objects in source bucket. - for key, content := range S3Content { + for key, content := range s3Content { assert.Assert(t, ensureS3Object(s3client, bucket, key, content)) } // assert s3 objects in dest bucket - for key, content := range S3Content { + for key, content := range s3Content { key = fmt.Sprintf("%s/%s", prefix, key) // add the prefix assert.Assert(t, ensureS3Object(s3client, dstbucket, key, content)) } @@ -642,14 +645,14 @@ func TestSyncLocalFolderToS3BucketSameObjectsSourceOlder(t *testing.T) { workdir := fs.NewDir(t, "somedir", folderLayout...) defer workdir.Remove() - S3Content := map[string]string{ + s3Content := map[string]string{ "main.py": "D: this is a python file", "testfile.txt": "D: this is a test file", "readme.md": "D: this is a readme file", "a/another_test_file.txt": "D: yet another txt file", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -683,7 +686,7 @@ func TestSyncLocalFolderToS3BucketSameObjectsSourceOlder(t *testing.T) { assert.Assert(t, fs.Equal(workdir.Path(), expected)) // assert s3 - for key, content := range S3Content { + for key, content := range s3Content { assert.Assert(t, ensureS3Object(s3client, bucket, key, content)) } } @@ -717,13 +720,13 @@ func TestSyncLocalFolderToS3BucketSourceNewer(t *testing.T) { workdir := fs.NewDir(t, "somedir", folderLayout...) defer workdir.Remove() - S3Content := map[string]string{ + s3Content := map[string]string{ "testfile.txt": "D: this is a test file ", "readme.md": "D: this is a readme file", "dir/main.py": "D: python file", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -796,14 +799,14 @@ func TestSyncS3BucketToLocalFolderSameObjectsSourceOlder(t *testing.T) { workdir := fs.NewDir(t, "somedir", folderLayout...) 
defer workdir.Remove() - S3Content := map[string]string{ + s3Content := map[string]string{ "main.py": "S: this is a python file", "testfile.txt": "S: this is a test file", // content different from local "readme.md": "S: this is a readme file", // content different from local "a/another_test_file.txt": "S: yet another txt file", // content different from local } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -838,7 +841,7 @@ func TestSyncS3BucketToLocalFolderSameObjectsSourceOlder(t *testing.T) { assert.Assert(t, fs.Equal(workdir.Path(), expected)) // assert s3 - for key, content := range S3Content { + for key, content := range s3Content { assert.Assert(t, ensureS3Object(s3client, bucket, key, content)) } } @@ -873,14 +876,14 @@ func TestSyncS3BucketToLocalFolderSameObjectsSourceNewer(t *testing.T) { workdir := fs.NewDir(t, "somedir", folderLayout...) defer workdir.Remove() - S3Content := map[string]string{ + s3Content := map[string]string{ "main.py": "S: this is a python file", "testfile.txt": "S: this is an updated test file", "readme.md": "S: this is an updated readme file", "a/another_test_file.txt": "S: yet another updated txt file", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -915,7 +918,7 @@ func TestSyncS3BucketToLocalFolderSameObjectsSourceNewer(t *testing.T) { assert.Assert(t, fs.Equal(workdir.Path(), expected)) // assert s3 - for key, content := range S3Content { + for key, content := range s3Content { assert.Assert(t, ensureS3Object(s3client, bucket, key, content)) } } @@ -1076,7 +1079,7 @@ func TestSyncS3BucketToLocalFolderSameObjectsSizeOnly(t *testing.T) { workdir := fs.NewDir(t, "somedir", folderLayout...) defer workdir.Remove() - S3Content := map[string]string{ + s3Content := map[string]string{ "test.py": "S: this is an updated python file", // content different from local, different size "testfile.txt": "S: this is a test file", // content different from local, same size "readme.md": "S: this is a readme file", // content different from local, same size @@ -1084,7 +1087,7 @@ func TestSyncS3BucketToLocalFolderSameObjectsSizeOnly(t *testing.T) { "abc/def/main.py": "S: python file", // local does not have it. 
} - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -1126,11 +1129,505 @@ func TestSyncS3BucketToLocalFolderSameObjectsSizeOnly(t *testing.T) { assert.Assert(t, fs.Equal(workdir.Path(), expected)) // assert s3 - for key, content := range S3Content { + for key, content := range s3Content { assert.Assert(t, ensureS3Object(s3client, bucket, key, content)) } } +// sync s3://bucket/* s3://destbucket/ (same objects, same size, same content, different or same storage class) +func TestSyncS3BucketToS3BucketIsStorageClassChanging(t *testing.T) { + t.Parallel() + s3client, s5cmd := setup(t) + + srcbucket := s3BucketFromTestName(t) + dstbucket := s3BucketFromTestNameWithPrefix(t, "dst") + + createBucket(t, s3client, srcbucket) + createBucket(t, s3client, dstbucket) + + storageClassesAndFile := []struct { + srcStorageClass string + dstStorageClass string + filename string + content string + }{ + {"STANDARD", "STANDARD", "testfile1.txt", "this is a test file"}, + {"STANDARD", "GLACIER", "testfile2.txt", "this is a test file"}, + {"GLACIER", "STANDARD", "testfile3.txt", "this is a test file"}, + {"GLACIER", "GLACIER", "testfile4.txt", "this is a test file"}, + } + + for _, sc := range storageClassesAndFile { + + putObject := s3.PutObjectInput{ + Bucket: &srcbucket, + Key: &sc.filename, + Body: strings.NewReader(sc.content), + StorageClass: &sc.srcStorageClass, + } + + _, err := s3client.PutObject(&putObject) + if err != nil { + t.Fatalf("failed to put object in %v: %v", sc.srcStorageClass, err) + } + + putObject = s3.PutObjectInput{ + Bucket: &dstbucket, + Key: &sc.filename, + Body: strings.NewReader(sc.content), + StorageClass: aws.String(sc.dstStorageClass), + } + + _, err = s3client.PutObject(&putObject) + if err != nil { + t.Fatalf("failed to put object in %v: %v", sc.dstStorageClass, err) + } + + } + + bucketPath := fmt.Sprintf("s3://%v", srcbucket) + src := fmt.Sprintf("%s/*", bucketPath) + dst := fmt.Sprintf("s3://%v/", dstbucket) + + cmd := s5cmd("sync", src, dst) + result := icmd.RunCmd(cmd) + + // there will be no stdout, since there are no changes + result.Assert(t, icmd.Success) + assertLines(t, result.Stdout(), map[int]compareFunc{}) + + // assert s3 objects in source + for _, sc := range storageClassesAndFile { + assert.Assert(t, ensureS3Object(s3client, srcbucket, sc.filename, sc.content, ensureStorageClass(sc.srcStorageClass))) + } + + // assert s3 objects in destination + for _, sc := range storageClassesAndFile { + assert.Assert(t, ensureS3Object(s3client, dstbucket, sc.filename, sc.content, ensureStorageClass(sc.dstStorageClass))) + } +} + +// sync dir s3://destbucket/ (same objects, same size, same content, different or same storage class) +func TestSyncLocalFolderToS3BucketIsStorageClassChanging(t *testing.T) { + t.Parallel() + now := time.Now() + timeSource := newFixedTimeSource(now) + s3client, s5cmd := setup(t, withTimeSource(timeSource)) + + bucket := s3BucketFromTestName(t) + createBucket(t, s3client, bucket) + + timestamp := fs.WithTimestamps( + now.Add(-time.Minute), + now.Add(-time.Minute), + ) + + folderLayout := []fs.PathOp{ + fs.WithFile("testfile1.txt", "this is a test file", timestamp), + fs.WithFile("testfile2.txt", "this is a test file", timestamp), + } + + workdir := fs.NewDir(t, "somedir", folderLayout...) 
+ defer workdir.Remove() + + storageClassesAndFile := []struct { + storageClass string + filename string + content string + }{ + {"STANDARD", "testfile1.txt", "this is a test file"}, + {"GLACIER", "testfile2.txt", "this is a test file"}, + } + + for _, sc := range storageClassesAndFile { + + putObject := s3.PutObjectInput{ + Bucket: &bucket, + Key: &sc.filename, + Body: strings.NewReader(sc.content), + StorageClass: aws.String(sc.storageClass), + } + + _, err := s3client.PutObject(&putObject) + if err != nil { + t.Fatalf("failed to put object in %v: %v", sc.storageClass, err) + } + } + + src := fmt.Sprintf("%v/", workdir.Path()) + src = filepath.ToSlash(src) + dst := fmt.Sprintf("s3://%v/", bucket) + + cmd := s5cmd("sync", src, dst) + result := icmd.RunCmd(cmd) + + // there will be no stdout + result.Assert(t, icmd.Success) + assertLines(t, result.Stdout(), map[int]compareFunc{}) + + expectedFiles := []fs.PathOp{ + fs.WithFile("testfile1.txt", "this is a test file"), + fs.WithFile("testfile2.txt", "this is a test file"), + } + + // expected folder structure without the timestamp. + expected := fs.Expected(t, expectedFiles...) + assert.Assert(t, fs.Equal(workdir.Path(), expected)) + + // assert s3 objects in destination + for _, sc := range storageClassesAndFile { + assert.Assert(t, ensureS3Object(s3client, bucket, sc.filename, sc.content, ensureStorageClass(sc.storageClass))) + } +} + +// sync s3://srcbucket/ dir (same objects, same size, same content, different or same storage class) +func TestSyncS3BucketToLocalFolderIsStorageClassChanging(t *testing.T) { + t.Parallel() + now := time.Now() + timeSource := newFixedTimeSource(now) + s3client, s5cmd := setup(t, withTimeSource(timeSource)) + + // local files are 1 minute newer than the remotes + timestamp := fs.WithTimestamps( + now.Add(time.Minute), + now.Add(time.Minute), + ) + + bucket := s3BucketFromTestName(t) + createBucket(t, s3client, bucket) + + storageClassesAndFile := []struct { + storageClass string + filename string + content string + }{ + {"STANDARD", "testfile1.txt", "this is a test file"}, + {"GLACIER", "testfile2.txt", "this is a test file"}, + } + + for _, sc := range storageClassesAndFile { + + putObject := s3.PutObjectInput{ + Bucket: &bucket, + Key: &sc.filename, + Body: strings.NewReader(sc.content), + StorageClass: aws.String(sc.storageClass), + } + + _, err := s3client.PutObject(&putObject) + if err != nil { + t.Fatalf("failed to put object in %v: %v", sc.storageClass, err) + } + } + + folderLayout := []fs.PathOp{ + fs.WithFile("testfile1.txt", "this is a test file", timestamp), + fs.WithFile("testfile2.txt", "this is a test file", timestamp), + } + + // put objects in local folder + + workdir := fs.NewDir(t, "somedir", folderLayout...) + defer workdir.Remove() + + bucketPath := fmt.Sprintf("s3://%v", bucket) + src := fmt.Sprintf("%s/*", bucketPath) + dst := fmt.Sprintf("%v/", workdir.Path()) + dst = filepath.ToSlash(dst) + + cmd := s5cmd("sync", src, dst) + result := icmd.RunCmd(cmd) + + // there will be no stdout + result.Assert(t, icmd.Success) + assertLines(t, result.Stdout(), map[int]compareFunc{}) + + expectedFiles := []fs.PathOp{ + fs.WithFile("testfile1.txt", "this is a test file"), + fs.WithFile("testfile2.txt", "this is a test file"), + } + + // expected folder structure without the timestamp. + expected := fs.Expected(t, expectedFiles...) 
+ assert.Assert(t, fs.Equal(workdir.Path(), expected)) + + // assert s3 objects in destination + for _, sc := range storageClassesAndFile { + assert.Assert(t, ensureS3Object(s3client, bucket, sc.filename, sc.content, ensureStorageClass(sc.storageClass))) + } +} + +// sync s3://srcbucket/* s3://dstbucket/ (same objects, different size, different content, different or same storage class) +func TestSyncS3BucketToS3BucketIsStorageClassChangingWithDifferentSizeAndContent(t *testing.T) { + t.Parallel() + s3client, s5cmd := setup(t) + + srcbucket := s3BucketFromTestName(t) + dstbucket := s3BucketFromTestNameWithPrefix(t, "dst") + + createBucket(t, s3client, srcbucket) + createBucket(t, s3client, dstbucket) + + storageClassesAndFile := []struct { + srcStorageClass string + dstStorageClass string + filename string + srcContent string + dstContent string + }{ + {"STANDARD", "STANDARD", "testfile1.txt", "this is an updated test file", "this is a test file"}, + {"STANDARD", "GLACIER", "testfile2.txt", "this is an updated test file", "this is a test file"}, + {"GLACIER", "STANDARD", "testfile3.txt", "this is an updated test file", "this is a test file"}, + {"GLACIER", "GLACIER", "testfile4.txt", "this is an updated test file", "this is a test file"}, + } + + for _, sc := range storageClassesAndFile { + + putFile(t, s3client, srcbucket, sc.filename, sc.srcContent, putStorageClass(sc.srcStorageClass)) + + putObject := s3.PutObjectInput{ + Bucket: &dstbucket, + Key: &sc.filename, + Body: strings.NewReader(sc.dstContent), + StorageClass: aws.String(sc.dstStorageClass), + } + + _, err := s3client.PutObject(&putObject) + if err != nil { + t.Fatalf("failed to put object in %v: %v", sc.dstStorageClass, err) + } + } + + bucketPath := fmt.Sprintf("s3://%v", srcbucket) + src := fmt.Sprintf("%s/*", bucketPath) + dst := fmt.Sprintf("s3://%v/", dstbucket) + + cmd := s5cmd("sync", src, dst) + + result := icmd.RunCmd(cmd) + result.Assert(t, icmd.Success) + + assertLines(t, result.Stdout(), map[int]compareFunc{ + 0: equals(`cp %v/testfile1.txt %vtestfile1.txt`, bucketPath, dst), + 1: equals(`cp %v/testfile2.txt %vtestfile2.txt`, bucketPath, dst), + }, sortInput(true)) + + // assert s3 objects in source + for _, sc := range storageClassesAndFile { + assert.Assert(t, ensureS3Object(s3client, srcbucket, sc.filename, sc.srcContent, ensureStorageClass(sc.srcStorageClass))) + } + + // assert s3 objects in destination (file1 and file2 should be updated and file3 and file4 should be same as before) + assert.Assert(t, ensureS3Object(s3client, dstbucket, "testfile1.txt", "this is an updated test file", ensureStorageClass("STANDARD"))) + assert.Assert(t, ensureS3Object(s3client, dstbucket, "testfile2.txt", "this is an updated test file", ensureStorageClass("STANDARD"))) + assert.Assert(t, ensureS3Object(s3client, dstbucket, "testfile3.txt", "this is a test file", ensureStorageClass("STANDARD"))) + assert.Assert(t, ensureS3Object(s3client, dstbucket, "testfile4.txt", "this is a test file", ensureStorageClass("GLACIER"))) +} + +// sync dir s3://destbucket/ (same objects, different size, different content, different or same storage class) +func TestSyncLocalFolderToS3BucketIsStorageClassChangingWithDifferentSizeAndContent(t *testing.T) { + t.Parallel() + + s3client, s5cmd := setup(t) + + bucket := s3BucketFromTestName(t) + createBucket(t, s3client, bucket) + + folderLayout := []fs.PathOp{ + fs.WithFile("testfile1.txt", "this is an updated test file"), + fs.WithFile("testfile2.txt", "this is an updated test file"), + } + + workdir := fs.NewDir(t, "somedir", folderLayout...) + defer workdir.Remove() + + storageClassesAndFile := []struct { + storageClass string + filename string + content string + }{ + {"STANDARD", "testfile1.txt", "this is a test file"}, + {"GLACIER", "testfile2.txt", "this is a test file"}, + } + + for _, sc := range storageClassesAndFile { + putFile(t, s3client, bucket, sc.filename, sc.content, putStorageClass(sc.storageClass)) + } + + src := fmt.Sprintf("%v/", workdir.Path()) + src = filepath.ToSlash(src) + dst := fmt.Sprintf("s3://%v/", bucket) + + cmd := s5cmd("sync", src, dst) + result := icmd.RunCmd(cmd) + + result.Assert(t, icmd.Success) + + assertLines(t, result.Stdout(), map[int]compareFunc{ + 0: equals(`cp %vtestfile1.txt %vtestfile1.txt`, src, dst), + 1: equals(`cp %vtestfile2.txt %vtestfile2.txt`, src, dst), + }, sortInput(true)) + + // expected folder structure without the timestamp. + expected := fs.Expected(t, folderLayout...) + assert.Assert(t, fs.Equal(workdir.Path(), expected)) + + // assert s3 objects in destination + assert.Assert(t, ensureS3Object(s3client, bucket, "testfile1.txt", "this is an updated test file", ensureStorageClass("STANDARD"))) + assert.Assert(t, ensureS3Object(s3client, bucket, "testfile2.txt", "this is an updated test file", ensureStorageClass("STANDARD"))) +} + +// sync s3://bucket/* dir (same objects, different size, different content, different or same storage class) +func TestSyncS3BucketToLocalFolderIsStorageClassChangingWithDifferentSizeAndContent(t *testing.T) { + t.Parallel() + + s3client, s5cmd := setup(t) + + bucket := s3BucketFromTestName(t) + createBucket(t, s3client, bucket) + + storageClassesAndFile := []struct { + storageClass string + filename string + content string + }{ + {"STANDARD", "testfile1.txt", "this is an updated test file"}, + {"GLACIER", "testfile2.txt", "this is an updated test file"}, + } + + for _, sc := range storageClassesAndFile { + putFile(t, s3client, bucket, sc.filename, sc.content, putStorageClass(sc.storageClass)) + } + + folderLayout := []fs.PathOp{ + fs.WithFile("testfile1.txt", "this is a test file"), + fs.WithFile("testfile2.txt", "this is a test file"), + } + + // put objects in local folder + workdir := fs.NewDir(t, "somedir", folderLayout...) + defer workdir.Remove() + + bucketPath := fmt.Sprintf("s3://%v", bucket) + src := fmt.Sprintf("%s/*", bucketPath) + dst := fmt.Sprintf("%v/", workdir.Path()) + dst = filepath.ToSlash(dst) + + cmd := s5cmd("sync", src, dst) + + result := icmd.RunCmd(cmd) + + result.Assert(t, icmd.Success) + + // testfile1.txt should be updated and testfile2.txt shouldn't be updated because it is in glacier. + assertLines(t, result.Stdout(), map[int]compareFunc{ + 0: equals(`cp %v/testfile1.txt %vtestfile1.txt`, bucketPath, dst), + }, sortInput(true)) + + expectedFolderLayout := []fs.PathOp{ + fs.WithFile("testfile1.txt", "this is an updated test file"), + fs.WithFile("testfile2.txt", "this is a test file"), + } + + // expected folder structure without the timestamp. + expected := fs.Expected(t, expectedFolderLayout...)
+ assert.Assert(t, fs.Equal(workdir.Path(), expected)) +} + +// sync --delete s3://bucket/* s3://destbucket/ (storage class test) +func TestSyncS3BucketToS3BucketWithDeleteStorageClass(t *testing.T) { + t.Parallel() + + s3client, s5cmd := setup(t) + + srcbucket := s3BucketFromTestName(t) + dstbucket := s3BucketFromTestNameWithPrefix(t, "dst") + + createBucket(t, s3client, srcbucket) + createBucket(t, s3client, dstbucket) + + dstStorageClassesAndFile := []struct { + storageClass string + filename string + content string + }{ + {"STANDARD", "testfile1.txt", "this is a test file"}, + {"GLACIER", "testfile2.txt", "this is a test file"}, + } + + for _, sc := range dstStorageClassesAndFile { + putFile(t, s3client, dstbucket, sc.filename, sc.content, putStorageClass(sc.storageClass)) + } + + bucketPath := fmt.Sprintf("s3://%v", srcbucket) + src := fmt.Sprintf("%s/*", bucketPath) + dst := fmt.Sprintf("s3://%v/", dstbucket) + + cmd := s5cmd("sync", "--delete", src, dst) + + result := icmd.RunCmd(cmd) + + result.Assert(t, icmd.Success) + + assertLines(t, result.Stdout(), map[int]compareFunc{ + 0: equals(`rm %vtestfile1.txt`, dst), + 1: equals(`rm %vtestfile2.txt`, dst), + }, sortInput(true)) + + // assert the objects were deleted from the destination bucket + for _, sc := range dstStorageClassesAndFile { + err := ensureS3Object(s3client, dstbucket, sc.filename, sc.content, ensureStorageClass(sc.storageClass)) + assertError(t, err, errS3NoSuchKey) + } +} + +// sync --delete dir s3://destbucket/ (storage class test) +func TestSyncLocalFolderToS3BucketWithDeleteStorageClass(t *testing.T) { + t.Parallel() + + s3client, s5cmd := setup(t) + + bucket := s3BucketFromTestName(t) + createBucket(t, s3client, bucket) + + storageClassesAndFile := []struct { + storageClass string + filename string + content string + }{ + {"STANDARD", "testfile1.txt", "this is a test file"}, + {"GLACIER", "testfile2.txt", "this is a test file"}, + } + + for _, sc := range storageClassesAndFile { + putFile(t, s3client, bucket, sc.filename, sc.content, putStorageClass(sc.storageClass)) + } + + workdir := fs.NewDir(t, "somedir") + defer workdir.Remove() + + src := fmt.Sprintf("%v/", workdir.Path()) + src = filepath.ToSlash(src) + dst := fmt.Sprintf("s3://%v/", bucket) + + cmd := s5cmd("sync", "--delete", src, dst) + + result := icmd.RunCmd(cmd) + + result.Assert(t, icmd.Success) + + assertLines(t, result.Stdout(), map[int]compareFunc{ + 0: equals(`rm %vtestfile1.txt`, dst), + 1: equals(`rm %vtestfile2.txt`, dst), + }, sortInput(true)) + + // assert the objects were deleted from the bucket + for _, sc := range storageClassesAndFile { + err := ensureS3Object(s3client, bucket, sc.filename, sc.content, ensureStorageClass(sc.storageClass)) + assertError(t, err, errS3NoSuchKey) + } +} + // sync --size-only folder/ s3://bucket/ func TestSyncLocalFolderToS3BucketSameObjectsSizeOnly(t *testing.T) { t.Parallel() @@ -1157,14 +1654,14 @@ func TestSyncLocalFolderToS3BucketSameObjectsSizeOnly(t *testing.T) { workdir := fs.NewDir(t, "somedir", folderLayout...)
defer workdir.Remove() - S3Content := map[string]string{ + s3Content := map[string]string{ "test.py": "D: this is a python file", "testfile.txt": "D: this is an updated test file", "readme.md": "D: this is a readme file", "a/another_test_file.txt": "D: yet another txt file", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -1286,11 +1783,11 @@ func TestSyncS3BucketToLocalWithDelete(t *testing.T) { bucket := s3BucketFromTestName(t) createBucket(t, s3client, bucket) - S3Content := map[string]string{ + s3Content := map[string]string{ "contributing.md": "S: this is a readme file", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -1331,7 +1828,7 @@ func TestSyncS3BucketToLocalWithDelete(t *testing.T) { assert.Assert(t, fs.Equal(workdir.Path(), expected)) // assert s3 - for key, content := range S3Content { + for key, content := range s3Content { assert.Assert(t, ensureS3Object(s3client, bucket, key, content)) } } @@ -1344,11 +1841,11 @@ func TestSyncS3BucketToEmptyLocalWithDelete(t *testing.T) { bucket := s3BucketFromTestName(t) createBucket(t, s3client, bucket) - S3Content := map[string]string{ + s3Content := map[string]string{ "contributing.md": "S: this is a readme file", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -1377,7 +1874,7 @@ func TestSyncS3BucketToEmptyLocalWithDelete(t *testing.T) { assert.Assert(t, fs.Equal(workdir.Path(), expected)) // assert s3 - for key, content := range S3Content { + for key, content := range s3Content { assert.Assert(t, ensureS3Object(s3client, bucket, key, content)) } } @@ -1400,13 +1897,13 @@ func TestSyncLocalToS3BucketWithDelete(t *testing.T) { workdir := fs.NewDir(t, "somedir", folderLayout...) defer workdir.Remove() - S3Content := map[string]string{ + s3Content := map[string]string{ "readme.md": "D: this is a readme file", "dir/main.py": "D: this is a python file", "testfile.txt": "D: this is a test file", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -1443,7 +1940,7 @@ func TestSyncLocalToS3BucketWithDelete(t *testing.T) { } // assert s3 objects should be deleted. - for key, content := range S3Content { + for key, content := range s3Content { err := ensureS3Object(s3client, bucket, key, content) if err == nil { t.Errorf("File %v is not deleted from remote : %v\n", key, err) @@ -2014,14 +2511,14 @@ func TestSyncS3BucketToEmptyS3BucketWithExitOnErrorFlag(t *testing.T) { createBucket(t, s3client, bucket) createBucket(t, s3client, dstbucket) - S3Content := map[string]string{ + s3Content := map[string]string{ "testfile.txt": "S: this is a test file", "readme.md": "S: this is a readme file", "a/another_test_file.txt": "S: yet another txt file", "abc/def/test.py": "S: file in nested folders", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -2042,12 +2539,12 @@ func TestSyncS3BucketToEmptyS3BucketWithExitOnErrorFlag(t *testing.T) { }, sortInput(true)) // assert s3 objects in source bucket. 
- for key, content := range S3Content { + for key, content := range s3Content { assert.Assert(t, ensureS3Object(s3client, bucket, key, content)) } // assert s3 objects in dest bucket - for key, content := range S3Content { + for key, content := range s3Content { key = fmt.Sprintf("%s/%s", prefix, key) // add the prefix assert.Assert(t, ensureS3Object(s3client, dstbucket, key, content)) } @@ -2066,14 +2563,14 @@ func TestSyncExitOnErrorS3BucketToS3BucketThatDoesNotExist(t *testing.T) { createBucket(t, s3client, bucket) - S3Content := map[string]string{ + s3Content := map[string]string{ "testfile.txt": "S: this is a test file", "readme.md": "S: this is a readme file", "a/another_test_file.txt": "S: yet another txt file", "abc/def/test.py": "S: file in nested folders", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } @@ -2103,14 +2600,14 @@ func TestSyncS3BucketToS3BucketThatDoesNotExist(t *testing.T) { createBucket(t, s3client, bucket) - S3Content := map[string]string{ + s3Content := map[string]string{ "testfile.txt": "S: this is a test file", "readme.md": "S: this is a readme file", "a/another_test_file.txt": "S: yet another txt file", "abc/def/test.py": "S: file in nested folders", } - for filename, content := range S3Content { + for filename, content := range s3Content { putFile(t, s3client, bucket, filename, content) } diff --git a/e2e/util_test.go b/e2e/util_test.go index df854bede..31a8cd162 100644 --- a/e2e/util_test.go +++ b/e2e/util_test.go @@ -705,7 +705,12 @@ func ensureS3Object( } if opts.storageClass != nil { - if diff := cmp.Diff(opts.storageClass, output.StorageClass); diff != "" { + storageClassOfOutput := aws.String("STANDARD") + if output.StorageClass != nil { + storageClassOfOutput = output.StorageClass + } + + if diff := cmp.Diff(opts.storageClass, storageClassOfOutput); diff != "" { return fmt.Errorf("storage-class of %v/%v: (-want +got):\n%v", bucket, key, diff) } } @@ -743,6 +748,12 @@ func putArbitraryMetadata(metadata map[string]*string) putOption { } } +func putStorageClass(storageClass string) putOption { + return func(opts *s3.PutObjectInput) { + opts.StorageClass = aws.String(storageClass) + } +} + func putFile(t *testing.T, client *s3.S3, bucket string, filename string, content string, opts ...putOption) { t.Helper() input := &s3.PutObjectInput{ diff --git a/error/error.go b/error/error.go index e6dddfd98..fa7925ba4 100644 --- a/error/error.go +++ b/error/error.go @@ -79,13 +79,17 @@ var ( // ErrObjectIsNewerAndSizesMatch indicates the specified object is newer or same age and sizes of objects match. ErrObjectIsNewerAndSizesMatch = fmt.Errorf("%v and %v", ErrObjectIsNewer, ErrObjectSizesMatch) + + // ErrorObjectIsGlacier indicates the object is in the Glacier storage class. + ErrorObjectIsGlacier = fmt.Errorf("object is in Glacier storage class") ) -// IsWarning checks if given error is either ErrObjectExists, -// ErrObjectIsNewer or ErrObjectSizesMatch. +// IsWarning checks if the given error is one of ErrObjectExists, +// ErrObjectIsNewer, ErrObjectSizesMatch, ErrObjectIsNewerAndSizesMatch or +// ErrorObjectIsGlacier.
func IsWarning(err error) bool { switch err { - case ErrObjectExists, ErrObjectIsNewer, ErrObjectSizesMatch, ErrObjectIsNewerAndSizesMatch: + case ErrObjectExists, ErrObjectIsNewer, ErrObjectSizesMatch, ErrObjectIsNewerAndSizesMatch, ErrorObjectIsGlacier: return true } diff --git a/go.mod b/go.mod index e78da34d4..d59733835 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/hashicorp/go-multierror v1.1.1 github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 - github.com/igungor/gofakes3 v0.0.16 + github.com/igungor/gofakes3 v0.0.18 github.com/karrick/godirwalk v1.15.3 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/lanrat/extsort v1.0.0 diff --git a/go.sum b/go.sum index 6eadffd54..bdf3b8a18 100644 --- a/go.sum +++ b/go.sum @@ -24,8 +24,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 h1:VHgatEHNcBFEB7inlalqfNqw65aNkM1lGX2yt3NmbS8= github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= -github.com/igungor/gofakes3 v0.0.16 h1:aMipkwE9s2u4T6GfgIPZ8ngJcReYsJvGRm6c4/lLAfY= -github.com/igungor/gofakes3 v0.0.16/go.mod h1:+rwAKRO9RTGCIeE8SRvRPLSj7PVhaMBLlm1zPXzu7Cs= +github.com/igungor/gofakes3 v0.0.18 h1:LTJV11PrTJ6DGEiCEKXh75wFZtwv773Nof0FB9Ef5qc= +github.com/igungor/gofakes3 v0.0.18/go.mod h1:+rwAKRO9RTGCIeE8SRvRPLSj7PVhaMBLlm1zPXzu7Cs= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= diff --git a/vendor/github.com/igungor/gofakes3/backend.go b/vendor/github.com/igungor/gofakes3/backend.go index d55798a0d..c537e6d5f 100644 --- a/vendor/github.com/igungor/gofakes3/backend.go +++ b/vendor/github.com/igungor/gofakes3/backend.go @@ -13,12 +13,13 @@ const ( // // You MUST always call Contents.Close() otherwise you may leak resources. type Object struct { - Name string - Metadata map[string]string - Size int64 - Contents io.ReadCloser - Hash []byte - Range *ObjectRange + Name string + Metadata map[string]string + Size int64 + Contents io.ReadCloser + Hash []byte + Range *ObjectRange + StorageClass StorageClass // VersionID will be empty if bucket versioning has not been enabled. VersionID VersionID @@ -226,7 +227,7 @@ type Backend interface { // // The size can be used if the backend needs to read the whole reader; use // gofakes3.ReadAll() for this job rather than ioutil.ReadAll(). 
- PutObject(bucketName, key string, meta map[string]string, input io.Reader, size int64) (PutObjectResult, error) + PutObject(bucketName, key string, meta map[string]string, input io.Reader, size int64, storageClass StorageClass) (PutObjectResult, error) DeleteMulti(bucketName string, objects ...string) (MultiDeleteResult, error) } diff --git a/vendor/github.com/igungor/gofakes3/backend/s3bolt/backend.go b/vendor/github.com/igungor/gofakes3/backend/s3bolt/backend.go index b37b40ebd..5fad0bb6e 100644 --- a/vendor/github.com/igungor/gofakes3/backend/s3bolt/backend.go +++ b/vendor/github.com/igungor/gofakes3/backend/s3bolt/backend.go @@ -57,6 +57,10 @@ func New(bolt *bolt.DB, opts ...Option) *Backend { return b } +func (db *Backend) Close() error { + return db.bolt.Close() +} + // metaBucket returns a utility that manages access to the metadata bucket. // The returned struct is valid only for the lifetime of the bolt.Tx. // The metadata bucket may not exist if this is an older database. @@ -168,6 +172,7 @@ func (db *Backend) ListBucket(name string, prefix *gofakes3.Prefix, page gofakes ETag: `"` + hex.EncodeToString(b.Hash[:]) + `"`, Size: b.Size, LastModified: gofakes3.NewContentTime(b.LastModified.UTC()), + StorageClass: gofakes3.StorageClass(b.StorageClass), } objects.Add(item) } @@ -295,6 +300,7 @@ func (db *Backend) PutObject( bucketName, objectName string, meta map[string]string, input io.Reader, size int64, + storageClass gofakes3.StorageClass, ) (result gofakes3.PutObjectResult, err error) { bts, err := gofakes3.ReadAll(input, size) @@ -318,6 +324,7 @@ func (db *Backend) PutObject( LastModified: mod, Contents: bts, Hash: hash[:], + StorageClass: storageClass, }) if err != nil { return err diff --git a/vendor/github.com/igungor/gofakes3/backend/s3bolt/schema.go b/vendor/github.com/igungor/gofakes3/backend/s3bolt/schema.go index f5908f02e..e093db714 100644 --- a/vendor/github.com/igungor/gofakes3/backend/s3bolt/schema.go +++ b/vendor/github.com/igungor/gofakes3/backend/s3bolt/schema.go @@ -27,6 +27,7 @@ type boltObject struct { Size int64 Contents []byte Hash []byte + StorageClass gofakes3.StorageClass `bson:",omitempty"` } func (b *boltObject) Object(objectName string, rangeRequest *gofakes3.ObjectRangeRequest) (*gofakes3.Object, error) { @@ -42,12 +43,13 @@ func (b *boltObject) Object(objectName string, rangeRequest *gofakes3.ObjectRang } return &gofakes3.Object{ - Name: objectName, - Metadata: b.Metadata, - Size: b.Size, - Contents: s3io.ReaderWithDummyCloser{bytes.NewReader(data)}, - Range: rnge, - Hash: b.Hash, + Name: objectName, + Metadata: b.Metadata, + Size: b.Size, + Contents: s3io.ReaderWithDummyCloser{bytes.NewReader(data)}, + Range: rnge, + Hash: b.Hash, + StorageClass: b.StorageClass, }, nil } diff --git a/vendor/github.com/igungor/gofakes3/backend/s3mem/backend.go b/vendor/github.com/igungor/gofakes3/backend/s3mem/backend.go index 27c7a4f8a..21c5bb19e 100644 --- a/vendor/github.com/igungor/gofakes3/backend/s3mem/backend.go +++ b/vendor/github.com/igungor/gofakes3/backend/s3mem/backend.go @@ -217,7 +217,7 @@ func (db *Backend) GetObject(bucketName, objectName string, rangeRequest *gofake return result, nil } -func (db *Backend) PutObject(bucketName, objectName string, meta map[string]string, input io.Reader, size int64) (result gofakes3.PutObjectResult, err error) { +func (db *Backend) PutObject(bucketName, objectName string, meta map[string]string, input io.Reader, size int64, storageClass gofakes3.StorageClass) (result gofakes3.PutObjectResult, err error) { // No need to 
lock the backend while we read the data into memory; it holds // the write lock open unnecessarily, and could be blocked for an unreasonably // long time by a connection timing out: diff --git a/vendor/github.com/igungor/gofakes3/error.go b/vendor/github.com/igungor/gofakes3/error.go index d0a71f77e..c85974946 100644 --- a/vendor/github.com/igungor/gofakes3/error.go +++ b/vendor/github.com/igungor/gofakes3/error.go @@ -88,6 +88,8 @@ const ( ErrNotImplemented ErrorCode = "NotImplemented" ErrInternal ErrorCode = "InternalError" + + ErrInvalidStorageClass ErrorCode = "InvalidStorageClass" ) // INTERNAL errors! These are not part of the S3 interface, they are codes diff --git a/vendor/github.com/igungor/gofakes3/gofakes3.go b/vendor/github.com/igungor/gofakes3/gofakes3.go index d76c33254..372ff4927 100644 --- a/vendor/github.com/igungor/gofakes3/gofakes3.go +++ b/vendor/github.com/igungor/gofakes3/gofakes3.go @@ -534,7 +534,7 @@ func (g *GoFakeS3) createObjectBrowserUpload(bucket string, w http.ResponseWrite return err } - result, err := g.storage.PutObject(bucket, key, meta, rdr, fileHeader.Size) + result, err := g.storage.PutObject(bucket, key, meta, rdr, fileHeader.Size, StorageClass(r.Header.Get("x-amz-storage-class"))) if err != nil { return err } @@ -618,7 +618,11 @@ func (g *GoFakeS3) createObject(bucket, object string, w http.ResponseWriter, r return err } - result, err := g.storage.PutObject(bucket, object, meta, rdr, size) + if r.Header.Get("x-amz-storage-class") == "" { + r.Header.Set("x-amz-storage-class", "STANDARD") + } + + result, err := g.storage.PutObject(bucket, object, meta, rdr, size, StorageClass(r.Header.Get("x-amz-storage-class"))) if err != nil { return err } @@ -680,7 +684,7 @@ func (g *GoFakeS3) copyObject(bucket, object string, meta map[string]string, w h } } - result, err := g.storage.PutObject(bucket, object, meta, srcObj.Contents, srcObj.Size) + result, err := g.storage.PutObject(bucket, object, meta, srcObj.Contents, srcObj.Size, StorageClass(r.Header.Get("x-amz-storage-class"))) if err != nil { return err } @@ -895,7 +899,7 @@ func (g *GoFakeS3) completeMultipartUpload(bucket, object string, uploadID Uploa return err } - result, err := g.storage.PutObject(bucket, object, upload.Meta, bytes.NewReader(fileBody), int64(len(fileBody))) + result, err := g.storage.PutObject(bucket, object, upload.Meta, bytes.NewReader(fileBody), int64(len(fileBody)), StorageClass(r.Header.Get("x-amz-storage-class"))) if err != nil { return err } diff --git a/vendor/github.com/igungor/gofakes3/messages.go b/vendor/github.com/igungor/gofakes3/messages.go index 17138fe51..258fed4b3 100644 --- a/vendor/github.com/igungor/gofakes3/messages.go +++ b/vendor/github.com/igungor/gofakes3/messages.go @@ -96,7 +96,7 @@ func NewContentTime(t time.Time) ContentTime { func (c ContentTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error { // This is the format expected by the aws xml code, not the default. 
if !c.IsZero() { - var s = c.Format("2006-01-02T15:04:05.999Z") + s := c.Format("2006-01-02T15:04:05.999Z") return e.EncodeElement(s, start) } return nil @@ -127,7 +127,7 @@ func (d MultiDeleteResult) AsError() error { if len(d.Error) == 0 { return nil } - var strs = make([]string, 0, len(d.Error)) + strs := make([]string, 0, len(d.Error)) for _, er := range d.Error { strs = append(strs, er.String()) } @@ -256,6 +256,7 @@ type DeleteMarker struct { VersionID VersionID `xml:"VersionId"` IsLatest bool `xml:"IsLatest"` LastModified ContentTime `xml:"LastModified,omitempty"` + StorageClass string `xml:"StorageClass"` Owner *UserInfo `xml:"Owner,omitempty"` } @@ -338,7 +339,6 @@ func NewListBucketVersionsResult( prefix *Prefix, page *ListBucketVersionsPage, ) *ListBucketVersionsResult { - result := &ListBucketVersionsResult{ Xmlns: "http://s3.amazonaws.com/doc/2006-03-01/", Name: bucketName, @@ -428,6 +428,7 @@ type ListMultipartUploadPartItem struct { LastModified ContentTime `xml:"LastModified,omitempty"` ETag string `xml:"ETag,omitempty"` Size int64 `xml:"Size"` + StorageClass string `xml:"StorageClass,omitempty"` } // CopyObjectResult contains the response from a CopyObject operation. @@ -435,6 +436,7 @@ type CopyObjectResult struct { XMLName xml.Name `xml:"CopyObjectResult"` ETag string `xml:"ETag,omitempty"` LastModified ContentTime `xml:"LastModified,omitempty"` + StorageClass string `xml:"StorageClass,omitempty"` } // MFADeleteStatus is used by VersioningConfiguration. diff --git a/vendor/github.com/igungor/gofakes3/uploader.go b/vendor/github.com/igungor/gofakes3/uploader.go index c2ce86d48..b1778d072 100644 --- a/vendor/github.com/igungor/gofakes3/uploader.go +++ b/vendor/github.com/igungor/gofakes3/uploader.go @@ -197,13 +197,12 @@ func (u *uploader) ListParts(bucket, object string, uploadID UploadID, marker in return nil, err } - var result = ListMultipartUploadPartsResult{ + result := ListMultipartUploadPartsResult{ Bucket: bucket, Key: object, UploadID: uploadID, MaxParts: limit, PartNumberMarker: marker, - StorageClass: "STANDARD", // FIXME } var cnt int64 diff --git a/vendor/modules.txt b/vendor/modules.txt index c19a12140..650fcfcf6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -91,7 +91,7 @@ github.com/hashicorp/go-multierror # github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 ## explicit github.com/iancoleman/strcase -# github.com/igungor/gofakes3 v0.0.16 +# github.com/igungor/gofakes3 v0.0.18 ## explicit; go 1.13 github.com/igungor/gofakes3 github.com/igungor/gofakes3/backend/s3bolt
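Taken together, the storage class now travels end to end: the tests stage objects with an explicit class, the vendored gofakes3 server reads the x-amz-storage-class header (defaulting it to STANDARD in createObject) and threads it through Backend.PutObject, and ensureS3Object treats a missing class in the response as STANDARD, consistent with S3 omitting the header for the default class. Below is a minimal sketch of the client-side pattern the new tests rely on; the helper name and its arguments are placeholders, and only SDK calls already used in this diff appear.

```go
package e2e

import (
	"strings"
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// putGlacierObject stages an object with an explicit storage class, mirroring
// the pattern the e2e tests above use against the gofakes3 server. The helper
// is hypothetical; s3client is an initialized *s3.S3 client.
func putGlacierObject(t *testing.T, s3client *s3.S3, bucket, key, content string) {
	t.Helper()
	input := &s3.PutObjectInput{
		Bucket:       aws.String(bucket),
		Key:          aws.String(key),
		Body:         strings.NewReader(content),
		StorageClass: aws.String("GLACIER"),
	}
	if _, err := s3client.PutObject(input); err != nil {
		t.Fatalf("failed to put %s/%s in GLACIER: %v", bucket, key, err)
	}
}
```

A test would call putGlacierObject(t, s3client, bucket, "testfile2.txt", "this is a test file") before running s5cmd sync, then verify the class survived the round trip with ensureS3Object(s3client, bucket, "testfile2.txt", "this is a test file", ensureStorageClass("GLACIER")).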