From c17d4475a10eec6aa28c674e18b9779fb62681ae Mon Sep 17 00:00:00 2001
From: dantb
Date: Tue, 9 Apr 2024 00:31:26 +0200
Subject: [PATCH] Tidy

---
 .../operations/s3/S3StorageSaveFile.scala     |  28 +++--
 .../s3/client/S3StorageClient.scala           |   4 +-
 ...lStack.scala => S3StorageAccessSpec.scala} |   2 +-
 ...ack.scala => S3StorageFetchSaveSpec.scala} |   2 +-
 .../s3/S3StorageSaveAndFetchFileSpec.scala    | 101 ------------------
 tests/docker/config/delta-postgres.conf       |   2 +-
 6 files changed, 18 insertions(+), 121 deletions(-)
 rename delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/{S3StorageAccessSpecLocalStack.scala => S3StorageAccessSpec.scala} (97%)
 rename delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/{S3StorageFetchSaveSpecLocalStack.scala => S3StorageFetchSaveSpec.scala} (98%)
 delete mode 100644 delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageSaveAndFetchFileSpec.scala

diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageSaveFile.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageSaveFile.scala
index 87f217cebc..a8afd3beef 100644
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageSaveFile.scala
+++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageSaveFile.scala
@@ -58,13 +58,7 @@ final class S3StorageSaveFile(s3StorageClient: S3StorageClient, storage: S3Stora
 
     (for {
       _ <- log(key, s"Checking for object existence")
-      _ <- getFileAttributes(key).redeemWith(
-             {
-               case _: NoSuchKeyException => IO.unit
-               case e => IO.raiseError(e)
-             },
-             _ => IO.raiseError(ResourceAlreadyExists(key))
-           )
+      _ <- validateObjectDoesNotExist(key)
       _ <- log(key, s"Beginning multipart upload")
       maybeEtags <- uploadFileMultipart(fileData, key)
       _ <- log(key, s"Finished multipart upload. Etag by part: $maybeEtags")
@@ -73,6 +67,15 @@ final class S3StorageSaveFile(s3StorageClient: S3StorageClient, storage: S3Stora
       .adaptError { err => UnexpectedSaveError(key, err.getMessage) }
   }
 
+  private def validateObjectDoesNotExist(key: String) =
+    getFileAttributes(key).redeemWith(
+      {
+        case _: NoSuchKeyException => IO.unit
+        case e => IO.raiseError(e)
+      },
+      _ => IO.raiseError(ResourceAlreadyExists(key))
+    )
+
   private def convertStream(source: Source[ByteString, Any]): Stream[IO, Byte] =
     StreamConverter(
       source
@@ -80,8 +83,6 @@ final class S3StorageSaveFile(s3StorageClient: S3StorageClient, storage: S3Stora
         .mapMaterializedValue(_ => NotUsed)
     )
 
-  // TODO test etags and file round trip for true multipart uploads.
-  // It's not clear whether the last value in the stream will be the final aggregate checksum
   private def uploadFileMultipart(fileData: Stream[IO, Byte], key: String): IO[List[Option[ETag]]] =
     fileData
       .through(
@@ -117,8 +118,8 @@ final class S3StorageSaveFile(s3StorageClient: S3StorageClient, storage: S3Stora
       case Some(onlyPartETag :: Nil) =>
         // TODO our tests expect specific values for digests and the only algorithm currently used is SHA-256.
        // If we want to continue to check this, but allow for different algorithms, this needs to be abstracted
-        // in the tests and checked on specific file contents.
-        // The result will depend on whether we use a multipart upload or a standard put object.
+        // in the tests and verified for specific file contents.
+        // The result will also depend on whether we use a multipart upload or a standard put object.
         for {
           _ <- log(key, s"Received ETag for single part upload: $onlyPartETag")
           fileSize <- computeSize(bytes)
@@ -161,8 +162,5 @@ final class S3StorageSaveFile(s3StorageClient: S3StorageClient, storage: S3Stora
   private def raiseUnexpectedErr[A](key: String, msg: String): IO[A] =
     IO.raiseError(UnexpectedSaveError(key, msg))
 
-  private def log(key: String, msg: String) = {
-    val thing = s"Bucket: ${bucket.value}. Key: $key. $msg"
-    logger.info(s"Bucket: ${bucket.value}. Key: $key. $msg") >> IO.println(thing)
-  }
+  private def log(key: String, msg: String) = logger.info(s"Bucket: ${bucket.value}. Key: $key. $msg")
 }
diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/client/S3StorageClient.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/client/S3StorageClient.scala
index ba8843e0d0..4b095ae3cd 100644
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/client/S3StorageClient.scala
+++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/client/S3StorageClient.scala
@@ -58,8 +58,8 @@ object S3StorageClient {
 
     def readFile(bucket: String, fileKey: String): fs2.Stream[IO, Byte] =
       for {
-        bk <- Stream.fromEither[IO].apply(refineV[NonEmpty](bucket).leftMap(e => new IllegalArgumentException(e)))
-        fk <- Stream.fromEither[IO].apply(refineV[NonEmpty](fileKey).leftMap(e => new IllegalArgumentException(e)))
+        bk <- Stream.fromEither[IO](refineV[NonEmpty](bucket).leftMap(e => new IllegalArgumentException(e)))
+        fk <- Stream.fromEither[IO](refineV[NonEmpty](fileKey).leftMap(e => new IllegalArgumentException(e)))
         bytes <- s3.readFile(BucketName(bk), FileKey(fk))
       } yield bytes
 
diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageAccessSpecLocalStack.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageAccessSpec.scala
similarity index 97%
rename from delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageAccessSpecLocalStack.scala
rename to delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageAccessSpec.scala
index cf1962786f..a52da0f6c9 100644
--- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageAccessSpecLocalStack.scala
+++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageAccessSpec.scala
@@ -11,7 +11,7 @@ import software.amazon.awssdk.services.s3.model.{CreateBucketRequest, DeleteBuck
 
 import scala.concurrent.duration.{Duration, DurationInt}
 
-class S3StorageAccessSpecLocalStack
+class S3StorageAccessSpec
     extends NexusSuite
     with StorageFixtures
     with LocalStackS3StorageClient.Fixture
diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageFetchSaveSpecLocalStack.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageFetchSaveSpec.scala
similarity index 98%
rename from delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageFetchSaveSpecLocalStack.scala
rename to delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageFetchSaveSpec.scala
index 0a6830797b..0ebf790eb8 100644
--- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageFetchSaveSpecLocalStack.scala
+++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageFetchSaveSpec.scala
@@ -24,7 +24,7 @@ import java.util.UUID
 import scala.concurrent.duration.{Duration, DurationInt}
 import scala.jdk.CollectionConverters.ListHasAsScala
 
-class S3StorageFetchSaveSpecLocalStack
+class S3StorageFetchSaveSpec
     extends NexusSuite
     with StorageFixtures
    with ActorSystemSetup.Fixture
diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageSaveAndFetchFileSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageSaveAndFetchFileSpec.scala
deleted file mode 100644
index 2f6269b6ba..0000000000
--- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageSaveAndFetchFileSpec.scala
+++ /dev/null
@@ -1,101 +0,0 @@
-//package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.s3
-//
-//import akka.actor.ActorSystem
-//import akka.http.scaladsl.model.{HttpEntity, Uri}
-//import akka.testkit.TestKit
-//import ch.epfl.bluebrain.nexus.delta.kernel.utils.UUIDF
-//import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest.ComputedDigest
-//import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes.FileAttributesOrigin.Client
-//import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileStorageMetadata
-//import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StorageFixtures
-//import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.DigestAlgorithm
-//import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.S3Storage
-//import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.S3StorageValue
-//import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.AkkaSourceHelpers
-//import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.FetchFileRejection.FileNotFound
-//import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.SaveFileRejection.ResourceAlreadyExists
-//import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.s3.MinioSpec._
-//import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.s3.client.S3StorageClient.S3StorageClientDisabled
-//import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.permissions.{read, write}
-//import ch.epfl.bluebrain.nexus.delta.sdk.syntax._
-//import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef
-//import ch.epfl.bluebrain.nexus.testkit.minio.MinioDocker
-//import ch.epfl.bluebrain.nexus.testkit.minio.MinioDocker._
-//import ch.epfl.bluebrain.nexus.testkit.scalatest.ce.CatsEffectSpec
-//import io.circe.Json
-//import org.scalatest.{BeforeAndAfterAll, DoNotDiscover}
-//import software.amazon.awssdk.regions.Region
-//
-//import java.util.UUID
-//
-//@DoNotDiscover
-//class S3StorageSaveAndFetchFileSpec(docker: MinioDocker)
-//    extends TestKit(ActorSystem("S3StorageSaveAndFetchFileSpec"))
-//    with CatsEffectSpec
-//    with AkkaSourceHelpers
-//    with StorageFixtures
-//    with BeforeAndAfterAll {
-//
-//  private val iri = iri"http://localhost/s3"
-//  private val uuid = UUID.fromString("8049ba90-7cc6-4de5-93a1-802c04200dcc")
-//  implicit private val uuidf: UUIDF = UUIDF.fixed(uuid)
-//  private val project = ProjectRef.unsafe("org", "project")
-//  private val filename = "myfile.txt"
-//  private val digest =
-//    ComputedDigest(DigestAlgorithm.default, "e0ac3601005dfa1864f5392aabaf7d898b1b5bab854f1acb4491bcd806b76b0c")
-//
-//  private var storageValue: S3StorageValue = _
-//  private var storage: S3Storage = _
-//  private var metadata: FileStorageMetadata = _
-//
-//  override protected def beforeAll(): Unit = {
-//    super.beforeAll()
-//    storageValue = S3StorageValue(
-//      default = false,
-//      algorithm = DigestAlgorithm.default,
-//      bucket = "bucket2",
-//      endpoint = Some(docker.hostConfig.endpoint),
-//      region = Some(Region.EU_CENTRAL_1),
-//      readPermission = read,
-//      writePermission = write,
-//      maxFileSize = 20
-//    )
-//    createBucket(storageValue).accepted
-//    storage = S3Storage(iri, project, storageValue, Json.obj())
-//    metadata = FileStorageMetadata(
-//      uuid,
-//      12,
-//      digest,
-//      Client,
-//      s"http://bucket2.$VirtualHost:${docker.hostConfig.port}/org/project/8/0/4/9/b/a/9/0/myfile.txt",
-//      Uri.Path("org/project/8/0/4/9/b/a/9/0/myfile.txt")
-//    )
-//  }
-//
-//  override protected def afterAll(): Unit = {
-//    deleteBucket(storageValue).accepted
-//    super.afterAll()
-//  }
-//
-//  "S3Storage operations" should {
-//    val content = "file content"
-//    val entity = HttpEntity(content)
-//
-//    "save a file to a bucket" in {
-//      storage.saveFile(config).apply(filename, entity).accepted shouldEqual metadata
-//    }
-//
-//    "fetch a file from a bucket" in {
-//      val sourceFetched = storage.fetchFile(S3StorageClientDisabled).apply(metadata.path).accepted
-//      consume(sourceFetched) shouldEqual content
-//    }
-//
-//    "fail fetching a file that does not exist" in {
-//      storage.fetchFile(S3StorageClientDisabled).apply(Uri.Path("other.txt")).rejectedWith[FileNotFound]
-//    }
-//
-//    "fail attempting to save the same file again" in {
-//      storage.saveFile(config).apply(filename, entity).rejectedWith[ResourceAlreadyExists]
-//    }
-//  }
-//}
diff --git a/tests/docker/config/delta-postgres.conf b/tests/docker/config/delta-postgres.conf
index 88e499bb62..e0f914959c 100644
--- a/tests/docker/config/delta-postgres.conf
+++ b/tests/docker/config/delta-postgres.conf
@@ -110,7 +110,7 @@ plugins {
   }
 
   remote-disk {
-    enabled = false
+    enabled = true
     credentials {
       type: "client-credentials"
       user: "delta"