diff --git a/delta/kernel/src/main/scala/ch/epfl/bluebrain/nexus/delta/kernel/utils/TransactionalFileCopier.scala b/delta/kernel/src/main/scala/ch/epfl/bluebrain/nexus/delta/kernel/utils/TransactionalFileCopier.scala
deleted file mode 100644
index e343354485..0000000000
--- a/delta/kernel/src/main/scala/ch/epfl/bluebrain/nexus/delta/kernel/utils/TransactionalFileCopier.scala
+++ /dev/null
@@ -1,68 +0,0 @@
-package ch.epfl.bluebrain.nexus.delta.kernel.utils
-
-import cats.data.NonEmptyList
-import cats.effect.{IO, Ref}
-import cats.implicits._
-import ch.epfl.bluebrain.nexus.delta.kernel.Logger
-import ch.epfl.bluebrain.nexus.delta.kernel.error.Rejection
-import fs2.io.file.{CopyFlag, CopyFlags, Files, Path}
-import java.nio.file.{Path => JPath}
-
-trait TransactionalFileCopier {
-  def copyAll(files: NonEmptyList[CopyBetween]): IO[Unit]
-}
-
-final case class CopyBetween(source: Path, destination: Path)
-object CopyBetween {
-  def mk(source: JPath, dest: JPath) = CopyBetween(Path.fromNioPath(source), Path.fromNioPath(dest))
-}
-
-final case class CopyOperationFailed(failingCopy: CopyBetween, e: Throwable) extends Rejection {
-  override def reason: String =
-    s"Copy operation failed from source ${failingCopy.source} to destination ${failingCopy.destination}. Underlying error: $e"
-}
-
-object TransactionalFileCopier {
-
-  private val logger = Logger[TransactionalFileCopier]
-
-  def mk(): TransactionalFileCopier = files => copyAll(files)
-
-  private def copyAll(files: NonEmptyList[CopyBetween]): IO[Unit] =
-    Ref.of[IO, Option[CopyOperationFailed]](None).flatMap { errorRef =>
-      files
-        .parTraverse { case c @ CopyBetween(source, dest) =>
-          copySingle(source, dest).onError(e => errorRef.set(Some(CopyOperationFailed(c, e))))
-        }
-        .void
-        .handleErrorWith { e =>
-          val destinations = files.map(_.destination)
-          logger.error(e)(s"Transactional files copy failed, deleting created files: ${destinations}") >>
-            rollbackCopiesAndRethrow(errorRef, destinations)
-        }
-    }
-
-  def parent(p: Path): Path = Path.fromNioPath(p.toNioPath.getParent)
-
-  private def copySingle(source: Path, dest: Path): IO[Unit] =
-    for {
-      _           <- Files[IO].createDirectories(parent(dest))
-      _           <- Files[IO].copy(source, dest, CopyFlags(CopyFlag.CopyAttributes))
-      // the copy attributes flag won't always preserve permissions due to umask
-      sourcePerms <- Files[IO].getPosixPermissions(source)
-      _           <- Files[IO].setPosixPermissions(dest, sourcePerms)
-    } yield ()
-
-  private def rollbackCopiesAndRethrow(
-      errorRef: Ref[IO, Option[CopyOperationFailed]],
-      files: NonEmptyList[Path]
-  ): IO[Unit] =
-    errorRef.get.flatMap {
-      case Some(error) =>
-        files
-          .filterNot(_ == error.failingCopy.destination)
-          .parTraverse(dest => Files[IO].deleteRecursively(parent(dest)).attempt.void) >>
-          IO.raiseError(error)
-      case None => IO.unit
-    }
-}
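For context on the removal: TransactionalFileCopier gave a batch of copies all-or-nothing semantics. The copies run in parallel; on the first failure the other created destinations are deleted and the recorded CopyOperationFailed is re-raised. A minimal usage sketch against the deleted definitions above (paths illustrative):

import cats.data.NonEmptyList
import cats.effect.IO
import fs2.io.file.Path

// Sketch only: relies on the deleted TransactionalFileCopier definitions above.
val copier: TransactionalFileCopier = TransactionalFileCopier.mk()
val copies = NonEmptyList.of(
  CopyBetween(Path("/tmp/source/a.txt"), Path("/tmp/dest/a.txt")),
  CopyBetween(Path("/tmp/source/b.txt"), Path("/tmp/dest/b.txt"))
)
// Afterwards either both destinations exist or neither does: a failed copy
// triggers deletion of the other destination's directory before the error is re-raised.
val run: IO[Unit] = copier.copyAll(copies)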
diff --git a/delta/kernel/src/test/scala/ch/epfl/bluebrain/nexus/delta/kernel/utils/TransactionalFileCopierSuite.scala b/delta/kernel/src/test/scala/ch/epfl/bluebrain/nexus/delta/kernel/utils/TransactionalFileCopierSuite.scala
deleted file mode 100644
index 1c11be1c76..0000000000
--- a/delta/kernel/src/test/scala/ch/epfl/bluebrain/nexus/delta/kernel/utils/TransactionalFileCopierSuite.scala
+++ /dev/null
@@ -1,149 +0,0 @@
-package ch.epfl.bluebrain.nexus.delta.kernel.utils
-
-import cats.data.NonEmptyList
-import cats.effect.IO
-import cats.syntax.all._
-import ch.epfl.bluebrain.nexus.delta.kernel.utils.TransactionalFileCopier.parent
-import fs2.io.file.PosixPermission._
-import fs2.io.file._
-import munit.CatsEffectSuite
-import munit.catseffect.IOFixture
-
-import java.util.UUID
-
-class TransactionalFileCopierSuite extends CatsEffectSuite {
-
-  val myFixture: IOFixture[Path] = ResourceSuiteLocalFixture("create-temp-dir-fixture", Files[IO].tempDirectory)
-  override def munitFixtures: List[IOFixture[Path]] = List(myFixture)
-  lazy val tempDir: Path = myFixture()
-
-  val copier: TransactionalFileCopier = TransactionalFileCopier.mk()
-
-  test("successfully copy contents of multiple files") {
-    for {
-      (source1, source1Contents) <- givenAFileExists
-      (source2, source2Contents) <- givenAFileExists
-      (dest1, dest2, dest3) = (genFilePath, genFilePath, genFilePath)
-      files = NonEmptyList.of(CopyBetween(source1, dest1), CopyBetween(source2, dest2), CopyBetween(source1, dest3))
-      _ <- copier.copyAll(files)
-      _ <- fileShouldExistWithContents(source1Contents, dest1)
-      _ <- fileShouldExistWithContents(source1Contents, dest3)
-      _ <- fileShouldExistWithContents(source2Contents, dest2)
-    } yield ()
-  }
-
-  test("successfully copy file attributes") {
-    for {
-      (source, contents) <- givenAFileExists
-      sourceAttr <- Files[IO].getBasicFileAttributes(source)
-      _ = println(sourceAttr.lastModifiedTime)
-      dest = genFilePath
-      files = NonEmptyList.of(CopyBetween(source, dest))
-      _ <- copier.copyAll(files)
-      destAttr <- Files[IO].getBasicFileAttributes(dest)
-      _ = println(destAttr.lastModifiedTime)
-      _ <- fileShouldExistWithContentsAndAttributes(dest, contents, sourceAttr)
-    } yield ()
-  }
-
-  test("successfully copy read-only file") {
-    val sourcePermissions = PosixPermissions(OwnerRead, GroupRead, OthersRead)
-
-    for {
-      (source, _) <- givenAFileWithPermissions(sourcePermissions)
-      dest = genFilePath
-      files = NonEmptyList.of(CopyBetween(source, dest))
-      _ <- copier.copyAll(files)
-      _ <- fileShouldExistWithPermissions(dest, sourcePermissions)
-    } yield ()
-  }
-
-  test("rollback by deleting file copies and directories if error thrown during a copy") {
-    for {
-      (source, _) <- givenAFileExists
-      (existingFilePath, _) <- givenAFileExists
-      (dest1, dest3) = (genFilePath, genFilePath)
-      failingCopy = CopyBetween(source, existingFilePath)
-      files = NonEmptyList.of(CopyBetween(source, dest1), failingCopy, CopyBetween(source, dest3))
-      error <- copier.copyAll(files).intercept[CopyOperationFailed]
-      _ <- List(dest1, dest3, parent(dest1), parent(dest3)).traverse(fileShouldNotExist)
-      _ <- fileShouldExist(existingFilePath)
-    } yield assertEquals(error.failingCopy, failingCopy)
-  }
-
-  test("rollback read-only files upon failure") {
-    val sourcePermissions = PosixPermissions(OwnerRead, GroupRead, OthersRead)
-
-    for {
-      (source, _) <- givenAFileWithPermissions(sourcePermissions)
-      (failingDest, _) <- givenAFileExists
-      dest2 = genFilePath
-      failingCopy = CopyBetween(source, failingDest)
-      files = NonEmptyList.of(CopyBetween(source, dest2), failingCopy)
-      error <- copier.copyAll(files).intercept[CopyOperationFailed]
-      _ <- List(dest2, parent(dest2)).traverse(fileShouldNotExist)
-      _ <- fileShouldExist(failingDest)
-    } yield assertEquals(error.failingCopy, failingCopy)
-  }
-
-  def genFilePath: Path = tempDir / genString / s"$genString.txt"
-
-  def genString: String = UUID.randomUUID().toString
-
-  def fileShouldHaveAttributes(path: Path, expectedAttr: BasicFileAttributes): IO[Unit] =
-    Files[IO].getBasicFileAttributes(path).flatMap(assertBasicAttrEqual(_, expectedAttr))
-
-  def assertPermissionsEqual(path: Path, expectedPerms: PosixPermissions): IO[Unit] =
-    Files[IO].getPosixPermissions(path).map(assertEquals(_, expectedPerms))
-
-  def assertBasicAttrEqual(obtained: BasicFileAttributes, expected: BasicFileAttributes): IO[Unit] =
-    IO {
-      // TODO: Figure out why the creationTime assertion fails on the github runners
-//      assertEquals(obtained.creationTime, expected.creationTime)
-      assertEquals(obtained.isDirectory, expected.isDirectory)
-      assertEquals(obtained.isOther, expected.isOther)
-      assertEquals(obtained.isRegularFile, expected.isRegularFile)
-      assertEquals(obtained.isSymbolicLink, expected.isSymbolicLink)
-      assertEquals(obtained.size, expected.size)
-    }
-
-  def fileShouldExistWithContentsAndAttributes(p: Path, contents: String, attr: BasicFileAttributes): IO[Unit] =
-    fileShouldExistWithContents(contents, p) >> fileShouldHaveAttributes(p, attr)
-
-  def fileShouldExistWithPermissions(p: Path, perms: PosixPermissions): IO[Unit] =
-    assertPermissionsEqual(p, perms)
-
-  def fileShouldExistWithContents(contents: String, p: Path): IO[Unit] =
-    Files[IO].readUtf8(p).compile.string.assertEquals(contents).void
-
-  def fileShouldNotExist(p: Path): IO[Unit] = assertFileExistence(false, p)
-
-  def fileShouldExist(p: Path): IO[Unit] = assertFileExistence(true, p)
-
-  def assertFileExistence(expected: Boolean, p: Path*): IO[Unit] =
-    p.toList.traverse(Files[IO].exists(_).assertEquals(expected)).void
-
-  def givenADirectory(path: Path, perms: Option[PosixPermissions]): IO[Unit] = Files[IO].createDirectories(path, perms)
-
-  def givenAFileWithPermissions(perms: PosixPermissions): IO[(Path, String)] = {
-    val path = genFilePath
-    val contents = genString
-
-    givenADirectory(parent(path), None) >>
-      Files[IO].createFile(path) >>
-      writeUtf8(path, contents) >>
-      Files[IO].setPosixPermissions(path, perms).as((path, contents))
-  }
-
-  def givenAFileExists: IO[(Path, String)] = givenAFileAtPathWithContentsAndPermissions(None)
-
-  def givenAFileAtPathWithContentsAndPermissions(perms: Option[PosixPermissions]): IO[(Path, String)] = {
-    val path = genFilePath
-    val contents = genString
-    givenADirectory(parent(path), perms) >>
-      Files[IO].createFile(path, perms) >> writeUtf8(path, contents).as((path, contents))
-  }
-
-  def writeUtf8(path: Path, contents: String): IO[Unit] =
-    fs2.Stream(contents).through(Files[IO].writeUtf8(path)).compile.drain
-}
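The deleted suite's temp-directory plumbing is the stock munit-cats-effect fixture pattern. For reviewers unfamiliar with it, a self-contained sketch of the same pattern (suite and fixture names invented here):

import cats.effect.IO
import fs2.io.file.{Files, Path}
import munit.CatsEffectSuite
import munit.catseffect.IOFixture

// Mirrors the fixture wiring of the deleted suite above.
class TempDirExampleSuite extends CatsEffectSuite {
  // Acquires one temp directory for the whole suite and releases it afterwards.
  val tempDir: IOFixture[Path] = ResourceSuiteLocalFixture("temp-dir", Files[IO].tempDirectory)
  override def munitFixtures: List[IOFixture[Path]] = List(tempDir)

  test("the suite-wide temp directory exists") {
    Files[IO].exists(tempDir()).assertEquals(true)
  }
}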
diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/StoragePluginModule.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/StoragePluginModule.scala
index 5c8163574f..479576d1a0 100644
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/StoragePluginModule.scala
+++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/StoragePluginModule.scala
@@ -1,31 +1,30 @@
 package ch.epfl.bluebrain.nexus.delta.plugins.storage
 
 import akka.actor.typed.ActorSystem
-import akka.http.scaladsl.server.Directives.concat
 import akka.http.scaladsl.model.Uri.Path
+import akka.http.scaladsl.server.Directives.concat
 import cats.effect.{Clock, IO}
-import ch.epfl.bluebrain.nexus.delta.kernel.utils.{ClasspathResourceLoader, TransactionalFileCopier, UUIDF}
+import ch.epfl.bluebrain.nexus.delta.kernel.utils.{ClasspathResourceLoader, UUIDF}
 import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.client.ElasticSearchClient
 import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.config.ElasticSearchViewsConfig
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.Files.FilesLog
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.batch.{BatchCopy, BatchFiles}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.contexts.{files => fileCtxId}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model._
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes.{BatchFilesRoutes, DelegateFilesRoutes, FilesRoutes, LinkFilesRoutes}
+import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes.{DelegateFilesRoutes, FilesRoutes, LinkFilesRoutes}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.schemas.{files => filesSchemaId}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.{FileAttributesUpdateStream, Files}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.{ShowFileLocation, StorageTypeConfig}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.contexts.{storages => storageCtxId, storagesMetadata => storageMetaCtxId}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model._
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.FileOperations
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.disk.{DiskFileOperations, DiskStorageCopyFiles}
+import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.disk.DiskFileOperations
+import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.RemoteDiskFileOperations
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.RemoteDiskStorageClient
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.{RemoteDiskFileOperations, RemoteDiskStorageCopyFiles}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.s3.client.S3StorageClient
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.s3.{S3FileOperations, S3LocationGenerator}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.routes.StoragesRoutes
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.schemas.{storage => storagesSchemaId}
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.{StorageDeletionTask, StoragePermissionProviderImpl, Storages, StoragesStatistics}
+import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages._
 import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.api.JsonLdApi
 import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.{ContextValue, RemoteContextResolution}
 import ch.epfl.bluebrain.nexus.delta.rdf.utils.JsonKeyOrdering
@@ -108,6 +107,10 @@ class StoragePluginModule(priority: Int) extends ModuleDef {
     )
   }
 
+  make[FetchStorage].from { (storages: Storages, aclCheck: AclCheck) =>
+    FetchStorage(storages, aclCheck)
+  }
+
   make[StoragePermissionProvider].from { (storages: Storages) =>
     new StoragePermissionProviderImpl(storages)
   }
@@ -186,9 +189,8 @@ class StoragePluginModule(priority: Int) extends ModuleDef {
   make[Files].from {
     (
         cfg: StoragePluginConfig,
-        aclCheck: AclCheck,
         fetchContext: FetchContext,
-        storages: Storages,
+        fetchStorage: FetchStorage,
         xas: Transactors,
         clock: Clock[IO],
         uuidF: UUIDF,
@@ -197,8 +199,7 @@ class StoragePluginModule(priority: Int) extends ModuleDef {
     ) =>
      Files(
         fetchContext,
-        aclCheck,
-        storages,
+        fetchStorage,
        xas,
        cfg.files,
        fileOps,
@@ -214,42 +215,6 @@ class StoragePluginModule(priority: Int) extends ModuleDef {
     FileAttributesUpdateStream.start(files, storages, storageTypeConfig.remoteDisk, supervisor)
   }
 
-  make[TransactionalFileCopier].fromValue(TransactionalFileCopier.mk())
-
-  make[DiskStorageCopyFiles].from { (copier: TransactionalFileCopier, uuidf: UUIDF) =>
-    DiskStorageCopyFiles.mk(copier, uuidf)
-  }
-
-  make[RemoteDiskStorageCopyFiles].from { client: RemoteDiskStorageClient => RemoteDiskStorageCopyFiles.mk(client) }
-
-  make[BatchCopy].from {
-    (
-        files: Files,
-        storages: Storages,
-        aclCheck: AclCheck,
-        diskCopy: DiskStorageCopyFiles,
-        remoteDiskCopy: RemoteDiskStorageCopyFiles,
-        uuidF: UUIDF
-    ) =>
-      BatchCopy.mk(files, storages, aclCheck, diskCopy, remoteDiskCopy)(uuidF)
-  }
-
-  make[BatchFiles].from {
-    (
-        fetchContext: FetchContext,
-        files: Files,
-        filesLog: FilesLog,
-        batchCopy: BatchCopy,
-        uuidF: UUIDF
-    ) =>
-      BatchFiles.mk(
-        files,
-        fetchContext,
-        FilesLog.eval(filesLog),
-        batchCopy
-      )(uuidF)
-  }
-
   make[FilesRoutes].from {
     (
         showLocation: ShowFileLocation,
@@ -315,26 +280,6 @@ class StoragePluginModule(priority: Int) extends ModuleDef {
     )(baseUri, cr, ordering, showLocation)
   }
 
-  make[BatchFilesRoutes].from {
-    (
-        showLocation: ShowFileLocation,
-        identities: Identities,
-        aclCheck: AclCheck,
-        batchFiles: BatchFiles,
-        indexingAction: AggregateIndexingAction,
-        shift: File.Shift,
-        baseUri: BaseUri,
-        cr: RemoteContextResolution @Id("aggregate"),
-        ordering: JsonKeyOrdering
-    ) =>
-      new BatchFilesRoutes(identities, aclCheck, batchFiles, indexingAction(_, _, _)(shift))(
-        baseUri,
-        showLocation,
-        cr,
-        ordering
-      )
-  }
-
   make[File.Shift].from { (files: Files, base: BaseUri, showLocation: ShowFileLocation) =>
     File.shift(files)(base, showLocation)
   }
@@ -403,13 +348,12 @@ class StoragePluginModule(priority: Int) extends ModuleDef {
   many[PriorityRoute].add {
     (
         fileRoutes: FilesRoutes,
-        batchFileRoutes: BatchFilesRoutes,
         linkFileRoutes: LinkFilesRoutes,
         delegationRoutes: DelegateFilesRoutes
     ) =>
       PriorityRoute(
        priority,
-        concat(fileRoutes.routes, linkFileRoutes.routes, batchFileRoutes.routes, delegationRoutes.routes),
+        concat(fileRoutes.routes, linkFileRoutes.routes, delegationRoutes.routes),
        requiresStrictEntity = false
      )
   }
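The bindings above follow the izumi distage ModuleDef style used throughout this plugin: make[X].fromValue binds a ready instance, while make[X].from { (deps...) => ... } lets distage inject the declared parameters. A tiny illustrative sketch with invented types:

import izumi.distage.model.definition.ModuleDef

// Invented components, just to show the two binding styles used in this module.
trait Greeter { def greet(name: String): String }
final class HelloGreeter extends Greeter { def greet(name: String): String = s"Hello, $name" }
final class App(greeter: Greeter) { def run(): String = greeter.greet("world") }

object ExampleModule extends ModuleDef {
  make[Greeter].fromValue(new HelloGreeter)      // bind a ready instance
  make[App].from { (g: Greeter) => new App(g) }  // bind via function; distage injects Greeter
}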
diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FetchFileResource.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FetchFileResource.scala
deleted file mode 100644
index d77edd4c01..0000000000
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FetchFileResource.scala
+++ /dev/null
@@ -1,15 +0,0 @@
-package ch.epfl.bluebrain.nexus.delta.plugins.storage.files
-
-import cats.effect.IO
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileId
-
-trait FetchFileResource {
-
-  /**
-    * Fetch the last version of a file
-    *
-    * @param id
-    *   the identifier that will be expanded to the Iri of the file with its optional rev/tag
-    */
-  def fetch(id: FileId): IO[FileResource]
-}
diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FetchFileStorage.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FetchFileStorage.scala
deleted file mode 100644
index d97b030b03..0000000000
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FetchFileStorage.scala
+++ /dev/null
@@ -1,14 +0,0 @@
-package ch.epfl.bluebrain.nexus.delta.plugins.storage.files
-
-import cats.effect.IO
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage
-import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller
-import ch.epfl.bluebrain.nexus.delta.sdk.model._
-import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.ProjectContext
-import ch.epfl.bluebrain.nexus.delta.sourcing.model.{ProjectRef, ResourceRef}
-
-trait FetchFileStorage {
-  def fetchAndValidateActiveStorage(storageIdOpt: Option[IdSegment], ref: ProjectRef, pc: ProjectContext)(implicit
-      caller: Caller
-  ): IO[(ResourceRef.Revision, Storage)]
-}
diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FileAttributesUpdateStream.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FileAttributesUpdateStream.scala
index 1858fad1da..bb9b18d4c4 100644
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FileAttributesUpdateStream.scala
+++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FileAttributesUpdateStream.scala
@@ -1,14 +1,13 @@
 package ch.epfl.bluebrain.nexus.delta.plugins.storage.files
 
 import cats.effect.IO
-import cats.syntax.all._
 import ch.epfl.bluebrain.nexus.delta.kernel.Logger
 import ch.epfl.bluebrain.nexus.delta.kernel.cache.LocalCache
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection._
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileState
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.Storages
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.RemoteDiskStorageConfig
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{Storage, StorageRejection, StorageType}
+import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{Storage, StorageType}
 import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegmentRef
 import ch.epfl.bluebrain.nexus.delta.sourcing.model.{ProjectRef, ResourceRef, SuccessElemStream}
 import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset
@@ -85,9 +84,6 @@ object FileAttributesUpdateStream {
         storages
           .fetch(IdSegmentRef(id), project)
           .map(_.value)
-          .adaptError { case e: StorageRejection =>
-            WrappedStorageRejection(e)
-          }
       )
 
     new Impl(
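FetchStorage itself is introduced elsewhere; this diff only shows its call sites. From fetchStorage.onRead(ref, project), fetchStorage.onWrite(storageIri, project) and the FetchStorage(storages, aclCheck) wiring above, a plausible shape is sketched below. This is inferred, not the actual definition, and the real signatures may differ:

import cats.effect.IO
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage
import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri
import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller
import ch.epfl.bluebrain.nexus.delta.sourcing.model.{ProjectRef, ResourceRef}

// Inferred sketch only, based on call sites in this diff.
trait FetchStorage {
  // Read path: resolve the file's storage and enforce its read permission.
  def onRead(ref: ResourceRef, project: ProjectRef)(implicit caller: Caller): IO[Storage]
  // Write path: resolve the given storage iri (or the project default when None),
  // reject deprecated storages and enforce the write permission, replacing the
  // removed fetchAndValidateActiveStorage.
  def onWrite(iri: Option[Iri], project: ProjectRef)(implicit caller: Caller): IO[(ResourceRef.Revision, Storage)]
}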
diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/Files.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/Files.scala
index f1a95acc3b..7f52c0b713 100644
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/Files.scala
+++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/Files.scala
@@ -17,22 +17,17 @@
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileEvent._
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection._
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model._
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.schemas.{files => fileSchema}
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection.{StorageFetchRejection, StorageIsDeprecated}
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{DigestAlgorithm, Storage, StorageRejection, StorageType}
+import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{DigestAlgorithm, Storage, StorageType}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.{FetchAttributeRejection, FetchFileRejection, SaveFileRejection}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations._
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.{FetchStorage, Storages}
 import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri
 import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.ContextValue
 import ch.epfl.bluebrain.nexus.delta.sdk.AkkaSource
-import ch.epfl.bluebrain.nexus.delta.sdk.acls.AclCheck
 import ch.epfl.bluebrain.nexus.delta.sdk.directives.FileResponse
-import ch.epfl.bluebrain.nexus.delta.sdk.error.ServiceError.AuthorizationFailed
 import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller
 import ch.epfl.bluebrain.nexus.delta.sdk.implicits._
-import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegmentRef.{Latest, Revision, Tag}
 import ch.epfl.bluebrain.nexus.delta.sdk.model._
-import ch.epfl.bluebrain.nexus.delta.sdk.permissions.model.Permission
 import ch.epfl.bluebrain.nexus.delta.sdk.projects.FetchContext
 import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.{ApiMappings, ProjectContext}
 import ch.epfl.bluebrain.nexus.delta.sourcing.ScopedEntityDefinition.Tagger
@@ -50,13 +45,10 @@
 final class Files(
     formDataExtractor: FormDataExtractor,
     log: FilesLog,
-    aclCheck: AclCheck,
     fetchContext: FetchContext,
-    storages: FetchStorage,
+    fetchStorage: FetchStorage,
     fileOperations: FileOperations
-)(implicit uuidF: UUIDF)
-    extends FetchFileStorage
-    with FetchFileResource {
+)(implicit uuidF: UUIDF) {
 
   implicit private val kamonComponent: KamonMetricComponent = KamonMetricComponent(entityType.value)
@@ -71,7 +63,7 @@ final class Files(
    *
    * @param storageId
    *   the optional storage identifier to expand as the id of the storage. When None, the default storage is used
-   * @param projectRef
+   * @param project
    *   the project where the file will belong
    * @param uploadRequest
    *   the upload request containing the form data entity
@@ -80,17 +72,18 @@ final class Files(
   def create(
       storageId: Option[IdSegment],
-      projectRef: ProjectRef,
+      project: ProjectRef,
       uploadRequest: FileUploadRequest,
       tag: Option[UserTag]
   )(implicit caller: Caller): IO[FileResource] = {
     for {
-      pc                    <- fetchContext.onCreate(projectRef)
+      pc                    <- fetchContext.onCreate(project)
       iri                   <- generateId(pc)
-      _                     <- test(CreateFile(iri, projectRef, testStorageRef, testStorageType, testAttributes, caller.subject, tag))
-      (storageRef, storage) <- fetchAndValidateActiveStorage(storageId, projectRef, pc)
+      storageIri            <- storageId.traverse(expandStorageIri(_, pc))
+      _                     <- test(CreateFile(iri, project, testStorageRef, testStorageType, testAttributes, caller.subject, tag))
+      (storageRef, storage) <- fetchStorage.onWrite(storageIri, project)
       attributes            <- saveFileToStorage(iri, storage, uploadRequest)
-      res                   <- eval(CreateFile(iri, projectRef, storageRef, storage.tpe, attributes, caller.subject, tag))
+      res                   <- eval(CreateFile(iri, project, storageRef, storage.tpe, attributes, caller.subject, tag))
     } yield res
   }.span("createFile")
@@ -114,8 +107,9 @@ final class Files(
   )(implicit caller: Caller): IO[FileResource] = {
     for {
       (iri, pc)             <- id.expandIri(fetchContext.onCreate)
+      storageIri            <- storageId.traverse(expandStorageIri(_, pc))
       _                     <- test(CreateFile(iri, id.project, testStorageRef, testStorageType, testAttributes, caller.subject, tag))
-      (storageRef, storage) <- fetchAndValidateActiveStorage(storageId, id.project, pc)
+      (storageRef, storage) <- fetchStorage.onWrite(storageIri, id.project)
       metadata              <- saveFileToStorage(iri, storage, uploadRequest)
       res                   <- eval(CreateFile(iri, id.project, storageRef, storage.tpe, metadata, caller.subject, tag))
     } yield res
@@ -141,9 +135,10 @@ final class Files(
       tag: Option[UserTag]
   )(implicit caller: Caller): IO[FileResource] = {
     for {
-      pc  <- fetchContext.onCreate(projectRef)
-      iri <- generateId(pc)
-      res <- createLegacyLink(iri, projectRef, pc, storageId, description, path, tag)
+      pc         <- fetchContext.onCreate(projectRef)
+      iri        <- generateId(pc)
+      storageIri <- storageId.traverse(expandStorageIri(_, pc))
+      res        <- createLegacyLink(iri, projectRef, storageIri, description, path, tag)
     } yield res
   }.span("createLink")
@@ -167,8 +162,9 @@ final class Files(
   )(implicit caller: Caller): IO[FileResource] = {
     for {
-      (iri, pc) <- id.expandIri(fetchContext.onCreate)
-      res       <- createLegacyLink(iri, id.project, pc, storageId, description, path, tag)
+      (iri, pc)  <- id.expandIri(fetchContext.onCreate)
+      storageIri <- storageId.traverse(expandStorageIri(_, pc))
+      res        <- createLegacyLink(iri, id.project, storageIri, description, path, tag)
     } yield res
   }.span("createLink")
@@ -176,7 +172,7 @@ final class Files(
   /**
    * Grants a delegation to create the physical file on the given storage
    * @param id
    *   the file identifier to expand as the iri of the file, generated if none is provided
-   * @param projectRef
+   * @param project
    *   the project where the file will belong
    * @param description
    *   a description of the file
@@ -185,7 +181,7 @@ final class Files(
   def createDelegate(
       id: Option[IdSegment],
-      projectRef: ProjectRef,
+      project: ProjectRef,
       description: FileDescription,
       storageId: Option[IdSegment],
       tag: Option[UserTag]
@@ -193,19 +189,20 @@ final class Files(
       caller: Caller
   ): IO[FileDelegationRequest] = {
     for {
-      pc             <- fetchContext.onCreate(projectRef)
+      pc             <- fetchContext.onCreate(project)
       iri            <- id.fold(generateId(pc)) { FileId.iriExpander(_, pc) }
-      _              <- test(CreateFile(iri, projectRef, testStorageRef, testStorageType, testAttributes, caller.subject, tag))
-      (_, storage)   <- fetchAndValidateActiveStorage(storageId, projectRef, pc)
+      storageIri     <- storageId.traverse(expandStorageIri(_, pc))
+      _              <- test(CreateFile(iri, project, testStorageRef, testStorageType, testAttributes, caller.subject, tag))
+      (_, storage)   <- fetchStorage.onWrite(storageIri, project)
       targetLocation <- fileOperations.delegate(storage, description.filename)
-    } yield FileDelegationCreationRequest(projectRef, iri, targetLocation, description, tag)
+    } yield FileDelegationCreationRequest(project, iri, targetLocation, description, tag)
   }.span("createDelegate")
 
   /**
    * Grants a delegation to create the physical file on the given storage
    * @param id
    *   the file identifier to expand as the iri of the file
-   * @param projectRef
+   * @param project
    *   the project where the file will belong
    * @param rev
    *   the current revision of the file
@@ -214,7 +211,7 @@ final class Files(
   def updateDelegate(
       id: IdSegment,
-      projectRef: ProjectRef,
+      project: ProjectRef,
       rev: Int,
       description: FileDescription,
       storageId: Option[IdSegment],
@@ -223,13 +220,13 @@ final class Files(
       caller: Caller
   ): IO[FileDelegationRequest] = {
     for {
-      pc             <- fetchContext.onModify(projectRef)
+      pc             <- fetchContext.onModify(project)
       iri            <- FileId.iriExpander(id, pc)
-      _              <-
-        test(UpdateFile(iri, projectRef, testStorageRef, testStorageType, testAttributes, rev, caller.subject, tag))
-      (_, storage)   <- fetchAndValidateActiveStorage(storageId, projectRef, pc)
+      storageIri     <- storageId.traverse(expandStorageIri(_, pc))
+      _              <- test(UpdateFile(iri, project, testStorageRef, testStorageType, testAttributes, rev, caller.subject, tag))
+      (_, storage)   <- fetchStorage.onWrite(storageIri, project)
       targetLocation <- fileOperations.delegate(storage, description.filename)
-    } yield FileDelegationUpdateRequest(projectRef, iri, rev, targetLocation, description, tag)
+    } yield FileDelegationUpdateRequest(project, iri, rev, targetLocation, description, tag)
   }.span("updateDelegate")
 
   /**
@@ -255,8 +252,9 @@ final class Files(
   )(implicit caller: Caller): IO[FileResource] = {
     for {
       (iri, pc)             <- id.expandIri(fetchContext.onModify)
+      storageIri            <- storageId.traverse(expandStorageIri(_, pc))
       _                     <- test(UpdateFile(iri, id.project, testStorageRef, testStorageType, testAttributes, rev, caller.subject, tag))
-      (storageRef, storage) <- fetchAndValidateActiveStorage(storageId, id.project, pc)
+      (storageRef, storage) <- fetchStorage.onWrite(storageIri, id.project)
       attributes            <- saveFileToStorage(iri, storage, uploadRequest)
       res                   <- eval(UpdateFile(iri, id.project, storageRef, storage.tpe, attributes, rev, caller.subject, tag))
     } yield res
@@ -284,7 +282,8 @@ final class Files(
     for {
       projectContext        <- fetchContext.onCreate(project)
       iri                   <- id.fold(generateId(projectContext)) { FileId.iriExpander(_, projectContext) }
-      (storageRef, storage) <- fetchAndValidateActiveStorage(storageId, project, projectContext)
+      storageIri            <- storageId.traverse(expandStorageIri(_, projectContext))
+      (storageRef, storage) <- fetchStorage.onWrite(storageIri, project)
       s3Metadata            <- fileOperations.link(storage, linkRequest.path)
       filename              <- IO.fromOption(linkRequest.path.lastSegment)(InvalidFilePath)
       attr                   = FileAttributes.from(
@@ -304,8 +303,9 @@ final class Files(
   )(implicit caller: Caller): IO[FileResource] = {
     for {
       (iri, pc)             <- id.expandIri(fetchContext.onModify)
+      storageIri            <- storageId.traverse(expandStorageIri(_, pc))
       _                     <- test(UpdateFile(iri, id.project, testStorageRef, testStorageType, testAttributes, rev, caller.subject, tag))
-      (storageRef, storage) <- fetchAndValidateActiveStorage(storageId, id.project, pc)
+      (storageRef, storage) <- fetchStorage.onWrite(storageIri, id.project)
       s3Metadata            <- fileOperations.link(storage, linkRequest.path)
       filename              <- IO.fromOption(linkRequest.path.lastSegment)(InvalidFilePath)
       attr                   = FileAttributes.from(
@@ -349,24 +349,12 @@ final class Files(
   )(implicit caller: Caller): IO[FileResource] = {
     for {
       (iri, pc)             <- id.expandIri(fetchContext.onModify)
+      storageIri            <- storageId.traverse(expandStorageIri(_, pc))
       _                     <- test(UpdateFile(iri, id.project, testStorageRef, testStorageType, testAttributes, rev, caller.subject, tag))
-      (storageRef, storage) <- fetchAndValidateActiveStorage(storageId, id.project, pc)
+      (storageRef, storage) <- fetchStorage.onWrite(storageIri, id.project)
       metadata              <- legacyLinkFile(storage, path, description.filename, iri)
-      res                   <- eval(
-                                 UpdateFile(
-                                   iri,
-                                   id.project,
-                                   storageRef,
-                                   storage.tpe,
-                                   FileAttributes.from(
-                                     description,
-                                     metadata
-                                   ),
-                                   rev,
-                                   caller.subject,
-                                   tag
-                                 )
-                               )
+      attributes             = FileAttributes.from(description, metadata)
+      res                   <- eval(UpdateFile(iri, id.project, storageRef, storage.tpe, attributes, rev, caller.subject, tag))
     } yield res
   }.span("updateLink")
@@ -461,9 +449,8 @@ final class Files(
     for {
       file       <- fetch(id)
       attributes  = file.value.attributes
-      storage    <- storages.fetch(file.value.storage, id.project)
-      _          <- validateAuth(id.project, storage.value.storageValue.readPermission)
-      s           = fetchFile(storage.value, attributes, file.id)
+      storage    <- fetchStorage.onRead(file.value.storage, id.project)
+      s           = fetchFile(storage, attributes, file.id)
       mediaType   = attributes.mediaType.getOrElse(`application/octet-stream`)
     } yield FileResponse(
       attributes.filename,
@@ -479,49 +466,35 @@ final class Files(
       FetchRejection(fileId, storage.id, e)
     }
 
-  override def fetch(id: FileId): IO[FileResource] =
-    (for {
-      (iri, _) <- id.expandIri(fetchContext.onRead)
-      state    <- fetchState(id, iri)
-    } yield state.toResource).span("fetchFile")
-
-  private def fetchState(id: FileId, iri: Iri): IO[FileState] = {
-    val notFound = FileNotFound(iri, id.project)
-    id.id match {
-      case Latest(_)        => log.stateOr(id.project, iri, notFound)
-      case Revision(_, rev) => log.stateOr(id.project, iri, rev, notFound, RevisionNotFound)
-      case Tag(_, tag)      => log.stateOr(id.project, iri, tag, notFound, TagNotFound(tag))
+  def fetch(id: FileId): IO[FileResource] =
+    id.expandRef(fetchContext.onRead).flatMap { fetch(_, id.project) }
+
+  def fetch(resourceRef: ResourceRef, project: ProjectRef): IO[FileResource] =
+    fetchState(resourceRef, project).map(_.toResource).span("fetchFile")
+
+  private[files] def fetchState(resourceRef: ResourceRef, project: ProjectRef): IO[FileState] = {
+    resourceRef match {
+      case ResourceRef.Latest(iri)           => log.stateOr(project, iri, FileNotFound(iri, project))
+      case ResourceRef.Revision(_, iri, rev) =>
+        log.stateOr(project, iri, rev, FileNotFound(iri, project), RevisionNotFound)
+      case ResourceRef.Tag(_, iri, tag)      => log.stateOr(project, iri, tag, FileNotFound(iri, project), TagNotFound(tag))
     }
   }
 
   private def createLegacyLink(
       iri: Iri,
-      ref: ProjectRef,
-      pc: ProjectContext,
-      storageId: Option[IdSegment],
+      project: ProjectRef,
+      storageIri: Option[Iri],
       description: FileDescription,
       path: Uri.Path,
       tag: Option[UserTag]
   )(implicit caller: Caller): IO[FileResource] =
     for {
-      _                     <- test(CreateFile(iri, ref, testStorageRef, testStorageType, testAttributes, caller.subject, tag))
-      (storageRef, storage) <- fetchAndValidateActiveStorage(storageId, ref, pc)
+      _                     <- test(CreateFile(iri, project, testStorageRef, testStorageType, testAttributes, caller.subject, tag))
+      (storageRef, storage) <- fetchStorage.onWrite(storageIri, project)
       storageMetadata       <- legacyLinkFile(storage, path, description.filename, iri)
-      res                   <- eval(
-                                 CreateFile(
-                                   iri,
-                                   ref,
-                                   storageRef,
-                                   storage.tpe,
-                                   FileAttributes
-                                     .from(
-                                       description,
-                                       storageMetadata
-                                     ),
-                                   caller.subject,
-                                   tag
-                                 )
-                               )
+      fileAttributes         = FileAttributes.from(description, storageMetadata)
+      res                   <- eval(CreateFile(iri, project, storageRef, storage.tpe, fileAttributes, caller.subject, tag))
     } yield res
 
   private def legacyLinkFile(
@@ -538,31 +511,6 @@ final class Files(
   private def test(cmd: FileCommand) = log.dryRun(cmd.project, cmd.id, cmd)
 
-  override def fetchAndValidateActiveStorage(storageIdOpt: Option[IdSegment], ref: ProjectRef, pc: ProjectContext)(
-      implicit caller: Caller
-  ): IO[(ResourceRef.Revision, Storage)] =
-    storageIdOpt match {
-      case Some(storageId) =>
-        for {
-          iri     <- expandStorageIri(storageId, pc)
-          storage <- storages.fetch(ResourceRef(iri), ref)
-          _       <- IO.whenA(storage.deprecated)(IO.raiseError(WrappedStorageRejection(StorageIsDeprecated(iri))))
-          _       <- validateAuth(ref, storage.value.storageValue.writePermission)
-        } yield ResourceRef.Revision(storage.id, storage.rev) -> storage.value
-      case None            =>
-        for {
-          storage <- fetchDefaultStorage(ref)
-          _       <- validateAuth(ref, storage.value.storageValue.writePermission)
-        } yield ResourceRef.Revision(storage.id, storage.rev) -> storage.value
-    }
-
-  private def fetchDefaultStorage(ref: ProjectRef) = storages.fetchDefault(ref).adaptError { case e: StorageRejection =>
-    WrappedStorageRejection(e)
-  }
-
-  private def validateAuth(project: ProjectRef, permission: Permission)(implicit c: Caller): IO[Unit] =
-    aclCheck.authorizeForOr(project, permission)(AuthorizationFailed(project, permission))
-
   private def saveFileToStorage(iri: Iri, storage: Storage, uploadRequest: FileUploadRequest): IO[FileAttributes] = {
     for {
       info <- formDataExtractor(iri, uploadRequest.entity, storage.storageValue.maxFileSize)
@@ -571,25 +519,11 @@ final class Files(
     } yield FileAttributes.from(description, storageMetadata)
   }.adaptError { case e: SaveFileRejection => SaveRejection(iri, storage.id, e) }
 
-  private def expandStorageIri(segment: IdSegment, pc: ProjectContext): IO[Iri] =
-    Storages.expandIri(segment, pc).adaptError { case s: StorageRejection =>
-      WrappedStorageRejection(s)
-    }
-
-  private def generateId(pc: ProjectContext): IO[Iri] =
+  private def generateId(pc: ProjectContext): IO[Iri] =
     uuidF().map(uuid => pc.base.iri / uuid.toString)
 
   def states(offset: Offset): SuccessElemStream[FileState] = log.states(Scope.root, offset)
 
-  private[files] def updateAttributes(iri: Iri, project: ProjectRef): IO[Unit] =
-    for {
-      f       <- log.stateOr(project, iri, FileNotFound(iri, project))
-      storage <- storages.fetch(IdSegmentRef(f.storage), f.project).map(_.value).adaptError {
-                   case e: StorageFetchRejection => WrappedStorageRejection(e)
-                 }
-      _       <- updateAttributes(f: FileState, storage: Storage)
-    } yield ()
-
   private[files] def updateAttributes(f: FileState, storage: Storage): IO[Unit] = {
     val attr = f.attributes
     for {
@@ -627,6 +561,8 @@ object Files {
 
   type FilesLog = ScopedEventLog[Iri, FileState, FileCommand, FileEvent, FileRejection]
 
+  def expandStorageIri(segment: IdSegment, pc: ProjectContext): IO[Iri] = Storages.expandIri(segment, pc)
+
   object FilesLog {
     def eval(log: FilesLog)(cmd: FileCommand): IO[FileResource] =
       log.evaluate(cmd.project, cmd.id, cmd).map(_._2.toResource)
@@ -839,8 +775,7 @@ object Files {
    */
   def apply(
       fetchContext: FetchContext,
-      aclCheck: AclCheck,
-      storages: FetchStorage,
+      fetchStorage: FetchStorage,
       xas: Transactors,
       config: FilesConfig,
       fileOps: FileOperations,
@@ -853,9 +788,8 @@ object Files {
     new Files(
       FormDataExtractor(config.mediaTypeDetector),
       ScopedEventLog(definition(clock), config.eventLog, xas),
-      aclCheck,
       fetchContext,
-      storages,
+      fetchStorage,
       fileOps
     )
  }
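The storageId.traverse(expandStorageIri(_, pc)) pattern that now recurs throughout Files is plain cats Traverse over Option: expand only when a storage id was supplied, propagating any expansion error. A standalone sketch with simplified types:

import cats.effect.IO
import cats.syntax.all._

// Hypothetical stand-in for expandStorageIri, with String for IdSegment/Iri.
def expand(segment: String): IO[String] =
  IO.raiseWhen(segment.isEmpty)(new IllegalArgumentException("empty id"))
    .as(s"https://bbp.epfl.ch/storages/$segment")

// None => IO.pure(None); Some(s) => expand(s).map(Some(_)); errors short-circuit.
val none: IO[Option[String]] = (None: Option[String]).traverse(expand)
val some: IO[Option[String]] = Option("custom-storage").traverse(expand)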
diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/batch/BatchCopy.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/batch/BatchCopy.scala
deleted file mode 100644
index 249719dcb2..0000000000
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/batch/BatchCopy.scala
+++ /dev/null
@@ -1,121 +0,0 @@
-package ch.epfl.bluebrain.nexus.delta.plugins.storage.files.batch
-
-import cats.data.NonEmptyList
-import cats.effect.IO
-import cats.implicits.toFunctorOps
-import ch.epfl.bluebrain.nexus.delta.kernel.utils.UUIDF
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.FetchFileResource
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model._
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes.CopyFileSource
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.FetchStorage
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.{DiskStorage, RemoteDiskStorage}
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{Storage, StorageType}
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.CopyFileRejection
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.CopyFileRejection._
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.disk.{DiskCopyDetails, DiskStorageCopyFiles}
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.RemoteDiskStorageCopyFiles
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.model.RemoteDiskCopyDetails
-import ch.epfl.bluebrain.nexus.delta.sdk.acls.AclCheck
-import ch.epfl.bluebrain.nexus.delta.sdk.error.ServiceError.AuthorizationFailed
-import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller
-import shapeless.syntax.typeable.typeableOps
-
-trait BatchCopy {
-  def copyFiles(source: CopyFileSource, destStorage: Storage)(implicit
-      c: Caller
-  ): IO[NonEmptyList[FileAttributes]]
-}
-
-object BatchCopy {
-  def mk(
-      fetchFile: FetchFileResource,
-      fetchStorage: FetchStorage,
-      aclCheck: AclCheck,
-      diskCopy: DiskStorageCopyFiles,
-      remoteDiskCopy: RemoteDiskStorageCopyFiles
-  )(implicit uuidF: UUIDF): BatchCopy = new BatchCopy {
-
-    override def copyFiles(source: CopyFileSource, destStorage: Storage)(implicit
-        c: Caller
-    ): IO[NonEmptyList[FileAttributes]] =
-      destStorage match {
-        case disk: Storage.DiskStorage         => copyToDiskStorage(source, disk)
-        case remote: Storage.RemoteDiskStorage => copyToRemoteStorage(source, remote)
-        case s3: Storage.S3Storage             => unsupported(s3.tpe)
-      }
-
-    private def copyToRemoteStorage(source: CopyFileSource, dest: RemoteDiskStorage)(implicit c: Caller) =
-      for {
-        remoteCopyDetails <- source.files.traverse(fetchRemoteCopyDetails(dest, _))
-        _                 <- validateFilesForStorage(dest, remoteCopyDetails.map(_.sourceMetadata.bytes))
-        attributes        <- remoteDiskCopy.copyFiles(dest, remoteCopyDetails)
-      } yield {
-        attributes
-      }
-
-    private def copyToDiskStorage(source: CopyFileSource, dest: DiskStorage)(implicit
-        c: Caller
-    ): IO[NonEmptyList[FileAttributes]] =
-      for {
-        diskCopyDetails <- source.files.traverse(fetchDiskCopyDetails(dest, _))
-        _               <- validateFilesForStorage(dest, diskCopyDetails.map(_.sourceAttributes.bytes))
-        destAttributes  <- diskCopy.copyFiles(dest, diskCopyDetails)
-      } yield {
-        destAttributes
-      }
-
-    private def validateFilesForStorage(destStorage: Storage, sourcesBytes: NonEmptyList[Long]): IO[Unit] = {
-      val maxSize = destStorage.storageValue.maxFileSize
-      IO.raiseWhen(sourcesBytes.exists(_ > maxSize))(SourceFileTooLarge(maxSize, destStorage.id))
-    }
-
-    private def fetchDiskCopyDetails(destStorage: DiskStorage, fileId: FileId)(implicit
-        c: Caller
-    ): IO[DiskCopyDetails] =
-      for {
-        (file, sourceStorage) <- fetchFileAndValidateStorage(fileId)
-        _                     <- validateDiskStorage(destStorage, sourceStorage)
-      } yield DiskCopyDetails(destStorage, file.attributes)
-
-    private def validateDiskStorage(destStorage: DiskStorage, sourceStorage: Storage) =
-      sourceStorage
-        .narrowTo[DiskStorage]
-        .as(IO.unit)
-        .getOrElse(differentStorageTypeError(destStorage, sourceStorage))
-
-    private def fetchRemoteCopyDetails(destStorage: RemoteDiskStorage, fileId: FileId)(implicit c: Caller) =
-      for {
-        (file, sourceStorage) <- fetchFileAndValidateStorage(fileId)
-        sourceBucket          <- validateRemoteStorage(destStorage, sourceStorage)
-        uuid                  <- uuidF()
-      } yield RemoteDiskCopyDetails(
-        uuid,
-        destStorage,
-        file.attributes.path,
-        sourceBucket,
-        FileMetadata.from(file.attributes),
-        FileDescription.from(file)
-      )
-
-    private def validateRemoteStorage(destStorage: RemoteDiskStorage, sourceStorage: Storage) =
-      sourceStorage
-        .narrowTo[RemoteDiskStorage]
-        .map(remote => IO.pure(remote.value.folder))
-        .getOrElse(differentStorageTypeError(destStorage, sourceStorage))
-
-    private def differentStorageTypeError[A](destStorage: Storage, sourceStorage: Storage) =
-      IO.raiseError[A](DifferentStorageTypes(sourceStorage.id, sourceStorage.tpe, destStorage.tpe))
-
-    private def unsupported(tpe: StorageType) = IO.raiseError(CopyFileRejection.UnsupportedOperation(tpe))
-
-    private def fetchFileAndValidateStorage(id: FileId)(implicit c: Caller) = {
-      for {
-        file          <- fetchFile.fetch(id)
-        sourceStorage <- fetchStorage.fetch(file.value.storage, id.project)
-        perm           = sourceStorage.value.storageValue.readPermission
-        _             <- aclCheck.authorizeForOr(id.project, perm)(AuthorizationFailed(id.project, perm))
-      } yield (file.value, sourceStorage.value)
-    }
-  }
-
-}
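The validateDiskStorage/validateRemoteStorage checks above lean on shapeless Typeable via narrowTo, a safe runtime downcast that returns an Option of the requested subtype. A minimal illustration of the mechanism (example types invented; Storage.DiskStorage and RemoteDiskStorage play the same role above):

import shapeless.syntax.typeable._

sealed trait Shape
final case class Circle(radius: Double) extends Shape
final case class Square(side: Double) extends Shape

val shape: Shape = Circle(1.0)
val circle: Option[Circle] = shape.narrowTo[Circle] // Some(Circle(1.0))
val square: Option[Square] = shape.narrowTo[Square] // None: the safe cast fails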
diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/batch/BatchFiles.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/batch/BatchFiles.scala
deleted file mode 100644
index 1e8607b567..0000000000
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/batch/BatchFiles.scala
+++ /dev/null
@@ -1,89 +0,0 @@
-package ch.epfl.bluebrain.nexus.delta.plugins.storage.files.batch
-
-import cats.data.NonEmptyList
-import cats.effect.IO
-import cats.implicits._
-import ch.epfl.bluebrain.nexus.delta.kernel.Logger
-import ch.epfl.bluebrain.nexus.delta.kernel.kamon.KamonMetricComponent
-import ch.epfl.bluebrain.nexus.delta.kernel.utils.UUIDF
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.Files.entityType
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileCommand._
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection.CopyRejection
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model._
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes.CopyFileSource
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.{FetchFileStorage, FileResource}
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageType
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.CopyFileRejection
-import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri
-import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller
-import ch.epfl.bluebrain.nexus.delta.sdk.implicits._
-import ch.epfl.bluebrain.nexus.delta.sdk.projects.FetchContext
-import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.ProjectContext
-import ch.epfl.bluebrain.nexus.delta.sourcing.model.ResourceRef
-
-trait BatchFiles {
-  def copyFiles(
-      source: CopyFileSource,
-      dest: CopyFileDestination
-  )(implicit c: Caller): IO[NonEmptyList[FileResource]]
-}
-
-object BatchFiles {
-  def mk(
-      fetchFileStorage: FetchFileStorage,
-      fetchContext: FetchContext,
-      evalFileCommand: CreateFile => IO[FileResource],
-      batchCopy: BatchCopy
-  )(implicit uuidF: UUIDF): BatchFiles = new BatchFiles {
-
-    private val logger = Logger[BatchFiles]
-
-    implicit private val kamonComponent: KamonMetricComponent = KamonMetricComponent(entityType.value)
-
-    override def copyFiles(source: CopyFileSource, dest: CopyFileDestination)(implicit
-        c: Caller
-    ): IO[NonEmptyList[FileResource]] = {
-      for {
-        pc                            <- fetchContext.onCreate(dest.project)
-        (destStorageRef, destStorage) <- fetchFileStorage.fetchAndValidateActiveStorage(dest.storage, dest.project, pc)
-        destMetadata                  <- batchCopy.copyFiles(source, destStorage).adaptError { case e: CopyFileRejection =>
-                                           CopyRejection(source.project, dest.project, destStorage.id, e)
-                                         }
-        fileResources                 <- createFileResources(pc, dest, destStorageRef, destStorage.tpe, destMetadata)
-      } yield fileResources
-    }.span("copyFiles")
-
-    private def createFileResources(
-        pc: ProjectContext,
-        dest: CopyFileDestination,
-        destStorageRef: ResourceRef.Revision,
-        destStorageTpe: StorageType,
-        destFilesAttributes: NonEmptyList[FileAttributes]
-    )(implicit c: Caller): IO[NonEmptyList[FileResource]] =
-      destFilesAttributes.traverse { case destMetadata =>
-        for {
-          iri      <- generateId(pc)
-          command   =
-            CreateFile(
-              iri,
-              dest.project,
-              destStorageRef,
-              destStorageTpe,
-              destMetadata,
-              c.subject,
-              dest.tag
-            )
-          resource <- evalCreateCommand(command)
-        } yield resource
-      }
-
-    private def generateId(pc: ProjectContext): IO[Iri] =
-      uuidF().map(uuid => pc.base.iri / uuid.toString)
-
-    private def evalCreateCommand(command: CreateFile) =
-      evalFileCommand(command).onError { e =>
-        logger.error(e)(s"Failed storing file copy event, file must be manually deleted: $command")
-      }
-  }
-
-}
diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/CopyFileDestination.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/CopyFileDestination.scala
deleted file mode 100644
index 03426b5b0b..0000000000
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/CopyFileDestination.scala
+++ /dev/null
@@ -1,21 +0,0 @@
-package ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model
-
-import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegment
-import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef
-import ch.epfl.bluebrain.nexus.delta.sourcing.model.Tag.UserTag
-
-/**
-  * Details for the files we're creating in the copy
-  *
-  * @param project
-  *   Organization and project for the new file
-  * @param storage
-  *   Optional storage for the new file which must have the same type as the source file's storage
-  * @param tag
-  *   Optional tag to create the new file with
-  */
-final case class CopyFileDestination(
-    project: ProjectRef,
-    storage: Option[IdSegment],
-    tag: Option[UserTag]
-)
diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileId.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileId.scala
index a55f658f1f..256cefd87e 100644
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileId.scala
+++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileId.scala
@@ -13,6 +13,18 @@ import ch.epfl.bluebrain.nexus.delta.sourcing.model.{ProjectRef, ResourceRef}
 final case class FileId(id: IdSegmentRef, project: ProjectRef) {
   def expandIri(fetchContext: ProjectRef => IO[ProjectContext]): IO[(Iri, ProjectContext)] =
     fetchContext(project).flatMap(pc => iriExpander(id.value, pc).map(iri => (iri, pc)))
+
+  def expandRef(fetchContext: ProjectRef => IO[ProjectContext]): IO[ResourceRef] =
+    fetchContext(project).flatMap { pc =>
+      iriExpander(id.value, pc).map { iri =>
+        id match {
+          case IdSegmentRef.Latest(_)        => ResourceRef.Latest(iri)
+          case IdSegmentRef.Revision(_, rev) => ResourceRef.Revision(iri, rev)
+          case IdSegmentRef.Tag(_, tag)      => ResourceRef.Tag(iri, tag)
+        }
+      }
+    }
 }
 
 object FileId {
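The new FileId.expandRef bridges user input and the ResourceRef-based fetch introduced in Files: the segment is expanded to an iri and the optional rev/tag is carried over. A usage sketch (identifiers illustrative, assuming a fetchContext function in scope):

import cats.effect.IO
import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileId
import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.ProjectContext
import ch.epfl.bluebrain.nexus.delta.sourcing.model.{ProjectRef, ResourceRef}

// Sketch: the three IdSegmentRef flavours map as in expandRef above:
//   IdSegmentRef.Latest(_)        -> ResourceRef.Latest(iri)
//   IdSegmentRef.Revision(_, rev) -> ResourceRef.Revision(iri, rev)
//   IdSegmentRef.Tag(_, tag)      -> ResourceRef.Tag(iri, tag)
def refOf(id: FileId, fetchContext: ProjectRef => IO[ProjectContext]): IO[ResourceRef] =
  id.expandRef(fetchContext)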
diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileRejection.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileRejection.scala
index 4d2c232e5f..ca1f75cef1 100644
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileRejection.scala
+++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileRejection.scala
@@ -4,9 +4,8 @@ import akka.http.scaladsl.model.StatusCodes
 import akka.http.scaladsl.server.{Rejection => AkkaRejection}
 import ch.epfl.bluebrain.nexus.delta.kernel.error.Rejection
 import ch.epfl.bluebrain.nexus.delta.kernel.utils.ClassUtils
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.{CopyFileRejection, FetchFileRejection, SaveFileRejection}
+import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.{FetchFileRejection, SaveFileRejection}
 import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri
 import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary
 import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.ContextValue
@@ -153,14 +152,6 @@ object FileRejection {
    */
   final case class WrappedAkkaRejection(rejection: AkkaRejection) extends FileRejection(rejection.toString)
 
-  /**
-    * Rejection returned when interacting with the storage operations bundle to fetch a storage
-    *
-    * @param rejection
-    *   the rejection which occurred with the storage
-    */
-  final case class WrappedStorageRejection(rejection: StorageRejection) extends FileRejection(rejection.reason)
-
   /**
    * Rejection returned when interacting with the storage operations bundle to fetch a file from a storage
    *
@@ -231,23 +222,12 @@ object FileRejection {
         s"Linking a file cannot be performed without a 'filename' or a 'path' that does not end with a filename."
       )
 
-  final case class CopyRejection(
-      sourceProj: ProjectRef,
-      destProject: ProjectRef,
-      destStorageId: Iri,
-      rejection: CopyFileRejection
-  ) extends FileRejection(
-        s"Failed to copy files from $sourceProj to storage $destStorageId in project $destProject",
-        Some(rejection.loggedDetails)
-      )
-
   implicit val fileRejectionEncoder: Encoder.AsObject[FileRejection] =
     Encoder.AsObject.instance { r =>
       val tpe = ClassUtils.simpleName(r)
       val obj = JsonObject(keywords.tpe -> tpe.asJson, "reason" -> r.reason.asJson)
       r match {
         case WrappedAkkaRejection(rejection)    => rejection.asJsonObject
-        case WrappedStorageRejection(rejection) => rejection.asJsonObject
         case SaveRejection(_, _, rejection)     =>
           obj.add(keywords.tpe, ClassUtils.simpleName(rejection).asJson).add("details", rejection.loggedDetails.asJson)
         case FetchRejection(_, _, rejection)    =>
@@ -256,8 +236,6 @@ object FileRejection {
           obj.add(keywords.tpe, ClassUtils.simpleName(rejection).asJson).add("details", rejection.loggedDetails.asJson)
         case LinkRejection(_, _, rejection)     =>
           obj.add(keywords.tpe, ClassUtils.simpleName(rejection).asJson).add("details", rejection.loggedDetails.asJson)
-        case CopyRejection(_, _, _, rejection)  =>
-          obj.add(keywords.tpe, ClassUtils.simpleName(rejection).asJson).add("details", rejection.loggedDetails.asJson)
         case IncorrectRev(provided, expected)   => obj.add("provided", provided.asJson).add("expected", expected.asJson)
         case _: FileNotFound                    => obj.add(keywords.tpe, "ResourceNotFound".asJson)
         case _                                  => obj
@@ -276,13 +254,11 @@ object FileRejection {
       case IncorrectRev(_, _)                                                   => (StatusCodes.Conflict, Seq.empty)
       case FileTooLarge(_)                                                      => (StatusCodes.PayloadTooLarge, Seq.empty)
       case WrappedAkkaRejection(rej)                                            => (rej.status, rej.headers)
-      case WrappedStorageRejection(rej)                                         => (rej.status, rej.headers)
       // If this happens it signifies a system problem rather than the user having made a mistake
       case FetchRejection(_, _, FetchFileRejection.FileNotFound(_))             => (StatusCodes.InternalServerError, Seq.empty)
       case SaveRejection(_, _, SaveFileRejection.ResourceAlreadyExists(_))      => (StatusCodes.Conflict, Seq.empty)
      case SaveRejection(_, _, SaveFileRejection.BucketAccessDenied(_, _, _))   => (StatusCodes.Forbidden, Seq.empty)
       case SaveRejection(_, _, SaveFileRejection.FileContentLengthIsMissing)    => (StatusCodes.BadRequest, Seq.empty)
-      case CopyRejection(_, _, _, rejection)                                    => (rejection.status, Seq.empty)
       case FetchRejection(_, _, _)                                              => (StatusCodes.InternalServerError, Seq.empty)
       case SaveRejection(_, _, _)                                               => (StatusCodes.InternalServerError, Seq.empty)
       case _                                                                    => (StatusCodes.BadRequest, Seq.empty)
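For reference, the encoder kept above emits a flat JSON object whose type field is the rejection's simple class name. A sketch of the shape, assuming keywords.tpe renders as "@type" and with the reason text abbreviated:

import io.circe.syntax._

// Sketch only, based on the encoder above:
// (IncorrectRev(provided = 2, expected = 3): FileRejection).asJson yields roughly
// { "@type": "IncorrectRev", "reason": "...", "provided": 2, "expected": 3 }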
diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/BatchFilesRoutes.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/BatchFilesRoutes.scala
deleted file mode 100644
index 408f54c14c..0000000000
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/BatchFilesRoutes.scala
+++ /dev/null
@@ -1,80 +0,0 @@
-package ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes
-
-import akka.http.scaladsl.model.StatusCodes.Created
-import akka.http.scaladsl.server._
-import cats.data.EitherT
-import cats.effect.IO
-import cats.syntax.all._
-import ch.epfl.bluebrain.nexus.delta.kernel.Logger
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.batch.BatchFiles
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{CopyFileDestination, File, FileRejection}
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.permissions.{read => Read}
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.{contexts, FileResource}
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.ShowFileLocation
-import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.{ContextValue, RemoteContextResolution}
-import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.encoder.JsonLdEncoder
-import ch.epfl.bluebrain.nexus.delta.rdf.utils.JsonKeyOrdering
-import ch.epfl.bluebrain.nexus.delta.sdk._
-import ch.epfl.bluebrain.nexus.delta.sdk.acls.AclCheck
-import ch.epfl.bluebrain.nexus.delta.sdk.circe.CirceUnmarshalling
-import ch.epfl.bluebrain.nexus.delta.sdk.directives.AuthDirectives
-import ch.epfl.bluebrain.nexus.delta.sdk.directives.DeltaDirectives._
-import ch.epfl.bluebrain.nexus.delta.sdk.error.ServiceError.AuthorizationFailed
-import ch.epfl.bluebrain.nexus.delta.sdk.identities.Identities
-import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller
-import ch.epfl.bluebrain.nexus.delta.sdk.jsonld.BulkOperationResults
-import ch.epfl.bluebrain.nexus.delta.sdk.model.{BaseUri, IdSegment}
-
-final class BatchFilesRoutes(
-    identities: Identities,
-    aclCheck: AclCheck,
-    batchFiles: BatchFiles,
-    index: IndexingAction.Execute[File]
-)(implicit
-    baseUri: BaseUri,
-    showLocation: ShowFileLocation,
-    cr: RemoteContextResolution,
-    ordering: JsonKeyOrdering
-) extends AuthDirectives(identities, aclCheck)
-    with CirceUnmarshalling { self =>
-
-  private val logger = Logger[BatchFilesRoutes]
-
-  implicit val bulkOpJsonLdEnc: JsonLdEncoder[BulkOperationResults[FileResource]] =
-    BulkOperationResults.searchResultsJsonLdEncoder(ContextValue(contexts.files))
-
-  def routes: Route =
-    baseUriPrefix(baseUri.prefix) {
-      pathPrefix("bulk") {
-        pathPrefix("files") {
-          extractCaller { implicit caller =>
-            projectRef { project =>
-              (post & pathEndOrSingleSlash & parameter("storage".as[IdSegment].?) & indexingMode & tagParam) {
& indexingMode & tagParam) { - (storage, mode, tag) => - // Bulk create files by copying from another project - entity(as[CopyFileSource]) { c: CopyFileSource => - val copyTo = CopyFileDestination(project, storage, tag) - emit(Created, copyFiles(mode, c, copyTo)) - } - } - } - } - } - } - } - - private def copyFiles(mode: IndexingMode, source: CopyFileSource, dest: CopyFileDestination)(implicit - caller: Caller - ): IO[Either[FileRejection, BulkOperationResults[FileResource]]] = - (for { - _ <- - EitherT.right(aclCheck.authorizeForOr(source.project, Read)(AuthorizationFailed(source.project.project, Read))) - results <- EitherT(batchFiles.copyFiles(source, dest).attemptNarrow[FileRejection]) - _ <- EitherT.right[FileRejection](results.traverse(index(dest.project, _, mode))) - _ <- EitherT.right[FileRejection](logger.info(s"Bulk file copy succeeded with results: $results")) - } yield BulkOperationResults(results.toList)) - .onError(e => - EitherT.right(logger.error(e)(s"Bulk file copy operation failed for source $source and destination $dest")) - ) - .value -} diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/CopyFileSource.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/CopyFileSource.scala deleted file mode 100644 index c259fa68e8..0000000000 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/CopyFileSource.scala +++ /dev/null @@ -1,42 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes - -import cats.data.NonEmptyList -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileId -import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegment -import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef -import ch.epfl.bluebrain.nexus.delta.sourcing.model.Tag.UserTag -import io.circe.{Decoder, DecodingFailure, Json} - -final case class CopyFileSource( - project: ProjectRef, - files: NonEmptyList[FileId] -) - -object CopyFileSource { - - implicit val dec: Decoder[CopyFileSource] = Decoder.instance { cur => - def parseSingle(j: Json, proj: ProjectRef): Decoder.Result[FileId] = - for { - sourceFile <- j.hcursor.get[String]("sourceFileId").map(IdSegment(_)) - sourceTag <- j.hcursor.get[Option[UserTag]]("sourceTag") - sourceRev <- j.hcursor.get[Option[Int]]("sourceRev") - fileId <- parseFileId(sourceFile, proj, sourceTag, sourceRev) - } yield fileId - - def parseFileId(id: IdSegment, proj: ProjectRef, sourceTag: Option[UserTag], sourceRev: Option[Int]) = - (sourceTag, sourceRev) match { - case (Some(tag), None) => Right(FileId(id, tag, proj)) - case (None, Some(rev)) => Right(FileId(id, rev, proj)) - case (None, None) => Right(FileId(id, proj)) - case (Some(_), Some(_)) => - Left( - DecodingFailure("Tag and revision cannot be simultaneously present for source file lookup", Nil) - ) - } - - for { - sourceProj <- cur.get[ProjectRef]("sourceProjectRef") - files <- cur.get[NonEmptyList[Json]]("files").flatMap(_.traverse(parseSingle(_, sourceProj))) - } yield CopyFileSource(sourceProj, files) - } -}
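For reference, a minimal sketch (not part of the change) of the payload shape the removed decoder accepted. Field names come from the decoder above; the project, file ids, tag and revision values are invented. Per file, "sourceTag" and "sourceRev" are mutually exclusive, and omitting both targets the latest revision.

import io.circe.parser.decode

// Hypothetical request body for the removed bulk copy endpoint.
val payload =
  """{
    |  "sourceProjectRef": "myorg/myproject",
    |  "files": [
    |    { "sourceFileId": "logo.png", "sourceTag": "v1.0" },
    |    { "sourceFileId": "data.csv", "sourceRev": 2 },
    |    { "sourceFileId": "notes.txt" }
    |  ]
    |}""".stripMargin

// Yields Right(CopyFileSource(...)) for well-formed input, and Left(DecodingFailure)
// when both "sourceTag" and "sourceRev" are present on the same file.
val parsed = decode[CopyFileSource](payload)

diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutes.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutes.scala index 39f68d08ba..dcea506b0b 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutes.scala +++ 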
b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutes.scala @@ -11,13 +11,14 @@ import cats.syntax.all._ import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection._ import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model._ import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.permissions.{read => Read, write => Write} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes.FileUriDirectives._ import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes.FilesRoutes.LinkFileRequest.{fileDescriptionFromRequest, linkFileDecoder} import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes.FilesRoutes._ import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.{schemas, FileResource, Files} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragePluginExceptionHandler.handleStorageExceptions import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.ShowFileLocation import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.RemoteContextResolution import ch.epfl.bluebrain.nexus.delta.rdf.utils.JsonKeyOrdering -import FileUriDirectives._ import ch.epfl.bluebrain.nexus.delta.sdk._ import ch.epfl.bluebrain.nexus.delta.sdk.acls.AclCheck import ch.epfl.bluebrain.nexus.delta.sdk.circe.CirceUnmarshalling @@ -28,8 +29,8 @@ import ch.epfl.bluebrain.nexus.delta.sdk.fusion.FusionConfig import ch.epfl.bluebrain.nexus.delta.sdk.identities.Identities import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller import ch.epfl.bluebrain.nexus.delta.sdk.implicits._ -import ch.epfl.bluebrain.nexus.delta.sdk.model.routes.Tag import ch.epfl.bluebrain.nexus.delta.sdk.model.BaseUri +import ch.epfl.bluebrain.nexus.delta.sdk.model.routes.Tag import io.circe.generic.extras.Configuration import io.circe.generic.extras.semiauto.deriveConfiguredDecoder import io.circe.{parser, Decoder} @@ -67,7 +68,7 @@ final class FilesRoutes( def routes: Route = (baseUriPrefix(baseUri.prefix) & replaceUri("files", schemas.files)) { - pathPrefix("files") { + (handleStorageExceptions & pathPrefix("files")) { extractCaller { implicit caller => projectRef { project => implicit class IndexOps(io: IO[FileResource]) { @@ -86,14 +87,13 @@ final class FilesRoutes( .createLegacyLink(storage, project, desc, linkRequest.path, tag) .index(mode) } - .attemptNarrow[FileRejection] ) }, // Create a file without id segment uploadRequest { request => emit( Created, - files.create(storage, project, request, tag).index(mode).attemptNarrow[FileRejection] + files.create(storage, project, request, tag).index(mode) ) } ) @@ -123,7 +123,6 @@ final class FilesRoutes( ) .index(mode) } - .attemptNarrow[FileRejection] ) }, // Update a file @@ -132,22 +131,14 @@ final class FilesRoutes( files .update(fileId, storage, rev, request, tag) .index(mode) - .attemptNarrow[FileRejection] ) }, // Update custom metadata (requestEntityEmpty & extractFileMetadata & authorizeFor(project, Write)) { case Some(FileCustomMetadata.empty) => - emit( - IO.raiseError[FileResource](EmptyCustomMetadata).attemptNarrow[FileRejection] - ) + emit(IO.raiseError[FileResource](EmptyCustomMetadata)) case Some(metadata) => - emit( - files - .updateMetadata(fileId, rev, metadata, tag) - .index(mode) - .attemptNarrow[FileRejection] - ) + emit(files.updateMetadata(fileId, rev, metadata, tag).index(mode)) case None => reject } ) @@ -164,7 +155,6 @@ final class FilesRoutes( .createLegacyLink(fileId, storage, description, linkRequest.path, tag) .index(mode) } - 
.attemptNarrow[FileRejection] ) }, // Create a file with id segment @@ -174,7 +164,6 @@ final class FilesRoutes( files .create(fileId, storage, request, tag) .index(mode) - .attemptNarrow[FileRejection] ) } ) @@ -215,10 +204,7 @@ final class FilesRoutes( (post & revParam & pathEndOrSingleSlash) { rev => authorizeFor(project, Write).apply { entity(as[Tag]) { case Tag(tagRev, tag) => - emit( - Created, - files.tag(fileId, tag, tagRev, rev).index(mode).attemptNarrow[FileRejection] - ) + emit(Created, files.tag(fileId, tag, tagRev, rev).index(mode)) } } }, diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/FetchStorage.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/FetchStorage.scala index d42160c0cc..3512017226 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/FetchStorage.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/FetchStorage.scala @@ -1,45 +1,54 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages import cats.effect.IO -import cats.implicits.catsSyntaxMonadError -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection.WrappedStorageRejection -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection.StorageFetchRejection -import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegmentRef +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection.StorageIsDeprecated +import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri +import ch.epfl.bluebrain.nexus.delta.sdk.acls.AclCheck +import ch.epfl.bluebrain.nexus.delta.sdk.error.ServiceError.AuthorizationFailed +import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller +import ch.epfl.bluebrain.nexus.delta.sdk.permissions.model.Permission +import ch.epfl.bluebrain.nexus.delta.sourcing.model.ResourceRef.Latest import ch.epfl.bluebrain.nexus.delta.sourcing.model.{ProjectRef, ResourceRef} trait FetchStorage { /** - * Fetch the storage using the ''resourceRef'' - * - * @param resourceRef - * the storage reference (Latest, Revision or Tag) - * @param project - * the project where the storage belongs + * Attempts to fetch the storage in a read context and validates if the current user has access to it */ - final def fetch[R <: Throwable]( - resourceRef: ResourceRef, - project: ProjectRef - ): IO[StorageResource] = - fetch(IdSegmentRef(resourceRef), project).adaptError { case err: StorageFetchRejection => - WrappedStorageRejection(err) - } + def onRead(id: ResourceRef, project: ProjectRef)(implicit caller: Caller): IO[Storage] /** - * Fetch the last version of a storage - * - * @param id - * the identifier that will be expanded to the Iri of the storage with its optional rev/tag - * @param project - * the project where the storage belongs + * Attempts to fetch the provided storage or the default one in a write context */ - def fetch(id: IdSegmentRef, project: ProjectRef): IO[StorageResource] + def onWrite(id: Option[Iri], project: ProjectRef)(implicit + caller: Caller + ): IO[(ResourceRef.Revision, Storage)] +} + +object FetchStorage { + + def apply(storages: Storages, aclCheck: AclCheck): FetchStorage = new FetchStorage { + + override def onRead(id: ResourceRef, project: ProjectRef)(implicit caller: Caller): IO[Storage] = + storages.fetch(id, 
project).map(_.value).flatTap { storage => + validateAuth(project, storage.storageValue.readPermission) + } + + override def onWrite(id: Option[Iri], project: ProjectRef)(implicit + caller: Caller + ): IO[(ResourceRef.Revision, Storage)] = + for { + storage <- id match { + case Some(id) => storages.fetch(Latest(id), project) + case None => storages.fetchDefault(project) + } + _ <- IO.raiseWhen(storage.deprecated)(StorageIsDeprecated(storage.id)) + _ <- validateAuth(project, storage.value.storageValue.writePermission) + } yield ResourceRef.Revision(storage.id, storage.rev) -> storage.value + + private def validateAuth(project: ProjectRef, permission: Permission)(implicit c: Caller): IO[Unit] = + aclCheck.authorizeForOr(project, permission)(AuthorizationFailed(project, permission)) + } - /** - * Fetches the default storage for a project. - * - * @param project - * the project where to look for the default storage - */ - def fetchDefault(project: ProjectRef): IO[StorageResource] } diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragePluginExceptionHandler.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragePluginExceptionHandler.scala new file mode 100644 index 0000000000..f11c6cc181 --- /dev/null +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragePluginExceptionHandler.scala @@ -0,0 +1,27 @@ +package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages + +import akka.http.scaladsl.server.Directives.handleExceptions +import akka.http.scaladsl.server.{Directive0, ExceptionHandler} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection +import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.RemoteContextResolution +import ch.epfl.bluebrain.nexus.delta.rdf.utils.JsonKeyOrdering +import ch.epfl.bluebrain.nexus.delta.sdk.directives.DeltaDirectives.discardEntityAndForceEmit +import ch.epfl.bluebrain.nexus.delta.sdk.marshalling.RdfExceptionHandler +import ch.epfl.bluebrain.nexus.delta.sdk.model.BaseUri + +object StoragePluginExceptionHandler { + + def apply(implicit baseUri: BaseUri, cr: RemoteContextResolution, ordering: JsonKeyOrdering): ExceptionHandler = + ExceptionHandler { + case err: StorageRejection => discardEntityAndForceEmit(err) + case err: FileRejection => discardEntityAndForceEmit(err) + }.withFallback(RdfExceptionHandler.apply) + + def handleStorageExceptions(implicit + baseUri: BaseUri, + cr: RemoteContextResolution, + ordering: JsonKeyOrdering + ): Directive0 = handleExceptions(StoragePluginExceptionHandler.apply) + +}
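To make the new contracts concrete, a brief usage sketch, illustrative only: it assumes a `fetchStorage: FetchStorage`, a `project: ProjectRef`, a `storageIri: Iri` and an implicit `caller: Caller` in scope.

// Read path: resolve the storage behind a file and enforce its read permission;
// fails with AuthorizationFailed when the caller lacks access.
val forDownload: IO[Storage] =
  fetchStorage.onRead(ResourceRef.Revision(storageIri, 3), project)

// Write path: resolve the requested storage, or the project default when None is
// given, rejecting deprecated storages and enforcing the write permission.
val forUpload: IO[(ResourceRef.Revision, Storage)] =
  fetchStorage.onWrite(None, project)

Routes no longer need to map these failures by hand: the StoragePluginExceptionHandler above turns any StorageRejection or FileRejection raised inside the wrapped routes into the corresponding HTTP response, falling back to RdfExceptionHandler for everything else.

diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/Storages.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/Storages.scala index 274456f632..d3a616ffbc 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/Storages.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/Storages.scala @@ -21,7 +21,6 @@ import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.{Caller, ServiceAccoun import ch.epfl.bluebrain.nexus.delta.sdk.implicits._ import ch.epfl.bluebrain.nexus.delta.sdk.jsonld.ExpandIri import ch.epfl.bluebrain.nexus.delta.sdk.jsonld.JsonLdSourceProcessor.JsonLdSourceResolvingDecoder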
-import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegmentRef.{Latest, Revision, Tag} import ch.epfl.bluebrain.nexus.delta.sdk.model._ import ch.epfl.bluebrain.nexus.delta.sdk.permissions.model.Permission import ch.epfl.bluebrain.nexus.delta.sdk.projects.FetchContext @@ -29,7 +28,7 @@ import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.ApiMappings import ch.epfl.bluebrain.nexus.delta.sdk.resolvers.ResolverContextResolution import ch.epfl.bluebrain.nexus.delta.sourcing._ import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.Subject -import ch.epfl.bluebrain.nexus.delta.sourcing.model.{EntityType, ProjectRef, SuccessElemStream} +import ch.epfl.bluebrain.nexus.delta.sourcing.model.{EntityType, ProjectRef, ResourceRef, SuccessElemStream} import fs2.Stream import io.circe.Json import org.typelevel.log4cats @@ -44,7 +43,7 @@ final class Storages private ( fetchContext: FetchContext, sourceDecoder: JsonLdSourceResolvingDecoder[StorageFields], serviceAccount: ServiceAccount -) extends FetchStorage { +) { implicit private val kamonComponent: KamonMetricComponent = KamonMetricComponent(entityType.value) @@ -225,25 +224,34 @@ final class Storages private ( } yield res }.span("undeprecateStorage") - override def fetch(id: IdSegmentRef, project: ProjectRef): IO[StorageResource] = { + def fetch(idSegment: IdSegmentRef, project: ProjectRef): IO[StorageResource] = { for { - pc <- fetchContext.onRead(project) - iri <- expandIri(id.value, pc) - notFound = StorageNotFound(iri, project) - state <- id match { - case Latest(_) => log.stateOr(project, iri, notFound) - case Revision(_, rev) => log.stateOr(project, iri, rev, notFound, RevisionNotFound) - case t: Tag => IO.raiseError(FetchByTagNotSupported(t)) - } - } yield state.toResource - }.span("fetchStorage") + pc <- fetchContext.onRead(project) + id <- expandIri(idSegment.value, pc) + resourceRef = idSegment match { + case IdSegmentRef.Latest(_) => ResourceRef.Latest(id) + case IdSegmentRef.Revision(_, rev) => ResourceRef.Revision(id, rev) + case IdSegmentRef.Tag(_, tag) => ResourceRef.Tag(id, tag) + } + storage <- fetch(resourceRef, project) + } yield storage + } + + def fetch(resourceRef: ResourceRef, project: ProjectRef): IO[StorageResource] = { + resourceRef match { + case ResourceRef.Latest(id) => log.stateOr(project, id, StorageNotFound(id, project)) + case ResourceRef.Revision(_, id, rev) => + log.stateOr(project, id, rev, StorageNotFound(id, project), RevisionNotFound) + case t: ResourceRef.Tag => IO.raiseError(FetchByTagNotSupported(t)) + } + }.map(_.toResource).span("fetchStorage") private def fetchDefaults(project: ProjectRef): Stream[IO, StorageResource] = log .currentStates(Scope.Project(project), _.toResource) .filter(_.value.default) - override def fetchDefault(project: ProjectRef): IO[StorageResource] = { + def fetchDefault(project: ProjectRef): IO[StorageResource] = { for { defaultOpt <- fetchDefaults(project).reduce(updatedByDesc.min(_, _)).head.compile.last default <- IO.fromOption(defaultOpt)(DefaultStorageNotFound(project)) diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/Storage.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/Storage.scala index 122c27fe7a..2ff24fcd9a 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/Storage.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/Storage.scala @@ 
-18,7 +18,7 @@ sealed trait Storage extends Product with Serializable { /** * @return - * the view id + * the storage id */ def id: Iri diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageRejection.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageRejection.scala index 4ca446ef31..35fd94dc7e 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageRejection.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageRejection.scala @@ -9,9 +9,8 @@ import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.ContextValue import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.JsonLdContext.keywords import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.encoder.JsonLdEncoder import ch.epfl.bluebrain.nexus.delta.sdk.marshalling.HttpResponseFields -import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegmentRef import ch.epfl.bluebrain.nexus.delta.sdk.permissions.model.Permission -import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef +import ch.epfl.bluebrain.nexus.delta.sourcing.model.{ProjectRef, ResourceRef} import io.circe.syntax._ import io.circe.{Encoder, JsonObject} @@ -42,9 +41,9 @@ object StorageRejection { final case class RevisionNotFound(provided: Int, current: Int) extends StorageFetchRejection(s"Revision requested '$provided' not found, last known revision is '$current'.") - final case class FetchByTagNotSupported(tag: IdSegmentRef.Tag) + final case class FetchByTagNotSupported(tag: ResourceRef.Tag) extends StorageFetchRejection( - s"Fetching storages by tag is no longer supported. Id ${tag.value.asString} and tag ${tag.tag.value}" + s"Fetching storages by tag is no longer supported. Id ${tag.iri} and tag ${tag.tag.value}" ) /**
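A quick illustration of the reworked lookup in Storages.scala above, tying it to this rejection change. Identifiers are invented, and `UserTag.unsafe` plus the `ResourceRef` constructors are assumed from the sourcing module.

// Latest state and explicit revisions resolve as before.
storages.fetch(ResourceRef.Latest(storageIri), project)
storages.fetch(ResourceRef.Revision(storageIri, 4), project)
// Tags remain unsupported: this fails with FetchByTagNotSupported, which now
// carries the ResourceRef.Tag itself rather than an IdSegmentRef.
storages.fetch(ResourceRef.Tag(storageIri, UserTag.unsafe("v1.0")), project)

diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/StorageFileRejection.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/StorageFileRejection.scala index afbeb28eae..fd2344c2b2 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/StorageFileRejection.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/StorageFileRejection.scala @@ -1,12 +1,9 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations -import akka.http.scaladsl.model.{StatusCodes, Uri} +import akka.http.scaladsl.model.Uri import ch.epfl.bluebrain.nexus.delta.kernel.error.Rejection import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageType -import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri import ch.epfl.bluebrain.nexus.delta.sdk.NexusHeaders -import ch.epfl.bluebrain.nexus.delta.sdk.http.HttpClientError -import ch.epfl.bluebrain.nexus.delta.sdk.marshalling.HttpResponseFields /** * Enumeration of Storage rejections related to file operations. 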
@@ -70,43 +67,6 @@ object StorageFileRejection { extends FetchAttributeRejection(rejection.loggedDetails) } - sealed abstract class CopyFileRejection(loggedDetails: String) extends StorageFileRejection(loggedDetails) - - object CopyFileRejection { - final case class UnsupportedOperation(tpe: StorageType) - extends CopyFileRejection( - s"Copying a file attributes is not supported for storages of type '${tpe.iri}'" - ) - - final case class SourceFileTooLarge(maxSize: Long, storageId: Iri) - extends CopyFileRejection( - s"Source file size exceeds maximum $maxSize on destination storage $storageId" - ) - - final case class TotalCopySizeTooLarge(totalSize: Long, spaceLeft: Long, storageId: Iri) - extends CopyFileRejection( - s"Combined size of source files ($totalSize) exceeds space ($spaceLeft) on destination storage $storageId" - ) - - final case class RemoteDiskClientError(underlying: HttpClientError) - extends CopyFileRejection( - s"Error from remote disk storage client: ${underlying.asString}" - ) - - final case class DifferentStorageTypes(id: Iri, source: StorageType, dest: StorageType) - extends CopyFileRejection( - s"Source storage $id of type $source cannot be different to the destination storage type $dest" - ) - - implicit val statusCodes: HttpResponseFields[CopyFileRejection] = HttpResponseFields { - case _: UnsupportedOperation => StatusCodes.BadRequest - case _: SourceFileTooLarge => StatusCodes.BadRequest - case _: TotalCopySizeTooLarge => StatusCodes.BadRequest - case _: DifferentStorageTypes => StatusCodes.BadRequest - case _: RemoteDiskClientError => StatusCodes.InternalServerError - } - } - /** * Rejection returned when a storage cannot save a file */ diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/disk/DiskCopyDetails.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/disk/DiskCopyDetails.scala deleted file mode 100644 index 6a2082f602..0000000000 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/disk/DiskCopyDetails.scala +++ /dev/null @@ -1,9 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.disk - -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.LimitedFileAttributes -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.DiskStorage - -final case class DiskCopyDetails( - destStorage: DiskStorage, - sourceAttributes: LimitedFileAttributes -) diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/disk/DiskStorageCopyFiles.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/disk/DiskStorageCopyFiles.scala deleted file mode 100644 index cd4671543b..0000000000 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/disk/DiskStorageCopyFiles.scala +++ /dev/null @@ -1,62 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.disk - -import akka.http.scaladsl.model.Uri -import cats.data.NonEmptyList -import cats.effect.IO -import ch.epfl.bluebrain.nexus.delta.kernel.utils.{CopyBetween, TransactionalFileCopier, UUIDF} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.DiskStorage -import 
ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.disk.DiskStorageSaveFile.computeLocation -import fs2.io.file.Path - -import java.nio.file -import java.util.UUID - -trait DiskStorageCopyFiles { - def copyFiles(destStorage: DiskStorage, details: NonEmptyList[DiskCopyDetails]): IO[NonEmptyList[FileAttributes]] -} - -object DiskStorageCopyFiles { - def mk(copier: TransactionalFileCopier, uuidf: UUIDF): DiskStorageCopyFiles = new DiskStorageCopyFiles { - - def copyFiles(destStorage: DiskStorage, details: NonEmptyList[DiskCopyDetails]): IO[NonEmptyList[FileAttributes]] = - details - .traverse(mkCopyDetailsAndDestAttributes(destStorage, _)) - .flatMap { copyDetailsAndDestAttributes => - val copyDetails = copyDetailsAndDestAttributes.map(_._1) - val destDetails = copyDetailsAndDestAttributes.map { case (_, attributes) => - attributes - } - copier.copyAll(copyDetails).as(destDetails) - } - - private def mkCopyDetailsAndDestAttributes(destStorage: DiskStorage, copyFile: DiskCopyDetails) = - for { - sourcePath <- absoluteDiskPathFromAttributes(copyFile.sourceAttributes) - uuid <- uuidf() - (destPath, destRelativePath) <- computeDestLocation(uuid, destStorage, copyFile) - destAttr = mkDestAttributes(uuid, copyFile, destPath, destRelativePath) - copyDetails <- absoluteDiskPathFromAttributes(destAttr).map { dest => - CopyBetween(Path.fromNioPath(sourcePath), Path.fromNioPath(dest)) - } - } yield (copyDetails, destAttr) - - private def computeDestLocation(uuid: UUID, destStorage: DiskStorage, cd: DiskCopyDetails) = - computeLocation(destStorage.project, destStorage.value.volume, cd.sourceAttributes.filename, uuid) - - private def mkDestAttributes(uuid: UUID, cd: DiskCopyDetails, destPath: file.Path, destRelativePath: file.Path) = - FileAttributes( - uuid = uuid, - location = Uri(destPath.toUri.toString), - path = Uri.Path(destRelativePath.toString), - filename = cd.sourceAttributes.filename, - description = cd.sourceAttributes.description, - name = cd.sourceAttributes.name, - mediaType = cd.sourceAttributes.mediaType, - keywords = cd.sourceAttributes.keywords, - bytes = cd.sourceAttributes.bytes, - digest = cd.sourceAttributes.digest, - origin = cd.sourceAttributes.origin - ) - } -} diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteDiskStorageCopyFiles.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteDiskStorageCopyFiles.scala deleted file mode 100644 index 3a0573e092..0000000000 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteDiskStorageCopyFiles.scala +++ /dev/null @@ -1,68 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote - -import akka.http.scaladsl.model.Uri -import akka.http.scaladsl.model.Uri.Path -import cats.data.NonEmptyList -import cats.effect.IO -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{FileAttributes, FileCustomMetadata} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.RemoteDiskStorage -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.FileOperations.intermediateFolders -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.RemoteDiskStorageClient -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.model.{RemoteDiskCopyDetails, RemoteDiskCopyPaths} - -trait 
RemoteDiskStorageCopyFiles { - def copyFiles( - destStorage: RemoteDiskStorage, - copyDetails: NonEmptyList[RemoteDiskCopyDetails] - ): IO[NonEmptyList[FileAttributes]] -} - -object RemoteDiskStorageCopyFiles { - - def mk(client: RemoteDiskStorageClient): RemoteDiskStorageCopyFiles = new RemoteDiskStorageCopyFiles { - def copyFiles( - destStorage: RemoteDiskStorage, - copyDetails: NonEmptyList[RemoteDiskCopyDetails] - ): IO[NonEmptyList[FileAttributes]] = { - - val paths = remoteDiskCopyPaths(destStorage, copyDetails) - - client.copyFiles(destStorage.value.folder, paths).map { destPaths => - copyDetails.zip(paths).zip(destPaths).map { case ((copyDetails, remoteCopyPaths), absoluteDestPath) => - mkDestAttributes(copyDetails, remoteCopyPaths.destPath, absoluteDestPath) - } - } - } - } - - private def mkDestAttributes( - cd: RemoteDiskCopyDetails, - relativeDestPath: Path, - absoluteDestPath: Uri - ): FileAttributes = { - val sourceFileMetadata = cd.sourceMetadata - val sourceFileDescription = cd.sourceUserSuppliedMetadata - val customMetadata = sourceFileDescription.metadata.getOrElse(FileCustomMetadata.empty) - FileAttributes( - uuid = cd.destUuid, - location = absoluteDestPath, - path = relativeDestPath, - filename = sourceFileDescription.filename, - mediaType = sourceFileDescription.mediaType, - keywords = customMetadata.keywords.getOrElse(Map.empty), - description = customMetadata.description, - name = customMetadata.name, - bytes = sourceFileMetadata.bytes, - digest = sourceFileMetadata.digest, - origin = sourceFileMetadata.origin - ) - } - - private def remoteDiskCopyPaths(destStorage: RemoteDiskStorage, copyDetails: NonEmptyList[RemoteDiskCopyDetails]) = - copyDetails.map { cd => - val destinationPath = - Uri.Path(intermediateFolders(destStorage.project, cd.destUuid, cd.sourceUserSuppliedMetadata.filename)) - val sourcePath = cd.sourcePath - RemoteDiskCopyPaths(cd.sourceBucket, sourcePath, destinationPath) - } -} diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/RemoteDiskStorageClient.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/RemoteDiskStorageClient.scala index 230316440a..1123a78697 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/RemoteDiskStorageClient.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/RemoteDiskStorageClient.scala @@ -2,20 +2,18 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote import akka.actor.ActorSystem import akka.http.scaladsl.client.RequestBuilding._ +import akka.http.scaladsl.model.BodyPartEntity import akka.http.scaladsl.model.Multipart.FormData import akka.http.scaladsl.model.Multipart.FormData.BodyPart import akka.http.scaladsl.model.StatusCodes._ import akka.http.scaladsl.model.Uri.Path -import akka.http.scaladsl.model.{BodyPartEntity, Uri} -import cats.data.NonEmptyList import cats.effect.IO import cats.implicits._ import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.RemoteDiskStorageConfig import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.FetchFileRejection.UnexpectedFetchError import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.MoveFileRejection.UnexpectedMoveError -import 
ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.{CopyFileRejection, FetchFileRejection, MoveFileRejection, SaveFileRejection} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.model.{RemoteDiskCopyPaths, RemoteDiskStorageFileAttributes} -import ch.epfl.bluebrain.nexus.delta.rdf.implicits.uriDecoder +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.{FetchFileRejection, MoveFileRejection, SaveFileRejection} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.model.RemoteDiskStorageFileAttributes import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.JsonLdContext.keywords import ch.epfl.bluebrain.nexus.delta.sdk.AkkaSource import ch.epfl.bluebrain.nexus.delta.sdk.auth.{AuthTokenProvider, Credentials} @@ -106,20 +104,6 @@ trait RemoteDiskStorageClient { sourceRelativePath: Path, destRelativePath: Path ): IO[RemoteDiskStorageFileAttributes] - - /** - * Copies files to a destination bucket. Source files can be located within different buckets. File attributes are - * not recomputed, so only the file paths will change. - * - * If any copies fail the whole operation will be aborted and the remote storage service will return an error. - * - * @return - * Absolute locations of the created files, preserving the input order. - */ - def copyFiles( - destBucket: Label, - files: NonEmptyList[RemoteDiskCopyPaths] - ): IO[NonEmptyList[Uri]] } object RemoteDiskStorageClient { @@ -226,21 +210,6 @@ object RemoteDiskStorageClient { } } - def copyFiles( - destBucket: Label, - files: NonEmptyList[RemoteDiskCopyPaths] - ): IO[NonEmptyList[Uri]] = - getAuthToken(credentials).flatMap { authToken => - val endpoint = baseUri.endpoint / "buckets" / destBucket.value / "files" - - implicit val dec: Decoder[NonEmptyList[Uri]] = Decoder[NonEmptyList[Json]].emap { nel => - nel.traverse(_.hcursor.get[Uri]("absoluteDestinationLocation").leftMap(_.toString())) - } - client - .fromJsonTo[NonEmptyList[Uri]](Post(endpoint, files.asJson).withCredentials(authToken)) - .adaptError { case error: HttpClientError => CopyFileRejection.RemoteDiskClientError(error) } - } - private def bucketNotFoundType(error: HttpClientError): Boolean = error.jsonBody.fold(false)(_.hcursor.get[String](keywords.tpe).toOption.contains("BucketNotFound")) @@ -275,9 +244,6 @@ object RemoteDiskStorageClient { sourceRelativePath: Path, destRelativePath: Path ): IO[RemoteDiskStorageFileAttributes] = disabledError - - override def copyFiles(destBucket: Label, files: NonEmptyList[RemoteDiskCopyPaths]): IO[NonEmptyList[Uri]] = - disabledError } def apply(client: HttpClient, authTokenProvider: AuthTokenProvider, configOpt: Option[RemoteDiskStorageConfig])( diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/model/RemoteDiskCopyDetails.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/model/RemoteDiskCopyDetails.scala deleted file mode 100644 index e6f886734b..0000000000 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/model/RemoteDiskCopyDetails.scala +++ /dev/null @@ -1,17 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.model - -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{FileDescription, FileMetadata} -import 
ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.RemoteDiskStorage -import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label - -import akka.http.scaladsl.model.Uri.Path -import java.util.UUID - -final case class RemoteDiskCopyDetails( - destUuid: UUID, - destStorage: RemoteDiskStorage, - sourcePath: Path, - sourceBucket: Label, - sourceMetadata: FileMetadata, - sourceUserSuppliedMetadata: FileDescription -) diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/model/RemoteDiskCopyPaths.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/model/RemoteDiskCopyPaths.scala deleted file mode 100644 index c19726b430..0000000000 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/model/RemoteDiskCopyPaths.scala +++ /dev/null @@ -1,19 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.model - -import akka.http.scaladsl.model.Uri.Path -import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label -import io.circe.syntax.KeyOps -import io.circe.{Encoder, Json} - -final case class RemoteDiskCopyPaths( - sourceBucket: Label, - sourcePath: Path, - destPath: Path -) - -object RemoteDiskCopyPaths { - implicit val enc: Encoder[RemoteDiskCopyPaths] = Encoder.instance { - case RemoteDiskCopyPaths(sourceBucket, source, dest) => - Json.obj("sourceBucket" := sourceBucket, "source" := source.toString(), "destination" := dest.toString()) - } -}
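For the record, a sketch of the wire format the encoder above produced for the removed remote bulk-copy call; bucket and path values are invented, and the imports are taken from the deleted file.

import akka.http.scaladsl.model.Uri.Path
import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label
import io.circe.syntax._

val paths = RemoteDiskCopyPaths(Label.unsafe("bucket1"), Path("org/proj/file.txt"), Path("org2/proj2/file.txt"))
// paths.asJson renders as:
// {"sourceBucket":"bucket1","source":"org/proj/file.txt","destination":"org2/proj2/file.txt"}
// The removed client method POSTed a non-empty array of these and decoded the
// "absoluteDestinationLocation" field from each element of the response array.

diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/routes/StoragesRoutes.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/routes/StoragesRoutes.scala index 3220dd8987..0f4c423716 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/routes/StoragesRoutes.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/routes/StoragesRoutes.scala @@ -3,6 +3,7 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.routes import akka.http.scaladsl.model.StatusCodes.Created import akka.http.scaladsl.server._ import cats.implicits._ +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragePluginExceptionHandler.handleStorageExceptions import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages._ import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection._ import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{Storage, StorageRejection} @@ -20,7 +21,6 @@ import ch.epfl.bluebrain.nexus.delta.sdk.implicits._ import ch.epfl.bluebrain.nexus.delta.sdk.marshalling.{OriginalSource, RdfMarshalling} import ch.epfl.bluebrain.nexus.delta.sdk.model.BaseUri import io.circe.Json -import kamon.instrumentation.akka.http.TracingDirectives.operationName /** * The storages routes @@ -52,26 +52,21 @@ final class StoragesRoutes( with CirceUnmarshalling with RdfMarshalling { - import baseUri.prefixSegment import schemeDirectives._ def routes: Route = (baseUriPrefix(baseUri.prefix) & replaceUri("storages", schemas.storage)) { - pathPrefix("storages") { + (handleStorageExceptions & pathPrefix("storages")) { extractCaller { implicit caller => projectRef { project => concat( - (pathEndOrSingleSlash & operationName(s"$prefixSegment/storages/{org}/{project}")) { + 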
pathEndOrSingleSlash { // Create a storage without id segment (post & noParameter("rev") & entity(as[Json]) & indexingMode) { (source, mode) => authorizeFor(project, Write).apply { emit( Created, - storages - .create(project, source) - .flatTap(index(project, _, mode)) - .mapValue(_.metadata) - .attemptNarrow[StorageRejection] + storages.create(project, source).flatTap(index(project, _, mode)).mapValue(_.metadata) ) } } @@ -79,64 +74,60 @@ final class StoragesRoutes( (idSegment & indexingMode) { (id, mode) => concat( pathEndOrSingleSlash { - operationName(s"$prefixSegment/storages/{org}/{project}/{id}") { - concat( - // Create or update a storage - put { - authorizeFor(project, Write).apply { - (parameter("rev".as[Int].?) & pathEndOrSingleSlash & entity(as[Json])) { - case (None, source) => - // Create a storage with id segment - emit( - Created, - storages - .create(id, project, source) - .flatTap(index(project, _, mode)) - .mapValue(_.metadata) - .attemptNarrow[StorageRejection] - ) - case (Some(rev), source) => - // Update a storage - emit( - storages - .update(id, project, rev, source) - .flatTap(index(project, _, mode)) - .mapValue(_.metadata) - .attemptNarrow[StorageRejection] - ) - } + concat( + // Create or update a storage + put { + authorizeFor(project, Write).apply { + (parameter("rev".as[Int].?) & pathEndOrSingleSlash & entity(as[Json])) { + case (None, source) => + // Create a storage with id segment + emit( + Created, + storages + .create(id, project, source) + .flatTap(index(project, _, mode)) + .mapValue(_.metadata) + ) + case (Some(rev), source) => + // Update a storage + emit( + storages + .update(id, project, rev, source) + .flatTap(index(project, _, mode)) + .mapValue(_.metadata) + ) } - }, - // Deprecate a storage - (delete & parameter("rev".as[Int])) { rev => - authorizeFor(project, Write).apply { + } + }, + // Deprecate a storage + (delete & parameter("rev".as[Int])) { rev => + authorizeFor(project, Write).apply { + emit( + storages + .deprecate(id, project, rev) + .flatTap(index(project, _, mode)) + .mapValue(_.metadata) + .attemptNarrow[StorageRejection] + .rejectOn[StorageNotFound] + ) + } + }, + // Fetch a storage + (get & idSegmentRef(id)) { id => + emitOrFusionRedirect( + project, + id, + authorizeFor(project, Read).apply { emit( storages - .deprecate(id, project, rev) - .flatTap(index(project, _, mode)) - .mapValue(_.metadata) + .fetch(id, project) .attemptNarrow[StorageRejection] .rejectOn[StorageNotFound] ) } - }, - // Fetch a storage - (get & idSegmentRef(id)) { id => - emitOrFusionRedirect( - project, - id, - authorizeFor(project, Read).apply { - emit( - storages - .fetch(id, project) - .attemptNarrow[StorageRejection] - .rejectOn[StorageNotFound] - ) - } - ) - } - ) - } + ) + } + ) }, // Undeprecate a storage (pathPrefix("undeprecate") & pathEndOrSingleSlash & put & parameter("rev".as[Int])) { rev => @@ -162,7 +153,7 @@ final class StoragesRoutes( }, (pathPrefix("statistics") & get & pathEndOrSingleSlash) { authorizeFor(project, Read).apply { - emit(storagesStatistics.get(id, project).attemptNarrow[StorageRejection]) + emit(storagesStatistics.get(id, project)) } } ) diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FilesSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FilesSpec.scala index 71bc80e8a7..fc73b232e0 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FilesSpec.scala +++ 
b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FilesSpec.scala @@ -19,7 +19,7 @@ import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejec import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageType import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageType.{RemoteDiskStorage => RemoteStorageType} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.{AkkaSourceHelpers, FileOperations} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.{StorageFixtures, Storages, StoragesConfig} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.{FetchStorage, StorageFixtures, Storages, StoragesConfig} import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.nxv import ch.epfl.bluebrain.nexus.delta.sdk.ConfigFixtures @@ -36,6 +36,7 @@ import ch.epfl.bluebrain.nexus.delta.sdk.projects.FetchContextDummy import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.ProjectRejection.{ProjectIsDeprecated, ProjectNotFound} import ch.epfl.bluebrain.nexus.delta.sdk.resolvers.ResolverContextResolution import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.{Anonymous, Authenticated, Group, User} +import ch.epfl.bluebrain.nexus.delta.sourcing.model.ResourceRef.Latest import ch.epfl.bluebrain.nexus.delta.sourcing.model.Tag.UserTag import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef, ResourceRef} import ch.epfl.bluebrain.nexus.delta.sourcing.postgres.DoobieScalaTestFixture @@ -137,17 +138,11 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) clock ).accepted + lazy val fetchStorage = FetchStorage(storages, aclCheck) lazy val fileOps: FileOperations = FileOperationsMock.forDiskAndRemoteDisk(remoteDiskStorageClient) - lazy val files: Files = Files( - fetchContext, - aclCheck, - storages, - xas, - FilesConfig(eventLogConfig, MediaTypeDetectorConfig.Empty), - fileOps, - clock - ) + val filesConfig = FilesConfig(eventLogConfig, MediaTypeDetectorConfig.Empty) + lazy val files: Files = Files(fetchContext, fetchStorage, xas, filesConfig, fileOps, clock) def fileId(file: String): FileId = FileId(file, projectRef) def fileIdIri(iri: Iri): FileId = FileId(iri, projectRef) @@ -164,6 +159,15 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) ): FileResource = FileGen.resourceFor(id, project, storage, attributes, storageType, rev, deprecated, tags, bob, bob) + def updateAttributes(file: Iri) = { + val aliceCaller = Caller(alice, Set(alice, Group("mygroup", realm), Authenticated(realm))) + for { + file <- files.fetchState(Latest(file), projectRef) + storage <- fetchStorage.onRead(file.storage, projectRef)(aliceCaller) + _ <- files.updateAttributes(file, storage) + } yield () + } + "creating a file" should { "create storages for files" in { @@ -269,7 +273,7 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) "reject if storage does not exist" in { val request = FileUploadRequest.from(entity()) - val expectedError = WrappedStorageRejection(StorageNotFound(storageIri, projectRef)) + val expectedError = StorageNotFound(storageIri, projectRef) files.create(fileId("file2"), Some(storage), request, None).rejected shouldEqual expectedError } @@ -340,8 +344,7 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) "reject if storage does not exist" in { files .createLegacyLink(fileId("file3"), Some(storage), description("myfile.txt"), Uri.Path.Empty, None) - .rejected shouldEqual - 
WrappedStorageRejection(StorageNotFound(storageIri, projectRef)) + .rejected shouldEqual StorageNotFound(storageIri, projectRef) } "reject if project does not exist" in { @@ -385,8 +388,10 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) "reject if storage does not exist" in { val request = FileUploadRequest.from(entity()) - files.update(fileId("file1"), Some(storage), 2, request, None).rejected shouldEqual - WrappedStorageRejection(StorageNotFound(storageIri, projectRef)) + files.update(fileId("file1"), Some(storage), 2, request, None).rejected shouldEqual StorageNotFound( + storageIri, + projectRef + ) } "reject if project does not exist" in { @@ -470,14 +475,10 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) "updating remote disk file attributes" should { - "reject if digest is already computed" in { - files.updateAttributes(file1, projectRef).rejectedWith[DigestAlreadyComputed] - } - "succeed" in { - val tempAttr = attributes("myfile.txt") - val attr = tempAttr.copy(location = Uri(s"file:///app/nexustest/nexus/${tempAttr.path}"), origin = Storage) - val expected = mkResource( + val tempAttr = attributes("myfile.txt") + val attr = tempAttr.copy(location = Uri(s"file:///app/nexustest/nexus/${tempAttr.path}"), origin = Storage) + val expected = mkResource( file2, projectRef, remoteRev, @@ -486,14 +487,15 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) rev = 2, tags = Tags(tag -> 1) ) - val updatedF2 = for { - _ <- files.updateAttributes(file2, projectRef) - f <- files.fetch(fileIdIri(file2)) - } yield f - updatedF2.accepted shouldEqual expected + + (updateAttributes(file2) >> files.fetch(fileIdIri(file2))).accepted shouldEqual expected } } + "reject if digest is already computed" in { + updateAttributes(file2).rejectedWith[DigestAlreadyComputed] + } + "updating a file linking" should { "succeed and tag" in { @@ -534,14 +536,8 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) val (name, desc, keywords) = (genString(), genString(), genKeywords()) val originalFileDescription = description("file-6.txt") - val updatedFileDescription = descriptionWithMetadata("file-6.txt", name, desc, keywords) files.createLegacyLink(id, Some(remoteId), originalFileDescription, path, None).accepted - - val fetched = files.fetch(id).accepted - files.updateAttributes(fetched.id, projectRef).accepted - files.updateLegacyLink(id, Some(remoteId), updatedFileDescription, path, 2, None) - eventually { files.fetch(id).map { fetched => fetched.value.attributes.name should contain(name) @@ -568,8 +564,7 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) val storage = nxv + "other-storage" files .updateLegacyLink(fileId("file1"), Some(storage), description("myfile.txt"), Uri.Path.Empty, 2, None) - .rejected shouldEqual - WrappedStorageRejection(StorageNotFound(storage, projectRef)) + .rejected shouldEqual StorageNotFound(storage, projectRef) } "reject if project does not exist" in { diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/batch/BatchCopySuite.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/batch/BatchCopySuite.scala deleted file mode 100644 index 8c30559984..0000000000 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/batch/BatchCopySuite.scala +++ /dev/null @@ -1,261 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.files.batch - -import cats.data.NonEmptyList -import cats.effect.IO -import 
ch.epfl.bluebrain.nexus.delta.plugins.storage.files.batch.BatchCopySuite._ -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.generators.FileGen -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.mocks._ -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model._ -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes.CopyFileSource -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.{FetchFileResource, FileFixtures, FileResource} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages._ -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.{DiskStorage, RemoteDiskStorage, S3Storage} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{Storage, StorageType} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.CopyFileRejection.{DifferentStorageTypes, SourceFileTooLarge, UnsupportedOperation} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.disk.{DiskCopyDetails, DiskStorageCopyFiles} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.RemoteDiskStorageCopyFiles -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.model.RemoteDiskCopyDetails -import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.nxv -import ch.epfl.bluebrain.nexus.delta.sdk.acls.model.AclAddress -import ch.epfl.bluebrain.nexus.delta.sdk.acls.{AclCheck, AclSimpleCheck} -import ch.epfl.bluebrain.nexus.delta.sdk.error.ServiceError.AuthorizationFailed -import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller -import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegmentRef -import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.User -import ch.epfl.bluebrain.nexus.delta.sourcing.model.{ProjectRef, ResourceRef} -import ch.epfl.bluebrain.nexus.testkit.Generators -import ch.epfl.bluebrain.nexus.testkit.mu.NexusSuite - -import scala.collection.mutable.ListBuffer - -class BatchCopySuite extends NexusSuite with StorageFixtures with Generators with FileFixtures with FileGen { - - private val sourceProj = genProject() - private val sourceFileId = genFileId(sourceProj.ref) - private val source = CopyFileSource(sourceProj.ref, NonEmptyList.of(sourceFileId)) - private val keywords = genKeywords() - private val description = genString() - private val name = genString() - private val stubbedFileAttr = - attributes(genString(), keywords = keywords, description = Some(description), name = Some(name)) - - test("successfully perform disk copy") { - val events = ListBuffer.empty[Event] - val (sourceFileRes, sourceStorage) = - genFileResourceAndStorage(sourceFileId, sourceProj.context, diskVal, keywords, description, name) - val (user, aclCheck) = userAuthorizedOnProjectStorage(sourceStorage.value) - - val batchCopy = mkBatchCopy( - fetchFile = stubbedFetchFile(sourceFileRes, events), - fetchStorage = stubbedFetchStorage(sourceStorage, events), - aclCheck = aclCheck, - diskCopy = stubbedDiskCopy(NonEmptyList.of(stubbedFileAttr), events) - ) - val destStorage: DiskStorage = genDiskStorage() - - batchCopy.copyFiles(source, destStorage)(caller(user)).map { obtained => - val obtainedEvents = events.toList - assertEquals(obtained, NonEmptyList.of(stubbedFileAttr)) - sourceFileWasFetched(obtainedEvents, sourceFileId) - sourceStorageWasFetched(obtainedEvents, sourceFileRes.value.storage, sourceProj.ref) - diskCopyWasPerformed( - obtainedEvents, - destStorage, - sourceFileRes.value.attributes - ) - } - } - - test("successfully 
perform remote disk copy") { - val events = ListBuffer.empty[Event] - val (sourceFileRes, sourceStorage) = - genFileResourceAndStorage(sourceFileId, sourceProj.context, remoteVal, keywords, description, name) - val (user, aclCheck) = userAuthorizedOnProjectStorage(sourceStorage.value) - - val batchCopy = mkBatchCopy( - fetchFile = stubbedFetchFile(sourceFileRes, events), - fetchStorage = stubbedFetchStorage(sourceStorage, events), - aclCheck = aclCheck, - remoteCopy = stubbedRemoteCopy(NonEmptyList.of(stubbedFileAttr), events) - ) - val destStorage: RemoteDiskStorage = genRemoteStorage() - - batchCopy.copyFiles(source, destStorage)(caller(user)).map { obtained => - val obtainedEvents = events.toList - assertEquals(obtained, NonEmptyList.of(stubbedFileAttr)) - sourceFileWasFetched(obtainedEvents, sourceFileId) - sourceStorageWasFetched(obtainedEvents, sourceFileRes.value.storage, sourceProj.ref) - remoteDiskCopyWasPerformed( - obtainedEvents, - destStorage, - sourceFileRes.value.attributes - ) - } - } - - test("fail if destination storage is S3") { - val batchCopy = mkBatchCopy() - val (user, destStorage) = (genUser(), genS3Storage()) - val expectedError = UnsupportedOperation(StorageType.S3Storage) - batchCopy.copyFiles(source, destStorage)(caller(user)).interceptEquals(expectedError) - } - - test("fail if a source storage is different to destination storage") { - val events = ListBuffer.empty[Event] - val (sourceFileRes, sourceStorage) = - genFileResourceAndStorage(sourceFileId, sourceProj.context, diskVal, keywords, description, name) - val (user, aclCheck) = userAuthorizedOnProjectStorage(sourceStorage.value) - - val batchCopy = mkBatchCopy( - fetchFile = stubbedFetchFile(sourceFileRes, events), - fetchStorage = stubbedFetchStorage(sourceStorage, events), - aclCheck = aclCheck - ) - val expectedError = DifferentStorageTypes(sourceStorage.id, StorageType.DiskStorage, StorageType.RemoteDiskStorage) - - batchCopy.copyFiles(source, genRemoteStorage())(caller(user)).interceptEquals(expectedError).map { _ => - val obtainedEvents = events.toList - sourceFileWasFetched(obtainedEvents, sourceFileId) - sourceStorageWasFetched(obtainedEvents, sourceFileRes.value.storage, sourceProj.ref) - } - } - - test("fail if user does not have read access on a source file's storage") { - val events = ListBuffer.empty[Event] - val (sourceFileRes, sourceStorage) = - genFileResourceAndStorage(sourceFileId, sourceProj.context, diskVal, keywords, description, name) - val user = genUser() - val aclCheck = AclSimpleCheck((user, AclAddress.fromProject(sourceProj.ref), Set())).accepted - - val batchCopy = mkBatchCopy( - fetchFile = stubbedFetchFile(sourceFileRes, events), - fetchStorage = stubbedFetchStorage(sourceStorage, events), - aclCheck = aclCheck - ) - - batchCopy.copyFiles(source, genDiskStorage())(caller(user)).intercept[AuthorizationFailed].map { _ => - val obtainedEvents = events.toList - sourceFileWasFetched(obtainedEvents, sourceFileId) - sourceStorageWasFetched(obtainedEvents, sourceFileRes.value.storage, sourceProj.ref) - } - } - - test("fail if a single source file exceeds max size for destination storage") { - val events = ListBuffer.empty[Event] - val (sourceFileRes, sourceStorage) = - genFileResourceAndStorage(sourceFileId, sourceProj.context, diskVal, keywords, description, name, 1000L) - val (user, aclCheck) = userAuthorizedOnProjectStorage(sourceStorage.value) - - val batchCopy = mkBatchCopy( - fetchFile = stubbedFetchFile(sourceFileRes, events), - fetchStorage = stubbedFetchStorage(sourceStorage, 
events), - aclCheck = aclCheck - ) - val destStorage = genDiskStorage() - val error = SourceFileTooLarge(destStorage.value.maxFileSize, destStorage.id) - - batchCopy.copyFiles(source, destStorage)(caller(user)).interceptEquals(error).map { _ => - val obtainedEvents = events.toList - sourceFileWasFetched(obtainedEvents, sourceFileId) - sourceStorageWasFetched(obtainedEvents, sourceFileRes.value.storage, sourceProj.ref) - } - } - - private def mkBatchCopy( - fetchFile: FetchFileResource = FetchFileResourceMock.unimplemented, - fetchStorage: FetchStorage = FetchStorageMock.unimplemented, - aclCheck: AclCheck = AclSimpleCheck().accepted, - diskCopy: DiskStorageCopyFiles = DiskCopyMock.unimplemented, - remoteCopy: RemoteDiskStorageCopyFiles = RemoteCopyMock.unimplemented - ): BatchCopy = BatchCopy.mk(fetchFile, fetchStorage, aclCheck, diskCopy, remoteCopy) - - private def userAuthorizedOnProjectStorage(storage: Storage): (User, AclCheck) = { - val user = genUser() - val permissions = Set(storage.storageValue.readPermission) - (user, AclSimpleCheck((user, AclAddress.fromProject(storage.project), permissions)).accepted) - } - - private def sourceFileWasFetched(events: List[Event], id: FileId): Unit = { - val obtained = events.collectFirst { case f: FetchFileCalled => f } - assertEquals(obtained, Some(FetchFileCalled(id))) - } - - private def sourceStorageWasFetched(events: List[Event], storageRef: ResourceRef.Revision, proj: ProjectRef): Unit = { - val obtained = events.collectFirst { case f: FetchStorageCalled => f } - assertEquals(obtained, Some(FetchStorageCalled(IdSegmentRef(storageRef), proj))) - } - - private def diskCopyWasPerformed( - events: List[Event], - storage: DiskStorage, - sourceAttr: LimitedFileAttributes - ): Unit = { - val expectedDiskCopyDetails = DiskCopyDetails(storage, sourceAttr) - val obtained = events.collectFirst { case f: DiskCopyCalled => f } - assertEquals(obtained, Some(DiskCopyCalled(storage, NonEmptyList.of(expectedDiskCopyDetails)))) - } - - private def remoteDiskCopyWasPerformed( - events: List[Event], - storage: RemoteDiskStorage, - sourceAttr: FileAttributes - ): Unit = { - val expectedCopyDetails = - RemoteDiskCopyDetails( - uuid, - storage, - sourceAttr.path, - storage.value.folder, - FileMetadata.from(sourceAttr), - FileDescription.from(sourceAttr) - ) - val obtained = events.collectFirst { case f: RemoteCopyCalled => f } - assertEquals(obtained, Some(RemoteCopyCalled(storage, NonEmptyList.of(expectedCopyDetails)))) - } - - private def caller(user: User): Caller = Caller(user, Set(user)) - - private def genDiskStorage() = - DiskStorage(nxv + genString(), genProject().ref, diskVal, json"""{"disk": "value"}""") - - private def genRemoteStorage() = - RemoteDiskStorage(nxv + genString(), genProject().ref, remoteVal, json"""{"disk": "value"}""") - - private def genS3Storage() = - S3Storage(nxv + genString(), genProject().ref, s3Val, json"""{"disk": "value"}""") -} - -object BatchCopySuite { - sealed trait Event - final case class FetchFileCalled(id: FileId) extends Event - final case class FetchStorageCalled(id: IdSegmentRef, project: ProjectRef) extends Event - final case class DiskCopyCalled(destStorage: Storage.DiskStorage, details: NonEmptyList[DiskCopyDetails]) - extends Event - final case class RemoteCopyCalled( - destStorage: Storage.RemoteDiskStorage, - copyDetails: NonEmptyList[RemoteDiskCopyDetails] - ) extends Event - - private def stubbedFetchFile(sourceFileRes: FileResource, events: ListBuffer[Event]) = - FetchFileResourceMock.withMockedFetch(id => 
addEventAndReturn(events, FetchFileCalled(id), sourceFileRes)) - - private def stubbedFetchStorage(storage: StorageResource, events: ListBuffer[Event]) = - FetchStorageMock.withMockedFetch((id, proj) => addEventAndReturn(events, FetchStorageCalled(id, proj), storage)) - - private def stubbedRemoteCopy(stubbedRemoteFileAttr: NonEmptyList[FileAttributes], events: ListBuffer[Event]) = - RemoteCopyMock.withMockedCopy((storage, details) => - addEventAndReturn(events, RemoteCopyCalled(storage, details), stubbedRemoteFileAttr) - ) - - private def stubbedDiskCopy(stubbedDiskFileAttr: NonEmptyList[FileAttributes], events: ListBuffer[Event]) = - DiskCopyMock.withMockedCopy((storage, details) => - addEventAndReturn(events, DiskCopyCalled(storage, details), stubbedDiskFileAttr) - ) - - private def addEventAndReturn[A](events: ListBuffer[Event], event: Event, a: A): IO[A] = - addEventAndReturnIO(events, event, IO.pure(a)) - - private def addEventAndReturnIO[A](events: ListBuffer[Event], event: Event, io: IO[A]): IO[A] = - IO(events.addOne(event)) >> io - -} diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/batch/BatchFilesSuite.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/batch/BatchFilesSuite.scala deleted file mode 100644 index 7408282d3a..0000000000 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/batch/BatchFilesSuite.scala +++ /dev/null @@ -1,130 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.files.batch - -import cats.data.NonEmptyList -import cats.effect.IO -import ch.epfl.bluebrain.nexus.delta.kernel.utils.UUIDF -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.batch.BatchFilesSuite._ -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.generators.FileGen -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.mocks.BatchCopyMock -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileCommand.CreateFile -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection.CopyRejection -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{FileAttributes, FileCommand} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes.CopyFileSource -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.{FetchFileStorage, FileFixtures, FileResource} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StorageFixtures -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.CopyFileRejection.TotalCopySizeTooLarge -import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller -import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegment -import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.{Project, ProjectContext} -import ch.epfl.bluebrain.nexus.delta.sdk.projects.{FetchContext, FetchContextDummy} -import ch.epfl.bluebrain.nexus.delta.sourcing.model.{ProjectRef, ResourceRef} -import ch.epfl.bluebrain.nexus.testkit.Generators -import ch.epfl.bluebrain.nexus.testkit.mu.NexusSuite - -import java.util.UUID -import scala.collection.mutable.ListBuffer - -class BatchFilesSuite extends NexusSuite with StorageFixtures with Generators with FileFixtures with FileGen { - - private val destProj: Project = genProject() - private val (destStorageRef, destStorage) = (genRevision(), genStorage(destProj.ref, diskVal)) - private val destFileUUId = UUID.randomUUID() 
// Not testing UUID generation, same for all of them - private val destination = genCopyFileDestination(destProj.ref, destStorage.storage) - - test("batch copying should fetch storage, perform copy and evaluate create file commands") { - val events = ListBuffer.empty[Event] - val fetchFileStorage = mockFetchFileStorage(destStorageRef, destStorage.storage, events) - val stubbedDestAttributes = genAttributes() - val batchCopy = BatchCopyMock.withStubbedCopyFiles(events, stubbedDestAttributes) - - val batchFiles: BatchFiles = mkBatchFiles(events, destProj, destFileUUId, fetchFileStorage, batchCopy) - implicit val c: Caller = Caller(genUser(), Set()) - val source = genCopyFileSource() - - batchFiles.copyFiles(source, destination).map { obtained => - val expectedCommands = createCommandsFromFileAttributesAndMetadata(stubbedDestAttributes) - val expectedResources = expectedCommands.map(genFileResourceFromCmd) - val expectedCommandCalls = expectedCommands.toList.map(FileCommandEvaluated) - val expectedEvents = activeStorageFetchedAndBatchCopyCalled(source) ++ expectedCommandCalls - - assertEquals(obtained, expectedResources) - assertEquals(events.toList, expectedEvents) - } - } - - test("copy rejections should be mapped to a file rejection") { - val events = ListBuffer.empty[Event] - val fetchFileStorage = mockFetchFileStorage(destStorageRef, destStorage.storage, events) - val error = TotalCopySizeTooLarge(1L, 2L, genIri()) - val batchCopy = BatchCopyMock.withError(error, events) - - val batchFiles: BatchFiles = mkBatchFiles(events, destProj, UUID.randomUUID(), fetchFileStorage, batchCopy) - implicit val c: Caller = Caller(genUser(), Set()) - val source = genCopyFileSource() - val expectedError = CopyRejection(source.project, destProj.ref, destStorage.id, error) - - batchFiles.copyFiles(source, destination).interceptEquals(expectedError).accepted - - assertEquals(events.toList, activeStorageFetchedAndBatchCopyCalled(source)) - } - - def mockFetchFileStorage( - storageRef: ResourceRef.Revision, - storage: Storage, - events: ListBuffer[Event] - ): FetchFileStorage = new FetchFileStorage { - override def fetchAndValidateActiveStorage(storageIdOpt: Option[IdSegment], ref: ProjectRef, pc: ProjectContext)( - implicit caller: Caller - ): IO[(ResourceRef.Revision, Storage)] = - IO(events.addOne(ActiveStorageFetched(storageIdOpt, ref, pc, caller))).as(storageRef -> storage) - } - - def mkBatchFiles( - events: ListBuffer[Event], - proj: Project, - fixedUuid: UUID, - fetchFileStorage: FetchFileStorage, - batchCopy: BatchCopy - ): BatchFiles = { - implicit val uuidF: UUIDF = UUIDF.fixed(fixedUuid) - val evalFileCmd: CreateFile => IO[FileResource] = cmd => - IO(events.addOne(FileCommandEvaluated(cmd))).as(genFileResourceFromCmd(cmd)) - val fetchContext: FetchContext = FetchContextDummy(Map(proj.ref -> proj.context)) - BatchFiles.mk(fetchFileStorage, fetchContext, evalFileCmd, batchCopy) - } - - def activeStorageFetchedAndBatchCopyCalled(source: CopyFileSource)(implicit c: Caller): List[Event] = { - val expectedActiveStorageFetched = ActiveStorageFetched(destination.storage, destProj.ref, destProj.context, c) - val expectedBatchCopyCalled = BatchCopyCalled(source, destStorage.storage, c) - List(expectedActiveStorageFetched, expectedBatchCopyCalled) - } - - def createCommandsFromFileAttributesAndMetadata( - stubbedDestAttributes: NonEmptyList[FileAttributes] - )(implicit - c: Caller - ): NonEmptyList[CreateFile] = stubbedDestAttributes.map { case attr => - CreateFile( - destProj.base.iri / 
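// The expected file id is the destination project's base IRI extended with
// the fixed UUID, mirroring what BatchFiles derives via UUIDF.fixed above.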
destFileUUId.toString, - destProj.ref, - destStorageRef, - destStorage.value.tpe, - attr, - c.subject, - destination.tag - ) - } -} - -object BatchFilesSuite { - sealed trait Event - final case class ActiveStorageFetched( - storageIdOpt: Option[IdSegment], - ref: ProjectRef, - pc: ProjectContext, - caller: Caller - ) extends Event - final case class BatchCopyCalled(source: CopyFileSource, destStorage: Storage, caller: Caller) extends Event - final case class FileCommandEvaluated(cmd: FileCommand) extends Event -} diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/generators/FileGen.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/generators/FileGen.scala index 82a4c83f47..ceb08b0c1e 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/generators/FileGen.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/generators/FileGen.scala @@ -2,116 +2,19 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.files.generators import akka.http.scaladsl.model.ContentTypes.`text/plain(UTF-8)` import akka.http.scaladsl.model.Uri -import cats.data.NonEmptyList +import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.FileResource import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest.ComputedDigest import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes.FileAttributesOrigin.Client -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileCommand.CreateFile -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{CopyFileDestination, FileAttributes, FileId, FileState} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes.CopyFileSource -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.{schemas, FileFixtures, FileResource} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{FileAttributes, FileState} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model._ -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.{StorageGen, StorageResource} import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri -import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.nxv -import ch.epfl.bluebrain.nexus.delta.sdk.generators.ProjectGen -import ch.epfl.bluebrain.nexus.delta.sdk.model.{IdSegment, Tags} -import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.{ApiMappings, Project, ProjectContext} -import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.{Anonymous, Subject, User} -import ch.epfl.bluebrain.nexus.delta.sourcing.model.Tag.UserTag +import ch.epfl.bluebrain.nexus.delta.sdk.model.Tags +import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.{Anonymous, Subject} import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef, ResourceRef} -import ch.epfl.bluebrain.nexus.testkit.Generators import java.nio.file.{Files => JavaFiles} import java.time.Instant import java.util.UUID -import scala.util.Random - -trait FileGen { self: Generators with FileFixtures => - def genProjectRef(): ProjectRef = ProjectRef.unsafe(genString(), genString()) - - def genProject(): Project = { - val projRef = genProjectRef() - val apiMappings = ApiMappings("file" -> schemas.files) - ProjectGen.project(projRef.project.value, projRef.organization.value, base = nxv.base, mappings = apiMappings) - } - - def genUser(realmLabel: Label): User = User(genString(), realmLabel) - def genUser(): User = User(genString(), 
Label.unsafe(genString())) - - def genFilesIdsInProject(projRef: ProjectRef): NonEmptyList[FileId] = - NonEmptyList.of(genFileId(projRef), genFileId(projRef)) - - def genFileId(projRef: ProjectRef) = FileId(genString(), projRef) - - def genFileIdWithRev(projRef: ProjectRef): FileId = FileId(genString(), 4, projRef) - - def genFileIdWithTag(projRef: ProjectRef): FileId = FileId(genString(), UserTag.unsafe(genString()), projRef) - - def genAttributes(): NonEmptyList[FileAttributes] = { - val proj = genProject() - genFilesIdsInProject(proj.ref) - .map(genFileResource(_, proj.context)) - .map(res => res.value.attributes) - } - - def genCopyFileSource(): CopyFileSource = genCopyFileSource(genProjectRef()) - def genCopyFileSource(proj: ProjectRef) = CopyFileSource(proj, genFilesIdsInProject(proj)) - def genCopyFileDestination(proj: ProjectRef, storage: Storage): CopyFileDestination = - CopyFileDestination(proj, genOption(IdSegment(storage.id.toString)), genOption(genUserTag)) - def genUserTag: UserTag = UserTag.unsafe(genString()) - def genOption[A](genA: => A): Option[A] = if (Random.nextInt(2) % 2 == 0) Some(genA) else None - - def genFileResource(fileId: FileId, context: ProjectContext): FileResource = - genFileResourceWithStorage(fileId, context, genRevision(), genKeywords(), genString(), genString(), 1L) - - def genFileResourceWithStorage( - fileId: FileId, - context: ProjectContext, - storageRef: ResourceRef.Revision, - keywords: Map[Label, String], - description: String, - name: String, - fileSize: Long - ): FileResource = - genFileResourceWithIri( - fileId.id.value.toIri(context.apiMappings, context.base).getOrElse(throw new Exception(s"Bad file $fileId")), - fileId.project, - storageRef, - attributes(genString(), size = fileSize, keywords = keywords, description = Some(description), name = Some(name)) - ) - - def genFileResourceAndStorage( - fileId: FileId, - context: ProjectContext, - storageVal: StorageValue, - keywords: Map[Label, String], - description: String, - name: String, - fileSize: Long = 1L - ): (FileResource, StorageResource) = { - val storageRes = StorageGen.resourceFor(genIri(), fileId.project, storageVal) - val storageRef = ResourceRef.Revision(storageRes.id, storageRes.id, storageRes.rev) - (genFileResourceWithStorage(fileId, context, storageRef, keywords, description, name, fileSize), storageRes) - } - - def genFileResourceWithIri( - iri: Iri, - projRef: ProjectRef, - storageRef: ResourceRef.Revision, - attr: FileAttributes - ): FileResource = - FileGen.resourceFor(iri, projRef, storageRef, attr) - - def genFileResourceFromCmd(cmd: CreateFile): FileResource = - genFileResourceWithIri(cmd.id, cmd.project, cmd.storage, cmd.attributes) - def genIri(): Iri = Iri.unsafe(genString()) - def genStorage(proj: ProjectRef, storageValue: StorageValue): StorageState = - StorageGen.storageState(genIri(), proj, storageValue) - - def genRevision(): ResourceRef.Revision = - ResourceRef.Revision(genIri(), genPosInt()) - def genPosInt(): Int = Random.nextInt(Int.MaxValue) -} object FileGen { diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/BatchCopyMock.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/BatchCopyMock.scala deleted file mode 100644 index 443d21d3b2..0000000000 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/BatchCopyMock.scala +++ /dev/null @@ -1,39 +0,0 @@ -package 
ch.epfl.bluebrain.nexus.delta.plugins.storage.files.mocks - -import cats.data.NonEmptyList -import cats.effect.IO -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.batch.BatchCopy -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.batch.BatchFilesSuite.{BatchCopyCalled, Event} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes.CopyFileSource -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.CopyFileRejection -import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller - -import scala.collection.mutable.ListBuffer - -object BatchCopyMock { - - def withError(e: CopyFileRejection, events: ListBuffer[Event]): BatchCopy = - withMockedCopyFiles((source, destStorage) => - caller => IO(events.addOne(BatchCopyCalled(source, destStorage, caller))) >> IO.raiseError(e) - ) - - def withStubbedCopyFiles( - events: ListBuffer[Event], - stubbedAttr: NonEmptyList[FileAttributes] - ): BatchCopy = - withMockedCopyFiles((source, destStorage) => - caller => IO(events.addOne(BatchCopyCalled(source, destStorage, caller))).as(stubbedAttr) - ) - - def withMockedCopyFiles( - copyFilesMock: (CopyFileSource, Storage) => Caller => IO[NonEmptyList[FileAttributes]] - ): BatchCopy = - new BatchCopy { - override def copyFiles(source: CopyFileSource, destStorage: Storage)(implicit - c: Caller - ): IO[NonEmptyList[FileAttributes]] = copyFilesMock(source, destStorage)(c) - } - -} diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/BatchFilesMock.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/BatchFilesMock.scala deleted file mode 100644 index f487252f3e..0000000000 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/BatchFilesMock.scala +++ /dev/null @@ -1,60 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.files.mocks - -import cats.data.NonEmptyList -import cats.effect.IO -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.FileResource -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.batch.BatchFiles -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{CopyFileDestination, FileId} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes.CopyFileSource -import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller -import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegment -import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.User -import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef -import ch.epfl.bluebrain.nexus.delta.sourcing.model.Tag.UserTag - -import scala.collection.mutable.ListBuffer - -object BatchFilesMock { - - def unimplemented: BatchFiles = withMockedCopyFiles((_, _) => _ => IO(???)) - - def withStubbedCopyFiles( - stubbed: NonEmptyList[FileResource], - events: ListBuffer[BatchFilesCopyFilesCalled] - ): BatchFiles = - withMockedCopyFiles((source, dest) => - c => IO(events.addOne(BatchFilesCopyFilesCalled(source, dest, c))).as(stubbed) - ) - - def withError(e: Throwable, events: ListBuffer[BatchFilesCopyFilesCalled]): BatchFiles = - withMockedCopyFiles((source, dest) => - c => IO(events.addOne(BatchFilesCopyFilesCalled(source, dest, c))) >> IO.raiseError(e) - ) - - def withMockedCopyFiles( - copyFilesMock: (CopyFileSource, 
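// withMockedCopyFiles lifts a plain function into the BatchFiles trait;
// withStubbedCopyFiles and withError build on it and additionally record a
// BatchFilesCopyFilesCalled event per invocation.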
CopyFileDestination) => Caller => IO[NonEmptyList[FileResource]] - ): BatchFiles = new BatchFiles { - override def copyFiles(source: CopyFileSource, dest: CopyFileDestination)(implicit - c: Caller - ): IO[NonEmptyList[FileResource]] = - copyFilesMock(source, dest)(c) - } - - final case class BatchFilesCopyFilesCalled(source: CopyFileSource, dest: CopyFileDestination, caller: Caller) - - object BatchFilesCopyFilesCalled { - def fromTestData( - destProj: ProjectRef, - sourceProj: ProjectRef, - sourceFiles: NonEmptyList[FileId], - user: User, - destStorage: Option[IdSegment] = None, - destTag: Option[UserTag] = None - ): BatchFilesCopyFilesCalled = { - val expectedCopyFileSource = CopyFileSource(sourceProj, sourceFiles) - val expectedCopyFileDestination = CopyFileDestination(destProj, destStorage, destTag) - BatchFilesCopyFilesCalled(expectedCopyFileSource, expectedCopyFileDestination, Caller(user, Set(user))) - } - } - -} diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/DiskCopyMock.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/DiskCopyMock.scala deleted file mode 100644 index 3ad1fbe023..0000000000 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/DiskCopyMock.scala +++ /dev/null @@ -1,17 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.files.mocks - -import cats.data.NonEmptyList -import cats.effect.IO -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.DiskStorage -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.disk.{DiskCopyDetails, DiskStorageCopyFiles} - -object DiskCopyMock { - - def unimplemented: DiskStorageCopyFiles = withMockedCopy((_, _) => IO(???)) - - def withMockedCopy( - copyMock: (DiskStorage, NonEmptyList[DiskCopyDetails]) => IO[NonEmptyList[FileAttributes]] - ): DiskStorageCopyFiles = copyMock(_, _) - -} diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/FetchFileResourceMock.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/FetchFileResourceMock.scala deleted file mode 100644 index 3a4f2d93d9..0000000000 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/FetchFileResourceMock.scala +++ /dev/null @@ -1,13 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.files.mocks - -import cats.effect.IO -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileId -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.{FetchFileResource, FileResource} - -object FetchFileResourceMock { - - def unimplemented: FetchFileResource = withMockedFetch(_ => IO(???)) - - def withMockedFetch(fetchMock: FileId => IO[FileResource]): FetchFileResource = fetchMock(_) - -} diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/FetchStorageMock.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/FetchStorageMock.scala deleted file mode 100644 index cd45218d77..0000000000 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/FetchStorageMock.scala +++ /dev/null @@ -1,17 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.files.mocks - -import 
cats.effect.IO -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.{FetchStorage, StorageResource} -import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegmentRef -import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef - -object FetchStorageMock { - - def unimplemented: FetchStorage = withMockedFetch((_, _) => IO(???)) - - def withMockedFetch(fetchMock: (IdSegmentRef, ProjectRef) => IO[StorageResource]): FetchStorage = new FetchStorage { - override def fetch(id: IdSegmentRef, project: ProjectRef): IO[StorageResource] = fetchMock(id, project) - override def fetchDefault(project: ProjectRef): IO[StorageResource] = ??? - } - -} diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/RemoteCopyMock.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/RemoteCopyMock.scala deleted file mode 100644 index ed80532a91..0000000000 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/RemoteCopyMock.scala +++ /dev/null @@ -1,18 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.files.mocks - -import cats.data.NonEmptyList -import cats.effect.IO -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.RemoteDiskStorage -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.RemoteDiskStorageCopyFiles -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.model.RemoteDiskCopyDetails - -object RemoteCopyMock { - - def unimplemented: RemoteDiskStorageCopyFiles = withMockedCopy((_, _) => IO(???)) - - def withMockedCopy( - copyMock: (RemoteDiskStorage, NonEmptyList[RemoteDiskCopyDetails]) => IO[NonEmptyList[FileAttributes]] - ): RemoteDiskStorageCopyFiles = copyMock(_, _) - -} diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/BatchFilesRoutesSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/BatchFilesRoutesSpec.scala deleted file mode 100644 index 5b42052bbf..0000000000 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/BatchFilesRoutesSpec.scala +++ /dev/null @@ -1,265 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes - -import akka.http.scaladsl.model.headers.OAuth2BearerToken -import akka.http.scaladsl.model.{StatusCode, StatusCodes} -import akka.http.scaladsl.server.Route -import cats.data.NonEmptyList -import ch.epfl.bluebrain.nexus.delta.kernel.utils.ClassUtils -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.batch.BatchFiles -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.generators.FileGen -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.mocks.BatchFilesMock -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.mocks.BatchFilesMock.BatchFilesCopyFilesCalled -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection.{CopyRejection, FileNotFound, WrappedStorageRejection} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{FileId, FileRejection} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.{contexts => fileContexts, FileFixtures, FileResource} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StorageFixtures -import 
ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection.{DifferentStorageType, StorageNotFound} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageType -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.CopyFileRejection.{SourceFileTooLarge, TotalCopySizeTooLarge, UnsupportedOperation} -import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary -import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.{ContextValue, RemoteContextResolution} -import ch.epfl.bluebrain.nexus.delta.sdk.IndexingAction -import ch.epfl.bluebrain.nexus.delta.sdk.acls.model.AclAddress -import ch.epfl.bluebrain.nexus.delta.sdk.acls.{AclCheck, AclSimpleCheck} -import ch.epfl.bluebrain.nexus.delta.sdk.identities.IdentitiesDummy -import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller -import ch.epfl.bluebrain.nexus.delta.sdk.model.{IdSegment, IdSegmentRef} -import ch.epfl.bluebrain.nexus.delta.sdk.permissions.model.Permission -import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.Project -import ch.epfl.bluebrain.nexus.delta.sdk.utils.BaseRouteSpec -import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.User -import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef -import ch.epfl.bluebrain.nexus.delta.sourcing.model.Tag.UserTag -import io.circe.Json -import io.circe.syntax.KeyOps -import org.scalatest.Assertion - -import scala.collection.mutable.ListBuffer - -class BatchFilesRoutesSpec extends BaseRouteSpec with StorageFixtures with FileFixtures with FileGen { - - implicit override def rcr: RemoteContextResolution = - RemoteContextResolution.fixedIO( - fileContexts.files -> ContextValue.fromFile("contexts/files.json"), - Vocabulary.contexts.metadata -> ContextValue.fromFile("contexts/metadata.json"), - Vocabulary.contexts.bulkOperation -> ContextValue.fromFile("contexts/bulk-operation.json"), - Vocabulary.contexts.error -> ContextValue.fromFile("contexts/error.json") - ) - - "Batch copying files between projects" should { - - "succeed for source files looked up by latest" in { - val sourceProj = genProject() - val sourceFileIds = genFilesIdsInProject(sourceProj.ref) - testBulkCopySucceedsForStubbedFiles(sourceProj, sourceFileIds) - } - - "succeed for source files looked up by tag" in { - val sourceProj = genProject() - val sourceFileIds = NonEmptyList.of(genFileIdWithTag(sourceProj.ref), genFileIdWithTag(sourceProj.ref)) - testBulkCopySucceedsForStubbedFiles(sourceProj, sourceFileIds) - } - - "succeed for source files looked up by rev" in { - val sourceProj = genProject() - val sourceFileIds = NonEmptyList.of(genFileIdWithRev(sourceProj.ref), genFileIdWithRev(sourceProj.ref)) - testBulkCopySucceedsForStubbedFiles(sourceProj, sourceFileIds) - } - - "succeed with a specific destination storage" in { - val sourceProj = genProject() - val sourceFileIds = genFilesIdsInProject(sourceProj.ref) - val destStorageId = IdSegment(genString()) - testBulkCopySucceedsForStubbedFiles(sourceProj, sourceFileIds, destStorageId = Some(destStorageId)) - } - - "succeed with a user tag applied to destination files" in { - val sourceProj = genProject() - val sourceFileIds = genFilesIdsInProject(sourceProj.ref) - val destTag = UserTag.unsafe(genString()) - testBulkCopySucceedsForStubbedFiles(sourceProj, sourceFileIds, destTag = Some(destTag)) - } - - "return 403 for a user without read permission on the source project" in { - val (sourceProj, destProj, user) = (genProject(), genProject(), genUser(realm)) - val sourceFileIds = 
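// permissions = Set() withholds the read permission on the source project,
// so the route must answer 403 before any copy logic is reached.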
genFilesIdsInProject(sourceProj.ref) - - val route = mkRoute(BatchFilesMock.unimplemented, sourceProj, user, permissions = Set()) - val payload = BatchFilesRoutesSpec.mkBulkCopyPayload(sourceProj.ref, sourceFileIds) - - callBulkCopyEndpoint(route, destProj.ref, payload, user) { - response.shouldBeForbidden - } - } - - "return 400 if tag and rev are present simultaneously for a source file" in { - val (sourceProj, destProj, user) = (genProject(), genProject(), genUser(realm)) - - val route = mkRoute(BatchFilesMock.unimplemented, sourceProj, user, permissions = Set()) - val invalidFilePayload = BatchFilesRoutesSpec.mkSourceFilePayload(genString(), Some(3), Some(genString())) - val payload = Json.obj("sourceProjectRef" := sourceProj.ref, "files" := List(invalidFilePayload)) - - callBulkCopyEndpoint(route, destProj.ref, payload, user) { - response.status shouldBe StatusCodes.BadRequest - } - } - - "return 400 for copy errors raised by batch file logic" in { - val unsupportedStorageType = UnsupportedOperation(StorageType.S3Storage) - val fileTooLarge = SourceFileTooLarge(12, genIri()) - val totalSizeTooLarge = TotalCopySizeTooLarge(1L, 2L, genIri()) - - val errors = List(unsupportedStorageType, fileTooLarge, totalSizeTooLarge) - .map(CopyRejection(genProjectRef(), genProjectRef(), genIri(), _)) - - forAll(errors) { error => - val (sourceProj, destProj, user) = (genProject(), genProject(), genUser(realm)) - val sourceFileIds = genFilesIdsInProject(sourceProj.ref) - val events = ListBuffer.empty[BatchFilesCopyFilesCalled] - val batchFiles = BatchFilesMock.withError(error, events) - - val route = mkRoute(batchFiles, sourceProj, user, permissions = Set(files.permissions.read)) - val payload = BatchFilesRoutesSpec.mkBulkCopyPayload(sourceProj.ref, sourceFileIds) - - callBulkCopyEndpoint(route, destProj.ref, payload, user) { - response.status shouldBe StatusCodes.BadRequest - response.asJson shouldBe errorJson(error, Some(ClassUtils.simpleName(error.rejection)), error.loggedDetails) - } - } - } - - "map other file rejections to the correct response" in { - val storageNotFound = WrappedStorageRejection(StorageNotFound(genIri(), genProjectRef())) - val differentStorageType = - WrappedStorageRejection(DifferentStorageType(genIri(), StorageType.DiskStorage, StorageType.RemoteDiskStorage)) - val fileNotFound = FileNotFound(genIri(), genProjectRef()) - - val fileRejections: List[(FileRejection, StatusCode, Json)] = List( - (storageNotFound, StatusCodes.NotFound, errorJson(storageNotFound, Some("ResourceNotFound"))), - (fileNotFound, StatusCodes.NotFound, errorJson(fileNotFound, Some("ResourceNotFound"))), - (differentStorageType, StatusCodes.BadRequest, errorJson(differentStorageType, Some("DifferentStorageType"))) - ) - - forAll(fileRejections) { case (error, expectedStatus, expectedJson) => - val (sourceProj, destProj, user) = (genProject(), genProject(), genUser(realm)) - val sourceFileIds = genFilesIdsInProject(sourceProj.ref) - val events = ListBuffer.empty[BatchFilesCopyFilesCalled] - val batchFiles = BatchFilesMock.withError(error, events) - - val route = mkRoute(batchFiles, sourceProj, user, permissions = Set(files.permissions.read)) - val payload = BatchFilesRoutesSpec.mkBulkCopyPayload(sourceProj.ref, sourceFileIds) - - callBulkCopyEndpoint(route, destProj.ref, payload, user) { - response.status shouldBe expectedStatus - response.asJson shouldBe expectedJson - } - } - } - } - - def errorJson(t: Throwable, specificType: Option[String] = None, details: Option[String] = None): Json = { - val 
detailsObj = details.fold(Json.obj())(d => Json.obj("details" := d)) - detailsObj.deepMerge( - Json.obj( - "@context" := Vocabulary.contexts.error.toString, - "@type" := specificType.getOrElse(ClassUtils.simpleName(t)), - "reason" := t.getMessage - ) - ) - } - - def mkRoute( - batchFiles: BatchFiles, - proj: Project, - user: User, - permissions: Set[Permission] - ): Route = { - val aclCheck: AclCheck = AclSimpleCheck((user, AclAddress.fromProject(proj.ref), permissions)).accepted - val identities = IdentitiesDummy(Caller(user, Set(user))) - Route.seal(new BatchFilesRoutes(identities, aclCheck, batchFiles, IndexingAction.noop).routes) - } - - def callBulkCopyEndpoint( - route: Route, - destProj: ProjectRef, - payload: Json, - user: User, - destStorageId: Option[IdSegment] = None, - destTag: Option[UserTag] = None - )(assert: => Assertion): Assertion = { - val asUser = addCredentials(OAuth2BearerToken(user.subject)) - val destStorageIdParam = destStorageId.map(id => s"storage=${id.asString}") - val destTagParam = destTag.map(tag => s"tag=${tag.value}") - val params = (destStorageIdParam.toList ++ destTagParam.toList).mkString("&") - Post(s"/v1/bulk/files/$destProj?$params", payload.toEntity()) ~> asUser ~> route ~> check(assert) - } - - def testBulkCopySucceedsForStubbedFiles( - sourceProj: Project, - sourceFileIds: NonEmptyList[FileId], - destStorageId: Option[IdSegment] = None, - destTag: Option[UserTag] = None - ): Assertion = { - val (destProj, user) = (genProject(), genUser(realm)) - val sourceFileResources = sourceFileIds.map(genFileResource(_, destProj.context)) - val events = ListBuffer.empty[BatchFilesCopyFilesCalled] - val stubbedBatchFiles = BatchFilesMock.withStubbedCopyFiles(sourceFileResources, events) - - val route = mkRoute(stubbedBatchFiles, sourceProj, user, Set(files.permissions.read)) - val payload = BatchFilesRoutesSpec.mkBulkCopyPayload(sourceProj.ref, sourceFileIds) - - callBulkCopyEndpoint(route, destProj.ref, payload, user, destStorageId, destTag) { - response.status shouldBe StatusCodes.Created - val expectedBatchFilesCall = - BatchFilesCopyFilesCalled.fromTestData( - destProj.ref, - sourceProj.ref, - sourceFileIds, - user, - destStorageId, - destTag - ) - events.toList shouldBe List(expectedBatchFilesCall) - response.asJson shouldBe expectedBulkCopyJson(sourceFileResources) - } - } - - def expectedBulkCopyJson(stubbedResources: NonEmptyList[FileResource]): Json = - Json.obj( - "@context" := List(Vocabulary.contexts.bulkOperation, Vocabulary.contexts.metadata, fileContexts.files), - "_results" := stubbedResources.map(expectedBulkOperationFileResourceJson) - ) - - def expectedBulkOperationFileResourceJson(res: FileResource): Json = - FilesRoutesSpec - .fileMetadata( - res.value.project, - res.id, - res.value.attributes, - res.value.storage, - res.value.storageType, - res.rev, - res.deprecated, - res.createdBy, - res.updatedBy - ) - .mapObject(_.remove("@context")) -} - -object BatchFilesRoutesSpec { - def mkBulkCopyPayload(sourceProj: ProjectRef, sourceFileIds: NonEmptyList[FileId]): Json = - Json.obj("sourceProjectRef" := sourceProj.toString, "files" := mkSourceFilesPayload(sourceFileIds)) - - def mkSourceFilesPayload(sourceFileIds: NonEmptyList[FileId]): NonEmptyList[Json] = - sourceFileIds.map(id => mkSourceFilePayloadFromIdSegmentRef(id.id)) - - def mkSourceFilePayloadFromIdSegmentRef(id: IdSegmentRef): Json = id match { - case IdSegmentRef.Latest(value) => mkSourceFilePayload(value.asString, None, None) - case IdSegmentRef.Revision(value, rev) => 
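// Revision lookups populate sourceRev, tag lookups populate sourceTag,
// and latest lookups leave both unset.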
mkSourceFilePayload(value.asString, Some(rev), None) - case IdSegmentRef.Tag(value, tag) => mkSourceFilePayload(value.asString, None, Some(tag.value)) - } - - def mkSourceFilePayload(id: String, rev: Option[Int], tag: Option[String]): Json = - Json.obj("sourceFileId" := id, "sourceRev" := rev, "sourceTag" := tag) -} diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutesSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutesSpec.scala index 3eb6ff7042..183a6cb568 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutesSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutesSpec.scala @@ -15,7 +15,7 @@ import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{FileAttributes import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.{contexts => fileContexts, permissions, FileFixtures, Files, FilesConfig} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageType import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.FileOperations -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.{contexts => storageContexts, permissions => storagesPermissions, StorageFixtures, Storages, StoragesConfig} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.{contexts => storageContexts, permissions => storagesPermissions, FetchStorage, StorageFixtures, Storages, StoragesConfig} import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri import ch.epfl.bluebrain.nexus.delta.rdf.RdfMediaTypes.`application/ld+json` import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary @@ -108,7 +108,7 @@ class FilesRoutesSpec private val aclCheck = AclSimpleCheck().accepted - lazy val storages: Storages = Storages( + lazy val storages: Storages = Storages( fetchContext, ResolverContextResolution(rcr), IO.pure(allowedPerms.toSet), @@ -118,17 +118,11 @@ class FilesRoutesSpec ServiceAccount(User("nexus-sa", Label.unsafe("sa"))), clock ).accepted - lazy val fileOps: FileOperations = FileOperationsMock.disabled - lazy val files: Files = - Files( - fetchContext, - aclCheck, - storages, - xas, - FilesConfig(eventLogConfig, MediaTypeDetectorConfig.Empty), - fileOps, - clock - )(uuidF, typedSystem) + lazy val fileOps: FileOperations = FileOperationsMock.disabled + lazy val fetchStorage: FetchStorage = FetchStorage(storages, aclCheck) + + private val filesConfig = FilesConfig(eventLogConfig, MediaTypeDetectorConfig.Empty) + lazy val files: Files = Files(fetchContext, fetchStorage, xas, filesConfig, fileOps, clock)(uuidF, typedSystem) private val groupDirectives = DeltaSchemeDirectives(fetchContext) private lazy val routes = routesWithIdentities(identities) private def routesWithIdentities(identities: Identities) = diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesSpec.scala index adea8ba835..3933158481 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesSpec.scala @@ -17,7 +17,7 @@ import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.ProjectRejection.{Projec import 
ch.epfl.bluebrain.nexus.delta.sdk.resolvers.ResolverContextResolution import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.{Authenticated, Group, User} import ch.epfl.bluebrain.nexus.delta.sourcing.model.Tag.UserTag -import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef} +import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef, ResourceRef} import ch.epfl.bluebrain.nexus.delta.sourcing.postgres.DoobieScalaTestFixture import ch.epfl.bluebrain.nexus.testkit.scalatest.ce.CatsEffectSpec import io.circe.Json @@ -242,7 +242,7 @@ private class StoragesSpec } "reject fetch by tag" in { - val id = IdSegmentRef.Tag(rdId, UserTag.unsafe("other")) + val id = ResourceRef.Tag(rdId, UserTag.unsafe("other")) storages.fetch(id, projectRef).rejected shouldEqual FetchByTagNotSupported(id) } diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3LocationGeneratorSuite.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3LocationGeneratorSuite.scala index 3168f1d027..4ab7decfff 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3LocationGeneratorSuite.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3LocationGeneratorSuite.scala @@ -11,7 +11,6 @@ class S3LocationGeneratorSuite extends NexusSuite { test("Generate the expected uri") { val prefix = Path("/prefix") - println(prefix) val project = ProjectRef.unsafe("org", "project") val uuid = UUID.fromString("12345678-b2e3-40b9-93de-c809415d7640") val filename = "cat.gif" diff --git a/ship/src/main/scala/ch/epfl/bluebrain/nexus/ship/files/FileProcessor.scala b/ship/src/main/scala/ch/epfl/bluebrain/nexus/ship/files/FileProcessor.scala index cccc4989a7..448ad7551a 100644 --- a/ship/src/main/scala/ch/epfl/bluebrain/nexus/ship/files/FileProcessor.scala +++ b/ship/src/main/scala/ch/epfl/bluebrain/nexus/ship/files/FileProcessor.scala @@ -11,18 +11,19 @@ import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileCommand.Can import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileEvent._ import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection.{FileNotFound, IncorrectRev, ResourceAlreadyExists} import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model._ +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.FetchStorage +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.s3.client.S3StorageClient -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.{FetchStorage, StorageResource} +import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.api.JsonLdApi import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller -import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegmentRef import ch.epfl.bluebrain.nexus.delta.sdk.projects.FetchContext import ch.epfl.bluebrain.nexus.delta.sdk.resolvers.ResolverContextResolution import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.Subject -import ch.epfl.bluebrain.nexus.delta.sourcing.model.{EntityType, ProjectRef} +import ch.epfl.bluebrain.nexus.delta.sourcing.model.ResourceRef.Latest +import ch.epfl.bluebrain.nexus.delta.sourcing.model.{EntityType, ProjectRef, ResourceRef} import 
ch.epfl.bluebrain.nexus.delta.sourcing.{ScopedEventLog, Transactors} import ch.epfl.bluebrain.nexus.ship._ -import ch.epfl.bluebrain.nexus.ship.acls.AclWiring.alwaysAuthorize import ch.epfl.bluebrain.nexus.ship.config.InputConfig import ch.epfl.bluebrain.nexus.ship.files.FileCopier.FileCopyResult.{FileCopySkipped, FileCopySuccess} import ch.epfl.bluebrain.nexus.ship.files.FileProcessor.{forceMediaType, logger, patchMediaType} @@ -163,12 +164,23 @@ object FileProcessor { val storages = StorageWiring.storages(fetchContext, rcr, config, clock, xas) - val fe = new FetchStorage { - override def fetch(id: IdSegmentRef, project: ProjectRef): IO[StorageResource] = - storages.flatMap(_.fetch(id, project)) - - override def fetchDefault(project: ProjectRef): IO[StorageResource] = - storages.flatMap(_.fetchDefault(project)) + val fs = new FetchStorage { + override def onRead(id: ResourceRef, project: ProjectRef)(implicit caller: Caller): IO[Storage] = + storages.flatMap(_.fetch(id, project).map(_.value)) + + /** + * Attempts to fetch the provided storage or the default one in a write context + */ + override def onWrite(id: Option[IriOrBNode.Iri], project: ProjectRef)(implicit + caller: Caller + ): IO[(ResourceRef.Revision, Storage)] = + for { + s <- storages + storage <- id match { + case Some(id) => s.fetch(Latest(id), project) + case None => s.fetchDefault(project) + } + } yield ResourceRef.Revision(storage.id, storage.rev) -> storage.value } val fileCopier = FileCopier(s3Client, config.files) @@ -176,9 +188,8 @@ object FileProcessor { new Files( failingFormDataExtractor, ScopedEventLog(definition(clock), config.eventLog, xas), - alwaysAuthorize, fetchContext, - fe, + fs, linkOperationOnly(s3Client) )(FailingUUID) diff --git a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/files/BatchCopySpec.scala b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/files/BatchCopySpec.scala deleted file mode 100644 index 069a8e8580..0000000000 --- a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/files/BatchCopySpec.scala +++ /dev/null @@ -1,231 +0,0 @@ -package ch.epfl.bluebrain.nexus.tests.kg.files - -import akka.http.scaladsl.model.{ContentTypes, StatusCodes} -import akka.util.ByteString -import cats.effect.IO -import cats.effect.unsafe.implicits.global -import cats.implicits.{catsSyntaxParallelTraverse1, toTraverseOps} -import ch.epfl.bluebrain.nexus.delta.kernel.utils.UrlUtils -import ch.epfl.bluebrain.nexus.testkit.scalatest.FileMatchers.{description => descriptionField, keywords => keywordsField, name => nameField} -import ch.epfl.bluebrain.nexus.tests.HttpClient._ -import ch.epfl.bluebrain.nexus.tests.Identity.storages.Coyote -import ch.epfl.bluebrain.nexus.tests.kg.files.BatchCopySpec.{CopyStorageType, Response, StorageDetails} -import ch.epfl.bluebrain.nexus.tests.kg.files.FilesAssertions.expectFileContent -import ch.epfl.bluebrain.nexus.tests.kg.files.model.FileInput -import ch.epfl.bluebrain.nexus.tests.kg.files.model.FileInput._ -import ch.epfl.bluebrain.nexus.tests.{BaseIntegrationSpec, Identity, Optics} -import io.circe.syntax.KeyOps -import io.circe.{Decoder, DecodingFailure, Json, JsonObject} -import org.scalatest.Assertion - -class BatchCopySpec extends BaseIntegrationSpec { - - implicit val currentUser: Identity.UserCredentials = Coyote - - "Batch copying files" should { - val validStorageTypes = List(CopyStorageType.Disk, CopyStorageType.Remote) - - "succeed for a project in the same organization" in { - forAll(validStorageTypes) { storageType => - 
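// Runs once per storage type (disk and remote), creating a fresh source
// org/project and a destination project within that same org for each pass.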
givenANewOrgProjectAndStorage(storageType) { sourceStorage => - givenANewProjectAndStorageInExistingOrg(sourceStorage.org, storageType) { destStorage => - val sourceFiles = List(emptyTextFile, updatedJsonFileWithContentType, textFileWithContentType) - for { - _ <- sourceFiles.traverse(uploadFile(_, sourceStorage)) - result <- copyFilesAndCheckSavedResourcesAndContents(sourceStorage.projRef, sourceFiles, destStorage) - } yield result - } - }.unsafeToFuture() - } - } - - "succeed for a project in a different organization" in { - forAll(validStorageTypes) { storageType => - givenANewOrgProjectAndStorage(storageType) { sourceStorage => - givenANewOrgProjectAndStorage(storageType) { destStorage => - val sourceFiles = List(emptyTextFile, updatedJsonFileWithContentType, textFileWithContentType) - for { - _ <- sourceFiles.traverse(uploadFile(_, sourceStorage)) - result <- copyFilesAndCheckSavedResourcesAndContents(sourceStorage.projRef, sourceFiles, destStorage) - } yield result - } - }.unsafeToFuture() - } - } - - "succeed for source files in different storages within a project" in { - forAll(validStorageTypes) { storageType => - givenANewOrgProjectAndStorage(storageType) { sourceStorage1 => - givenANewStorageInExistingProject(sourceStorage1.org, sourceStorage1.proj, storageType) { sourceStorage2 => - givenANewOrgProjectAndStorage(storageType) { destStorage => - val (sourceFile1, sourceFile2) = (genTextFileInput(), genTextFileInput()) - - for { - _ <- uploadFile(sourceFile1, sourceStorage1) - _ <- uploadFile(sourceFile2, sourceStorage2) - sourceFiles = List(sourceFile1, sourceFile2) - result <- copyFilesAndCheckSavedResourcesAndContents(sourceStorage1.projRef, sourceFiles, destStorage) - } yield result - } - } - }.unsafeToFuture() - } - } - - "fail if the source and destination storages have different types" in { - givenANewOrgProjectAndStorage(CopyStorageType.Disk) { sourceStorage => - givenANewProjectAndStorageInExistingOrg(sourceStorage.org, CopyStorageType.Remote) { destStorage => - val sourceFiles = List(genTextFileInput(), genTextFileInput()) - for { - _ <- sourceFiles.traverse(uploadFile(_, sourceStorage)) - payload = mkPayload(sourceStorage.projRef, sourceFiles) - uri = s"/bulk/files/${destStorage.projRef}?storage=nxv:${destStorage.storageId}" - result <- deltaClient.post[Json](uri, payload, Coyote) { (_, response) => - response.status shouldEqual StatusCodes.BadRequest - } - } yield result - } - } - } - } - - def genTextFileInput(): FileInput = - FileInput( - genId(), - genString(), - ContentTypes.`text/plain(UTF-8)`, - genString(), - CustomMetadata( - genString(), - genString(), - Map(genString() -> genString()) - ) - ) - - def mkPayload(sourceProjRef: String, sourceFiles: List[FileInput]): Json = { - val sourcePayloads = sourceFiles.map(f => Json.obj("sourceFileId" := f.fileId)) - Json.obj("sourceProjectRef" := sourceProjRef, "files" := sourcePayloads) - } - - def uploadFile(file: FileInput, storage: StorageDetails): IO[Assertion] = - deltaClient.uploadFile(storage.projRef, storage.storageId, file, None) { expectCreated } - - def copyFilesAndCheckSavedResourcesAndContents( - sourceProjRef: String, - sourceFiles: List[FileInput], - destStorage: StorageDetails - ): IO[Assertion] = { - val destProjRef = destStorage.projRef - val payload = mkPayload(sourceProjRef, sourceFiles) - val uri = s"/bulk/files/$destProjRef?storage=nxv:${destStorage.storageId}" - - for { - response <- deltaClient.postAndReturn[Response](uri, payload, Coyote) { (json, response) => - expectCreated(json, response) - } 
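// The @id values returned by the bulk endpoint drive every follow-up check:
// resource existence, copied contents and copied user metadata.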
- ids = response.ids - _ <- checkFileResourcesExist(destProjRef, ids) - _ <- checkFileContentsAreCopiedCorrectly(destProjRef, sourceFiles, ids) - _ <- assertFileUserMetadataWasCopiedCorrectly(destProjRef, sourceFiles, ids) - } yield succeed - } - - private def checkFileContentsAreCopiedCorrectly( - destProjRef: String, - sourceFiles: List[FileInput], - ids: List[String] - ) = - ids.zip(sourceFiles).traverse { case (destId, file) => - deltaClient - .get[ByteString](s"/files/$destProjRef/${UrlUtils.encode(destId)}", Coyote, acceptAll) { - expectFileContent(file.filename, file.contentType, file.contents, cacheable = true) - } - } - - def assertFileUserMetadataWasCopiedCorrectly( - destProjRef: String, - sourceFiles: List[FileInput], - ids: List[String] - ): IO[Assertion] = { - ids - .zip(sourceFiles) - .parTraverse { case (id, file) => - val metadata = file.metadata.value - val name = metadata.name.value - val description = metadata.description.value - val keywords = metadata.keywords - deltaClient.get[Json](s"/files/$destProjRef/${UrlUtils.encode(id)}", Coyote) { (json, response) => - response.status shouldEqual StatusCodes.OK - json should have(nameField(name)) - json should have(descriptionField(description)) - json should have(keywordsField(keywords)) - } - } - .map(_ => succeed) - } - - def checkFileResourcesExist(destProjRef: String, ids: List[String]) = - ids.traverse { id => - deltaClient.get[Json](s"/files/$destProjRef/${UrlUtils.encode(id)}", Coyote) { (json, response) => - response.status shouldEqual StatusCodes.OK - Optics.`@id`.getOption(json) shouldEqual Some(id) - } - } - - def givenANewProjectAndStorageInExistingOrg(org: String, tpe: CopyStorageType)( - test: StorageDetails => IO[Assertion] - ): IO[Assertion] = { - val proj = genId() - createProjects(Coyote, org, proj) >> - givenANewStorageInExistingProject(org, proj, tpe)(test) - } - - def givenANewStorageInExistingProject(org: String, proj: String, tpe: CopyStorageType)( - test: StorageDetails => IO[Assertion] - ): IO[Assertion] = tpe match { - case CopyStorageType.Disk => - val storageId = genId() - storagesDsl.createDiskStorageWithDefaultPerms(storageId, s"$org/$proj") >> - test(StorageDetails(org, proj, storageId)) - case CopyStorageType.Remote => givenANewRemoteStorageInExistingProject(org, proj)(test) - } - - def givenANewRemoteStorageInExistingProject(org: String, proj: String)( - test: StorageDetails => IO[Assertion] - ) = { - val (folder, storageId) = (genId(), genId()) - for { - _ <- storagesDsl.mkProtectedFolderInStorageService(folder) - _ <- storagesDsl.createRemoteStorageWithDefaultPerms(storageId, s"$org/$proj", folder) - result <- test(StorageDetails(org, proj, storageId)) - _ <- storagesDsl.deleteFolderInStorageService(folder) - } yield result - } - - def givenANewOrgProjectAndStorage(tpe: CopyStorageType)(test: StorageDetails => IO[Assertion]): IO[Assertion] = - givenANewProjectAndStorageInExistingOrg(genId(), tpe)(test) -} - -object BatchCopySpec { - - sealed trait CopyStorageType - object CopyStorageType { - case object Disk extends CopyStorageType - case object Remote extends CopyStorageType - } - - final case class StorageDetails(org: String, proj: String, storageId: String) { - def projRef: String = s"$org/$proj" - } - - final case class Response(ids: List[String]) - - object Response { - implicit val dec: Decoder[Response] = Decoder.instance { cur => - cur - .get[List[JsonObject]]("_results") - .flatMap(_.traverse(_.apply("@id").flatMap(_.asString).toRight(DecodingFailure("Missing id", Nil)))) - 
-        .map(Response(_))
-    }
-  }
-}
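Note: the deleted mocks above all share one event-recording pattern: append a call event to a mutable ListBuffer, then return a canned value. The same pattern transfers directly to the reworked FetchStorage trait introduced in the FileProcessor hunk. A minimal sketch, assuming only the onRead/onWrite signatures shown in that hunk; the stub object and event names are illustrative, not part of the codebase:

import cats.effect.IO
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.FetchStorage
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage
import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode
import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller
import ch.epfl.bluebrain.nexus.delta.sourcing.model.{ProjectRef, ResourceRef}

import scala.collection.mutable.ListBuffer

object FetchStorageStub {
  sealed trait Event
  final case class OnReadCalled(id: ResourceRef, project: ProjectRef)             extends Event
  final case class OnWriteCalled(id: Option[IriOrBNode.Iri], project: ProjectRef) extends Event

  // Answers every lookup with the stubbed storage and records a call event,
  // so a test can assert on both the result and the invocation order.
  def recording(stubbed: (ResourceRef.Revision, Storage), events: ListBuffer[Event]): FetchStorage =
    new FetchStorage {
      override def onRead(id: ResourceRef, project: ProjectRef)(implicit caller: Caller): IO[Storage] =
        IO(events.addOne(OnReadCalled(id, project))).as(stubbed._2)

      override def onWrite(id: Option[IriOrBNode.Iri], project: ProjectRef)(implicit
          caller: Caller
      ): IO[(ResourceRef.Revision, Storage)] =
        IO(events.addOne(OnWriteCalled(id, project))).as(stubbed)
    }
}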