From b1c07bc32929c555d007be648e7d8626c618fffa Mon Sep 17 00:00:00 2001
From: Michael Salim
Date: Fri, 17 Jan 2025 09:34:52 +0000
Subject: [PATCH] @tus/s3-store: Change private modifier into protected (#698)

---
 .changeset/cyan-hornets-repair.md |  5 ++++
 packages/s3-store/src/index.ts    | 50 +++++++++++++++----------
 2 files changed, 30 insertions(+), 25 deletions(-)
 create mode 100644 .changeset/cyan-hornets-repair.md

diff --git a/.changeset/cyan-hornets-repair.md b/.changeset/cyan-hornets-repair.md
new file mode 100644
index 00000000..bfc68a04
--- /dev/null
+++ b/.changeset/cyan-hornets-repair.md
@@ -0,0 +1,5 @@
+---
+"@tus/s3-store": minor
+---
+
+Change private modifier to protected
diff --git a/packages/s3-store/src/index.ts b/packages/s3-store/src/index.ts
index 640348dc..954f5395 100644
--- a/packages/s3-store/src/index.ts
+++ b/packages/s3-store/src/index.ts
@@ -24,7 +24,7 @@ import path from 'node:path'
 
 const log = debug('tus-node-server:stores:s3store')
 
-type Options = {
+export type Options = {
   // The preferred part size for parts send to S3. Can not be lower than 5MiB or more than 5GiB.
   // The server calculates the optimal part size, which takes this size into account,
   // but may increase it to not exceed the S3 10K parts limit.
@@ -82,13 +82,13 @@ function calcOffsetFromParts(parts?: Array) {
 // For each incoming PATCH request (a call to `write`), a new part is uploaded
 // to S3.
 export class S3Store extends DataStore {
-  private bucket: string
-  private cache: KvStore
-  private client: S3
-  private preferredPartSize: number
-  private expirationPeriodInMilliseconds = 0
-  private useTags = true
-  private partUploadSemaphore: Semaphore
+  protected bucket: string
+  protected cache: KvStore
+  protected client: S3
+  protected preferredPartSize: number
+  protected expirationPeriodInMilliseconds = 0
+  protected useTags = true
+  protected partUploadSemaphore: Semaphore
   public maxMultipartParts = 10_000 as const
   public minPartSize = 5_242_880 as const // 5MiB
   public maxUploadSize = 5_497_558_138_880 as const // 5TiB
@@ -131,7 +131,7 @@ export class S3Store extends DataStore {
    * on the S3 object's `Metadata` field, so that only a `headObject`
    * is necessary to retrieve the data.
    */
-  private async saveMetadata(upload: Upload, uploadId: string) {
+  protected async saveMetadata(upload: Upload, uploadId: string) {
     log(`[${upload.id}] saving metadata`)
     await this.client.putObject({
       Bucket: this.bucket,
@@ -146,7 +146,7 @@ export class S3Store extends DataStore {
     log(`[${upload.id}] metadata file saved`)
   }
 
-  private async completeMetadata(upload: Upload) {
+  protected async completeMetadata(upload: Upload) {
     if (!this.shouldUseExpirationTags()) {
       return
     }
@@ -169,7 +169,7 @@ export class S3Store extends DataStore {
    * There's a small and simple caching mechanism to avoid multiple
    * HTTP calls to S3.
    */
-  private async getMetadata(id: string): Promise {
+  protected async getMetadata(id: string): Promise {
     const cached = await this.cache.get(id)
     if (cached) {
       return cached
@@ -196,11 +196,11 @@ export class S3Store extends DataStore {
     return metadata
   }
 
-  private infoKey(id: string) {
+  protected infoKey(id: string) {
     return `${id}.info`
   }
 
-  private partKey(id: string, isIncomplete = false) {
+  protected partKey(id: string, isIncomplete = false) {
     if (isIncomplete) {
       id += '.part'
     }
@@ -212,7 +212,7 @@ export class S3Store extends DataStore {
     return id
   }
 
-  private async uploadPart(
+  protected async uploadPart(
     metadata: MetadataValue,
     readStream: fs.ReadStream | Readable,
     partNumber: number
@@ -228,7 +228,7 @@ export class S3Store extends DataStore {
     return data.ETag as string
   }
 
-  private async uploadIncompletePart(
+  protected async uploadIncompletePart(
     id: string,
     readStream: fs.ReadStream | Readable
   ): Promise {
@@ -242,7 +242,7 @@ export class S3Store extends DataStore {
     return data.ETag as string
   }
 
-  private async downloadIncompletePart(id: string) {
+  protected async downloadIncompletePart(id: string) {
     const incompletePart = await this.getIncompletePart(id)
 
     if (!incompletePart) {
@@ -301,7 +301,7 @@ export class S3Store extends DataStore {
     }
   }
 
-  private async getIncompletePart(id: string): Promise {
+  protected async getIncompletePart(id: string): Promise {
     try {
       const data = await this.client.getObject({
         Bucket: this.bucket,
@@ -317,7 +317,7 @@ export class S3Store extends DataStore {
     }
   }
 
-  private async getIncompletePartSize(id: string): Promise {
+  protected async getIncompletePartSize(id: string): Promise {
     try {
       const data = await this.client.headObject({
         Bucket: this.bucket,
@@ -332,7 +332,7 @@ export class S3Store extends DataStore {
     }
   }
 
-  private async deleteIncompletePart(id: string): Promise {
+  protected async deleteIncompletePart(id: string): Promise {
     await this.client.deleteObject({
       Bucket: this.bucket,
       Key: this.partKey(id, true),
@@ -342,7 +342,7 @@ export class S3Store extends DataStore {
   /**
    * Uploads a stream to s3 using multiple parts
    */
-  private async uploadParts(
+  protected async uploadParts(
     metadata: MetadataValue,
     readStream: stream.Readable,
     currentPartNumber: number,
@@ -429,7 +429,7 @@ export class S3Store extends DataStore {
    * Completes a multipart upload on S3.
    * This is where S3 concatenates all the uploaded parts.
    */
-  private async finishMultipartUpload(metadata: MetadataValue, parts: Array) {
+  protected async finishMultipartUpload(metadata: MetadataValue, parts: Array) {
     const response = await this.client.completeMultipartUpload({
       Bucket: this.bucket,
       Key: metadata.file.id,
@@ -450,7 +450,7 @@ export class S3Store extends DataStore {
    * Gets the number of complete parts/chunks already uploaded to S3.
    * Retrieves only consecutive parts.
    */
-  private async retrieveParts(
+  protected async retrieveParts(
     id: string,
     partNumberMarker?: string
   ): Promise> {
@@ -483,12 +483,12 @@ export class S3Store extends DataStore {
   /**
    * Removes cached data for a given file.
    */
-  private async clearCache(id: string) {
+  protected async clearCache(id: string) {
     log(`[${id}] removing cached data`)
     await this.cache.delete(id)
   }
 
-  private calcOptimalPartSize(size?: number): number {
+  protected calcOptimalPartSize(size?: number): number {
     // When upload size is not know we assume largest possible value (`maxUploadSize`)
     if (size === undefined) {
       size = this.maxUploadSize
@@ -776,7 +776,7 @@ export class S3Store extends DataStore {
     return deleted
   }
 
-  private async uniqueTmpFileName(template: string): Promise {
+  protected async uniqueTmpFileName(template: string): Promise {
     let tries = 0
     const maxTries = 10
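
A minimal sketch of the kind of subclassing this change enables. With the previous private modifiers, TypeScript rejects a subclass that re-declares these members and blocks access to them from derived classes; marking them protected lets a subclass reuse or replace helpers such as infoKey and partKey. The class name PrefixedS3Store and the uploads/ prefix below are illustrative assumptions, not part of this patch.

import {S3Store} from '@tus/s3-store'

// Hypothetical subclass (not part of the patch): overrides the now-protected
// key helpers so .info objects and incomplete parts land under a custom prefix.
class PrefixedS3Store extends S3Store {
  protected infoKey(id: string) {
    // The base implementation returns `${id}.info`; prepend a folder-style prefix.
    return `uploads/${super.infoKey(id)}`
  }

  protected partKey(id: string, isIncomplete = false) {
    return `uploads/${super.partKey(id, isIncomplete)}`
  }
}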