@tus/s3-store: Change private modifier into protected (#698)
Vija02 authored Jan 17, 2025
1 parent 7182c7b commit b1c07bc
Showing 2 changed files with 30 additions and 25 deletions.
5 changes: 5 additions & 0 deletions .changeset/cyan-hornets-repair.md
@@ -0,0 +1,5 @@
---
"@tus/s3-store": minor
---

Change private modifier to protected
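
In practice this is what the change enables: an application can subclass S3Store and override or call these members from the subclass instead of forking the package. A minimal sketch of the idea in TypeScript, assuming the helpers keep the signatures shown in the diff below; the PrefixedS3Store class and the uploads/ key prefix are purely illustrative:

import { S3Store } from '@tus/s3-store'

// Illustrative only: with infoKey/partKey now protected instead of private,
// a subclass can override them, e.g. to namespace all object keys under a prefix.
class PrefixedS3Store extends S3Store {
  protected override infoKey(id: string) {
    return `uploads/${id}.info`
  }

  protected override partKey(id: string, isIncomplete = false) {
    return isIncomplete ? `uploads/${id}.part` : `uploads/${id}`
  }
}

Protected keeps these helpers out of the store's public surface while still letting subclasses reach them, which is the point of the modifier change.
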
50 changes: 25 additions & 25 deletions packages/s3-store/src/index.ts
@@ -24,7 +24,7 @@ import path from 'node:path'

const log = debug('tus-node-server:stores:s3store')

-type Options = {
+export type Options = {
// The preferred part size for parts send to S3. Can not be lower than 5MiB or more than 5GiB.
// The server calculates the optimal part size, which takes this size into account,
// but may increase it to not exceed the S3 10K parts limit.
@@ -82,13 +82,13 @@ function calcOffsetFromParts(parts?: Array<AWS.Part>) {
// For each incoming PATCH request (a call to `write`), a new part is uploaded
// to S3.
export class S3Store extends DataStore {
-private bucket: string
-private cache: KvStore<MetadataValue>
-private client: S3
-private preferredPartSize: number
-private expirationPeriodInMilliseconds = 0
-private useTags = true
-private partUploadSemaphore: Semaphore
+protected bucket: string
+protected cache: KvStore<MetadataValue>
+protected client: S3
+protected preferredPartSize: number
+protected expirationPeriodInMilliseconds = 0
+protected useTags = true
+protected partUploadSemaphore: Semaphore
public maxMultipartParts = 10_000 as const
public minPartSize = 5_242_880 as const // 5MiB
public maxUploadSize = 5_497_558_138_880 as const // 5TiB
@@ -131,7 +131,7 @@ export class S3Store extends DataStore {
* on the S3 object's `Metadata` field, so that only a `headObject`
* is necessary to retrieve the data.
*/
-private async saveMetadata(upload: Upload, uploadId: string) {
+protected async saveMetadata(upload: Upload, uploadId: string) {
log(`[${upload.id}] saving metadata`)
await this.client.putObject({
Bucket: this.bucket,
@@ -146,7 +146,7 @@ export class S3Store extends DataStore {
log(`[${upload.id}] metadata file saved`)
}

-private async completeMetadata(upload: Upload) {
+protected async completeMetadata(upload: Upload) {
if (!this.shouldUseExpirationTags()) {
return
}
@@ -169,7 +169,7 @@ export class S3Store extends DataStore {
* There's a small and simple caching mechanism to avoid multiple
* HTTP calls to S3.
*/
-private async getMetadata(id: string): Promise<MetadataValue> {
+protected async getMetadata(id: string): Promise<MetadataValue> {
const cached = await this.cache.get(id)
if (cached) {
return cached
@@ -196,11 +196,11 @@ export class S3Store extends DataStore {
return metadata
}

-private infoKey(id: string) {
+protected infoKey(id: string) {
return `${id}.info`
}

-private partKey(id: string, isIncomplete = false) {
+protected partKey(id: string, isIncomplete = false) {
if (isIncomplete) {
id += '.part'
}
@@ -212,7 +212,7 @@ export class S3Store extends DataStore {
return id
}

-private async uploadPart(
+protected async uploadPart(
metadata: MetadataValue,
readStream: fs.ReadStream | Readable,
partNumber: number
@@ -228,7 +228,7 @@ export class S3Store extends DataStore {
return data.ETag as string
}

-private async uploadIncompletePart(
+protected async uploadIncompletePart(
id: string,
readStream: fs.ReadStream | Readable
): Promise<string> {
@@ -242,7 +242,7 @@ export class S3Store extends DataStore {
return data.ETag as string
}

-private async downloadIncompletePart(id: string) {
+protected async downloadIncompletePart(id: string) {
const incompletePart = await this.getIncompletePart(id)

if (!incompletePart) {
@@ -301,7 +301,7 @@ export class S3Store extends DataStore {
}
}

-private async getIncompletePart(id: string): Promise<Readable | undefined> {
+protected async getIncompletePart(id: string): Promise<Readable | undefined> {
try {
const data = await this.client.getObject({
Bucket: this.bucket,
@@ -317,7 +317,7 @@ export class S3Store extends DataStore {
}
}

-private async getIncompletePartSize(id: string): Promise<number | undefined> {
+protected async getIncompletePartSize(id: string): Promise<number | undefined> {
try {
const data = await this.client.headObject({
Bucket: this.bucket,
@@ -332,7 +332,7 @@ export class S3Store extends DataStore {
}
}

-private async deleteIncompletePart(id: string): Promise<void> {
+protected async deleteIncompletePart(id: string): Promise<void> {
await this.client.deleteObject({
Bucket: this.bucket,
Key: this.partKey(id, true),
@@ -342,7 +342,7 @@ export class S3Store extends DataStore {
/**
* Uploads a stream to s3 using multiple parts
*/
-private async uploadParts(
+protected async uploadParts(
metadata: MetadataValue,
readStream: stream.Readable,
currentPartNumber: number,
@@ -429,7 +429,7 @@ export class S3Store extends DataStore {
* Completes a multipart upload on S3.
* This is where S3 concatenates all the uploaded parts.
*/
-private async finishMultipartUpload(metadata: MetadataValue, parts: Array<AWS.Part>) {
+protected async finishMultipartUpload(metadata: MetadataValue, parts: Array<AWS.Part>) {
const response = await this.client.completeMultipartUpload({
Bucket: this.bucket,
Key: metadata.file.id,
@@ -450,7 +450,7 @@ export class S3Store extends DataStore {
* Gets the number of complete parts/chunks already uploaded to S3.
* Retrieves only consecutive parts.
*/
-private async retrieveParts(
+protected async retrieveParts(
id: string,
partNumberMarker?: string
): Promise<Array<AWS.Part>> {
@@ -483,12 +483,12 @@ export class S3Store extends DataStore {
/**
* Removes cached data for a given file.
*/
-private async clearCache(id: string) {
+protected async clearCache(id: string) {
log(`[${id}] removing cached data`)
await this.cache.delete(id)
}

-private calcOptimalPartSize(size?: number): number {
+protected calcOptimalPartSize(size?: number): number {
// When upload size is not know we assume largest possible value (`maxUploadSize`)
if (size === undefined) {
size = this.maxUploadSize
@@ -776,7 +776,7 @@ export class S3Store extends DataStore {
return deleted
}

-private async uniqueTmpFileName(template: string): Promise<string> {
+protected async uniqueTmpFileName(template: string): Promise<string> {
let tries = 0
const maxTries = 10

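
The diff above also changes type Options to export type Options. Assuming the package entry point exposes it under that name (it is declared in src/index.ts) and that the constructor options below match the store's documented shape (s3ClientConfig with a bucket plus the usual AWS client settings), a consumer could type its configuration explicitly; the bucket, region and credentials are placeholders:

import { S3Store, type Options } from '@tus/s3-store'

// Hypothetical configuration object, typed with the newly exported Options type.
const options: Options = {
  s3ClientConfig: {
    bucket: 'my-tus-bucket', // placeholder bucket name
    region: 'us-east-1', // placeholder region
    credentials: {
      accessKeyId: process.env.AWS_ACCESS_KEY_ID ?? '',
      secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY ?? '',
    },
  },
}

const store = new S3Store(options)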
