diff --git a/src/functions/files.ts b/src/functions/files.ts
index 1317dfd..e926975 100644
--- a/src/functions/files.ts
+++ b/src/functions/files.ts
@@ -7,6 +7,7 @@ import sharp = require('sharp')
 import logger from 'utils/logger'
 
 const Bucket = process.env.AWS_S3_BUCKET
+const Region = process.env.AWS_REGION
 
 const imageSizes: Array<{ name: SizeName; maxWidth: number }> = [
   {
@@ -33,6 +34,15 @@ const imageSizes: Array<{ name: SizeName; maxWidth: number }> = [
 
 type SizeName = 'original' | 'large' | 'medium' | 'small' | 'thumb'
 
+type ACL =
+  | 'private'
+  | 'public-read'
+  | 'public-read-write'
+  | 'authenticated-read'
+  | 'aws-exec-read'
+  | 'bucket-owner-read'
+  | 'bucket-owner-full-control'
+
 export const s3 = new S3({
   accessKeyId: process.env.AWS_ACCESS_KEY_ID,
   secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
@@ -41,6 +51,12 @@ export const s3 = new S3({
 
 const UPLOAD_DIR = process.env.UPLOAD_DIR || 'uploads/'
 
+export const FILE_LOCATIONS = {
+  LOCAL: 'local',
+  S3: 's3',
+  NOT_EXIST: 'not_exist',
+}
+
 export const getFilePath = (fileName: string) =>
   path.resolve(UPLOAD_DIR, fileName)
 
@@ -56,18 +72,18 @@ export const getFileLocation = async (fileName: string) => {
 
   if (existsOnS3) {
     logger.verbose(`[getFileLocation][%s] file location is S3`, fileName)
-    return 's3'
+    return FILE_LOCATIONS.S3
   }
 
   const existsOnLocal = fs.existsSync(getFilePath(fileName))
 
   if (existsOnLocal) {
     logger.verbose(`[getFileLocation][%s] file location is LOCAL`, fileName)
-    return 'local'
+    return FILE_LOCATIONS.LOCAL
   }
 
   logger.verbose(`[getFileLocation][%s] file location is NOT_EXIST`, fileName)
-  return 'not_exist'
+  return FILE_LOCATIONS.NOT_EXIST
 }
 
 export const fileExists = async (fileName: string) => {
@@ -107,6 +123,15 @@ const readFileFromS3 = (fileName: string) => {
   })
 }
 
+export const readFileBufferFromLocal = async (fileName: string) => {
+  if (fs.existsSync(getFilePath(fileName))) {
+    logger.verbose(`[readFileBuffer][%s] File found on local`, fileName)
+    return fs.readFileSync(getFilePath(fileName))
+  }
+
+  return
+}
+
 export const readFileBuffer = async (fileName: string) => {
   logger.verbose(`[readFileBuffer][%s] Getting file buffer`, fileName)
 
@@ -137,7 +162,9 @@ export const uploadFileToS3 = async (fileName: string) => {
 
   const fileBuffer = fs.readFileSync(getFilePath(fileName))
 
-  return s3.putObject({ Bucket, Key: fileName, Body: fileBuffer }).promise()
+  return s3
+    .putObject({ Bucket, Key: fileName, Body: fileBuffer, ACL: 'public-read' })
+    .promise()
 }
 
 export const getFileMimeType = async (fileName: string) => {
@@ -150,7 +177,14 @@ export const getFileMimeType = async (fileName: string) => {
   return await getFileType(fileBuffer)
 }
 
-const generateFileNameWithSize = (fileName: string, sizeName: SizeName) => {
+export const generateFileNameWithSize = (
+  fileName: string,
+  sizeName?: SizeName,
+) => {
+  if (!sizeName) {
+    return fileName
+  }
+
   const sizeSuffix = sizeName === 'original' ? '' : `-${sizeName}`
 
   return /\./.test(fileName)
@@ -280,6 +314,128 @@ export const reUploadImageToS3AndRemove = async (startAt: number = 0) => {
   }
 }
 
+/**
+ * @see https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#listObjects-property
+ */
+export const listObjects = ({
+  limit,
+  marker,
+}: {
+  /** Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more. */
+  limit?: number
+  /** Specifies the key to start with when listing objects in a bucket. */
+  marker?: string
+} = {}) => {
+  return s3
+    .listObjects({
+      Bucket,
+      // up to 1000
+      MaxKeys: limit,
+      Marker: marker,
+    })
+    .promise()
+}
+
+export const listAllObjects = async (
+  startAt: number = 0,
+  marker?: string,
+  prevObjects: S3.Object[] = [],
+): Promise<S3.Object[]> => {
+  logger.verbose(
+    `[Get objects from ${startAt}000-${startAt + 1}000]: marker ${marker}`,
+  )
+  const response: S3.ListObjectsOutput = await listObjects({
+    marker,
+  })
+
+  const objects = prevObjects.concat(response.Contents)
+
+  if (response.IsTruncated) {
+    return await listAllObjects(
+      startAt + 1,
+      response.Contents.slice(-1)[0].Key,
+      objects,
+    )
+  }
+
+  return objects
+}
+
+export const putObjectACL = ({ key, acl }: { key: string; acl: ACL }) => {
+  return s3
+    .putObjectAcl({
+      Bucket,
+      Key: key,
+      ACL: acl,
+    })
+    .promise()
+}
+
+export const rePutAllErrorObjectsACL = async (
+  prevObjects: S3.Object[],
+): Promise<void> => {
+  logger.verbose(`[Re put error object ACL]: ${prevObjects.length} objects`)
+
+  const objects = (await Promise.all(
+    prevObjects.map(async item => {
+      try {
+        await putObjectACL({ key: item.Key, acl: 'public-read' })
+        return null
+      } catch (err) {
+        logger.error(`[Put object ACL error]: ${item.Key}`, err)
+        return item
+      }
+    }),
+  )).filter(object => object)
+
+  if (objects.length < 1) {
+    logger.info(`[Re put error object ACL has finished]`)
+  } else {
+    return await rePutAllErrorObjectsACL(objects)
+  }
+}
+
+export const rePutAllObjectsACL = async (
+  startAt: number = 0,
+  marker?: string,
+  prevErrorObjects: S3.Object[] = [],
+): Promise<void> => {
+  let errorObjects: S3.Object[] = prevErrorObjects
+
+  logger.verbose(
+    `[Put object ACL from ${startAt}000-${startAt + 1}000]: marker ${marker}`,
+  )
+
+  const response: S3.ListObjectsOutput = await listObjects({
+    marker,
+  })
+
+  await Promise.all(
+    response.Contents.map(async item => {
+      try {
+        return await putObjectACL({ key: item.Key, acl: 'public-read' })
+      } catch (err) {
+        errorObjects = errorObjects.concat(item)
+        logger.error(`[Put object ACL error]: ${item.Key}`, err)
+      }
+    }),
+  )
+
+  if (response.IsTruncated) {
+    return await rePutAllObjectsACL(
      startAt + 1,
      response.Contents.slice(-1)[0].Key,
      errorObjects,
    )
+  } else {
+    logger.info(
+      `[Put object ACL has finished] with ${errorObjects.length} errors`,
+    )
+
+    return await rePutAllErrorObjectsACL(errorObjects)
+  }
+}
+
 export const getObjectUrl = (
   fileName: string,
   { size = 'original' }: { size?: SizeName },
@@ -292,3 +448,12 @@ export const getObjectUrl = (
     Expires: 60 * 24 * 7,
   })
 }
+
+export const getObjectPublicUrl = (
+  fileName: string,
+  { size = 'original' }: { size?: SizeName } = {},
+) => {
+  const fileNameWithSize = generateFileNameWithSize(fileName, size)
+
+  return `https://${Bucket}.s3.${Region}.amazonaws.com/${fileNameWithSize}`
+}
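Note on the ACL helpers added above: `rePutAllObjectsACL` walks the bucket in pages of up to 1,000 keys (the S3 `listObjects` maximum), applies `public-read` to each object, collects any keys whose `putObjectAcl` call failed, and finally hands the failures to `rePutAllErrorObjectsACL`, which retries recursively until none remain. A minimal sketch of how the backfill might be driven from a one-off script; the script path and the bare `console` logging are assumptions, not part of this diff:

    // scripts/backfill-acl.ts (hypothetical runner, not part of this change)
    import { listAllObjects, rePutAllObjectsACL } from 'functions/files'

    const main = async () => {
      // Re-applies 'public-read' to every object, retrying failed keys.
      await rePutAllObjectsACL()

      // Sanity check: list every key now visible in the bucket.
      const objects = await listAllObjects()
      console.log(`bucket now lists ${objects.length} objects`)
    }

    main().catch(err => {
      console.error(err)
      process.exit(1)
    })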
diff --git a/src/routes/index.ts b/src/routes/index.ts
index 8f05261..b82320d 100644
--- a/src/routes/index.ts
+++ b/src/routes/index.ts
@@ -1,42 +1,20 @@
 import express from 'express'
-import ExpressRedisCache from 'express-redis-cache'
-import fileType from 'file-type'
-import { getObjectUrl, readFileBuffer } from 'functions/files'
-import { processImage } from 'functions/images'
+import {
+  FILE_LOCATIONS,
+  generateFileNameWithSize,
+  getFileLocation,
+  getObjectPublicUrl,
+} from 'functions/files'
 import {
   filesProcessing,
   multer,
   renameFilesToChecksum,
 } from 'middlewares/files'
-import path from 'path'
-import redis from 'redis'
 import logger from 'utils/logger'
 
 const router = express.Router()
 
-const redisClient = redis.createClient({ url: process.env.REDIS_URI })
-
-const DEFAULT_TTL = +(process.env.CACHE_TTL || 60)
-
-const cache = ExpressRedisCache({
-  client: redisClient,
-  prefix: 'file',
-  expire: DEFAULT_TTL, // 1 min,
-})
-
-cache.on('message', message => logger.verbose('Cached %s', message))
-cache.on('connected', () => logger.verbose('Cache redis server connected'))
-cache.on('disconnected', () => logger.verbose('Cache redis server connected'))
-cache.on('error', error => logger.error('Cache redis server error %o', error))
-cache.on('deprecated', deprecated =>
-  logger.warning('deprecated warning', {
-    type: deprecated.type,
-    name: deprecated.name,
-    substitute: deprecated.substitute,
-    file: deprecated.file,
-    line: deprecated.line,
-  }),
-)
+const DEFAULT_IMAGE_NAME = '8acd942c9940ce0a7df1a8e15d4bad81'
 
 router.put(
   '/images',
@@ -49,73 +27,25 @@ router.put(
   },
 )
 
-router.get(
-  '/:fileName',
-  (req, res, next) => {
-    const { cache: enableCache = 'true' } = req.query
-
-    if (
-      enableCache === 'false' ||
-      process.env.DISABLE_EXPRESS_CACHING === 'true'
-    ) {
-      return next()
-    }
-
-    // const imageFormat = req.query.format || 'webp'
+router.get('/:fileName', async (req, res, next) => {
+  const fileName: string = req.params.fileName
 
-    res.express_redis_cache_name = `${req.originalUrl}`
-    return cache.route({
-      binary: true,
-      expire: {
-        200: DEFAULT_TTL,
-        404: 15,
-        xxx: 1,
-      },
-    })(req, res, next)
-  },
-  async (req, res, next) => {
-    const fileName: string = req.params.fileName
-    // const imageFormat = req.query.format
-
-    logger.verbose('Getting file %s', fileName)
-
-    res.redirect(getObjectUrl(fileName, req.query), 301)
-
-    // try {
-    //   const fileBuffer = await readFileBuffer(fileName)
+  logger.verbose('Getting file %s', fileName)
 
-    //   if (!fileBuffer) {
-    //     return res
-    //       .header('Cache-Control', 'private')
-    //       .status(404)
-    //       .sendFile(path.resolve(__dirname, '../../static/empty.webp'))
-    //   }
+  const fileNameWithSize = generateFileNameWithSize(
+    fileName,
+    req.query && req.query.size,
+  )
 
-    //   const optimizedFileBuffer = fileType(fileBuffer).mime.startsWith('image/')
-    //     ? await (await processImage(
-    //         fileBuffer,
-    //         req.query,
-    //         imageFormat === 'jpeg' ? 'jpeg' : 'webp',
-    //       )).toBuffer()
-    //     : fileBuffer
+  const location = await getFileLocation(fileNameWithSize)
 
-    //   logger.verbose(
-    //     'Downloaded file %s %s',
-    //     fileName,
-    //     fileType(fileBuffer).mime,
-    //   )
+  switch (location) {
+    case FILE_LOCATIONS.S3:
+      return res.redirect(301, getObjectPublicUrl(fileName, req.query))
 
-    //   logger.info(getObjectUrl(fileName))
-
-    //   res
-    //     .header('Cache-Control', 'public, max-age=31536000')
-    //     .contentType(fileType(optimizedFileBuffer).mime)
-    //     .send(optimizedFileBuffer)
-    // } catch (err) {
-    //   logger.error(err)
-    //   throw err
-    // }
-  },
-)
+    default:
+      return res.redirect(301, getObjectPublicUrl(DEFAULT_IMAGE_NAME))
+  }
+})
 
 export default router
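Note on the rewritten route above: rather than streaming bytes through Express (and the Redis-backed response cache that supported it), the handler now resolves where the file lives and issues a 301 redirect to the bucket's public URL; anything not found on S3, including files that exist only in the local uploads directory, falls through to the `DEFAULT_IMAGE_NAME` placeholder. Since uploads are renamed to their checksum (`renameFilesToChecksum`), a permanent redirect should be safe for clients to cache. A sketch of the URL the client is sent to, using placeholder env values:

    // Assuming AWS_S3_BUCKET=my-bucket and AWS_REGION=ap-southeast-1 (placeholders),
    // and that generateFileNameWithSize leaves the name unchanged for 'original':
    getObjectPublicUrl('8acd942c9940ce0a7df1a8e15d4bad81')
    // -> 'https://my-bucket.s3.ap-southeast-1.amazonaws.com/8acd942c9940ce0a7df1a8e15d4bad81'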
diff --git a/static/empty.jpg b/static/empty.jpg
new file mode 100644
index 0000000..e47e525
Binary files /dev/null and b/static/empty.jpg differ
diff --git a/static/empty.webp b/static/empty.webp
deleted file mode 100644
index ac60f67..0000000
Binary files a/static/empty.webp and /dev/null differ
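One way to verify the migration afterwards is to read back an object's grants with the same SDK client and confirm the global `AllUsers` group has `READ`. A sketch only; the key below is a placeholder:

    import { s3 } from 'functions/files'

    const Bucket = process.env.AWS_S3_BUCKET

    // Fetch the ACL grants for one migrated object and check whether the
    // AllUsers group now has READ access (i.e. the object is public-read).
    s3.getObjectAcl({ Bucket, Key: 'some-file-checksum' /* placeholder */ })
      .promise()
      .then(acl => {
        const isPublic = (acl.Grants || []).some(
          grant =>
            grant.Grantee &&
            grant.Grantee.URI ===
              'http://acs.amazonaws.com/groups/global/AllUsers' &&
            grant.Permission === 'READ',
        )
        console.log(isPublic ? 'public-read' : 'not public')
      })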