diff --git a/backend/.env.template b/backend/.env.template
index dab8bee72..8531e6cd7 100644
--- a/backend/.env.template
+++ b/backend/.env.template
@@ -22,3 +22,4 @@ AWS_ACCESS_KEY_ID=
 AWS_SECRET_ACCESS_KEY=
 AWS_ENDPOINT=
 AWS_REGION=
+AWS_BUCKET=
diff --git a/backend/src/config/index.js b/backend/src/config/index.js
index 10332e798..4c81bb181 100644
--- a/backend/src/config/index.js
+++ b/backend/src/config/index.js
@@ -22,7 +22,7 @@ const {
   AWS_SECRET_ACCESS_KEY,
   AWS_ENDPOINT,
   AWS_REGION,
-  AWS_BUCKET = 'image-upload',
+  AWS_BUCKET,
   NEO4J_URI = 'bolt://localhost:7687',
   NEO4J_USERNAME = 'neo4j',
   NEO4J_PASSWORD = 'neo4j',
@@ -71,12 +71,16 @@ export const developmentConfigs = {
 export const sentryConfigs = { SENTRY_DSN_BACKEND, COMMIT }
 export const redisConfigs = { REDIS_DOMAIN, REDIS_PORT, REDIS_PASSWORD }
 
+const S3_CONFIGURED =
+  AWS_ACCESS_KEY_ID && AWS_SECRET_ACCESS_KEY && AWS_ENDPOINT && AWS_REGION && AWS_BUCKET
+
 export const s3Configs = {
   AWS_ACCESS_KEY_ID,
   AWS_SECRET_ACCESS_KEY,
   AWS_ENDPOINT,
   AWS_REGION,
   AWS_BUCKET,
+  S3_CONFIGURED,
 }
 
 export default {
diff --git a/backend/src/db/migrations/20200312140328-bulk_upload_to_s3.js b/backend/src/db/migrations/20200312140328-bulk_upload_to_s3.js
index 1ebc55daa..3143a32cc 100644
--- a/backend/src/db/migrations/20200312140328-bulk_upload_to_s3.js
+++ b/backend/src/db/migrations/20200312140328-bulk_upload_to_s3.js
@@ -21,9 +21,10 @@ export async function up(next) {
     AWS_ENDPOINT: endpoint,
     AWS_REGION: region,
     AWS_BUCKET: Bucket,
+    S3_CONFIGURED,
   } = s3Configs
 
-  if (!(accessKeyId || secretAccessKey)) {
+  if (!S3_CONFIGURED) {
     // eslint-disable-next-line no-console
     console.log('No S3 given, cannot upload image files')
     return
diff --git a/backend/src/schema/resolvers/images/images.js b/backend/src/schema/resolvers/images/images.js
index edcd1fd97..2558259f7 100644
--- a/backend/src/schema/resolvers/images/images.js
+++ b/backend/src/schema/resolvers/images/images.js
@@ -14,6 +14,7 @@ const {
   AWS_ENDPOINT: endpoint,
   AWS_REGION: region,
   AWS_BUCKET: Bucket,
+  S3_CONFIGURED,
 } = s3Configs
 
 export async function deleteImage(resource, relationshipType, opts = {}) {
@@ -88,7 +89,10 @@ const wrapTransaction = async (wrappedCallback, args, opts) => {
   }
 }
 
-const deleteImageFile = (image, deleteCallback = localFileDelete) => {
+const deleteImageFile = (image, deleteCallback) => {
+  if (!deleteCallback) {
+    deleteCallback = S3_CONFIGURED ? s3Delete : localFileDelete
+  }
   const { url } = image
   deleteCallback(url)
   return url
@@ -96,7 +100,7 @@
 
 const uploadImageFile = async (upload, uploadCallback) => {
   if (!uploadCallback) {
-    uploadCallback = AWS_ACCESS_KEY_ID && AWS_SECRET_ACCESS_KEY ? s3Upload : localFileUpload
+    uploadCallback = S3_CONFIGURED ? s3Upload : localFileUpload
   }
   const { createReadStream, filename, mimetype } = await upload
   const { name, ext } = path.parse(filename)
@@ -123,7 +127,7 @@ const localFileUpload = ({ createReadStream, destination }) => {
 
 const s3Upload = async ({ createReadStream, uniqueFilename, mimetype }) => {
   const s3 = new S3({ region, endpoint })
-  const s3Location = `original${uniqueFilename}`
+  const s3Location = `original/${uniqueFilename}`
 
   const params = {
     Bucket,
@@ -141,3 +145,14 @@ const localFileDelete = async url => {
   const location = `public${url}`
   if (existsSync(location)) unlinkSync(location)
 }
+
+const s3Delete = async url => {
+  const s3 = new S3({ region, endpoint })
+  let { pathname } = new URL(url)
+  pathname = pathname.substring(1) // remove first character '/'
+  const params = {
+    Bucket,
+    Key: pathname,
+  }
+  await s3.deleteObject(params).promise()
+}
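
For context (not part of the diff): S3_CONFIGURED is only truthy when all five AWS_* variables are set, so the S3 code paths above are skipped entirely on a partial configuration. Below is a minimal sketch of how s3Delete recovers the object Key from a stored image URL, assuming virtual-hosted-style URLs of the form https://<bucket>.<endpoint>/original/<uniqueFilename>; the bucket, endpoint, and filename in the example are hypothetical.

```js
// Hypothetical example: deriving the Key that s3Delete passes to deleteObject().
const url = 'https://my-bucket.nyc3.digitaloceanspaces.com/original/a1b2c3-avatar.jpg'
const { pathname } = new URL(url) // '/original/a1b2c3-avatar.jpg'
const key = pathname.substring(1) // 'original/a1b2c3-avatar.jpg', matching the `original/${uniqueFilename}` upload location
```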