mirror of
https://github.com/Ocelot-Social-Community/Ocelot-Social.git
synced 2025-12-13 07:46:06 +00:00
* feat(backend): resize images with imagor Open questions: * Do we have external URLs for images? E.g. we have them for seeds. But in production? * Do we want to apply image transformations on these as well? My current implementation does not apply image transformations as of now. If we want to do that, we will also expose internal URLs in the kubernetes Cluster to the S3 endpoint to the client. TODOs: * The chat component is using a fixed size for all avatars at the moment. Maybe we can pair-program on this how to implement responsive images in this component library. Commits: * do not replace upload domain url in the database * fix all webapp specs * refactor: remove behaviour we won't need We don't want to apply image transformations on files, right? * refactor: replace the domain on read not on write * wip: webapp fixes * refactor(backend): add another url to config I've given up. There seems to be no nice way to tell the minio to return a location which differs from its host name. * refactor: add test for s3Service * refactor(backend): proxy minio via backend in local development Commits: * provide tests for message attachments * remove S3_PUBLIC_URL config value * refactor: follow @ulfgebhardt's review * add missing environment variable --------- Co-authored-by: Ulf Gebhardt <ulf.gebhardt@webcraft-media.de>
65 lines
1.9 KiB
TypeScript
65 lines
1.9 KiB
TypeScript
import { S3Client, DeleteObjectCommand, ObjectCannedACL } from '@aws-sdk/client-s3'
|
|
import { Upload } from '@aws-sdk/lib-storage'
|
|
|
|
import type { S3Config } from '@config/index'
|
|
|
|
import { FileUploadCallback, FileDeleteCallback } from './types'
|
|
|
|
export const s3Service = (config: S3Config, prefix: string) => {
|
|
const { AWS_BUCKET: Bucket } = config
|
|
|
|
const { AWS_ENDPOINT, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY } = config
|
|
const s3 = new S3Client({
|
|
credentials: {
|
|
accessKeyId: AWS_ACCESS_KEY_ID,
|
|
secretAccessKey: AWS_SECRET_ACCESS_KEY,
|
|
},
|
|
endpoint: AWS_ENDPOINT,
|
|
forcePathStyle: true,
|
|
})
|
|
|
|
const uploadFile: FileUploadCallback = async ({ createReadStream, uniqueFilename, mimetype }) => {
|
|
const s3Location = prefix.length > 0 ? `${prefix}/${uniqueFilename}` : uniqueFilename
|
|
|
|
const params = {
|
|
Bucket,
|
|
Key: s3Location,
|
|
ACL: ObjectCannedACL.public_read,
|
|
ContentType: mimetype,
|
|
Body: createReadStream(),
|
|
}
|
|
const command = new Upload({ client: s3, params })
|
|
const data = await command.done()
|
|
let { Location: location } = data
|
|
if (!location) {
|
|
throw new Error('File upload did not return `Location`')
|
|
}
|
|
|
|
if (!location.startsWith('https://') && !location.startsWith('http://')) {
|
|
// Ensure the location has a protocol. Hetzner sometimes does not return a protocol in the location.
|
|
location = `https://${location}`
|
|
}
|
|
|
|
return location
|
|
}
|
|
|
|
const deleteFile: FileDeleteCallback = async (url) => {
|
|
let { pathname } = new URL(url, 'http://example.org') // dummy domain to avoid invalid URL error
|
|
pathname = pathname.substring(1) // remove first character '/'
|
|
const prefix = `${Bucket}/`
|
|
if (pathname.startsWith(prefix)) {
|
|
pathname = pathname.slice(prefix.length)
|
|
}
|
|
const params = {
|
|
Bucket,
|
|
Key: pathname,
|
|
}
|
|
await s3.send(new DeleteObjectCommand(params))
|
|
}
|
|
|
|
return {
|
|
uploadFile,
|
|
deleteFile,
|
|
}
|
|
}
|