import { S3Client, PutObjectCommand } from '@aws-sdk/client-s3';
import { getSignedUrl } from '@aws-sdk/s3-request-presigner';
import crypto from 'node:crypto';

const s3 = new S3Client({ region: process.env.AWS_REGION });

// Issues a short-lived pre-signed PUT URL so the browser can upload straight to S3.
export async function presignUpload(userId: string, contentType: string) {
  // Only sign for content types we're willing to accept.
  if (!['image/png', 'image/jpeg', 'application/pdf'].includes(contentType)) {
    throw new Error('Unsupported content type');
  }

  // Namespace objects by user and append a random UUID so keys never collide.
  const key = `uploads/${userId}/${crypto.randomUUID()}`;

  const cmd = new PutObjectCommand({
    Bucket: process.env.S3_BUCKET!,
    Key: key,
    ContentType: contentType
  });

  // 60-second expiry: enough time to start the upload, short enough to limit misuse.
  const url = await getSignedUrl(s3, cmd, { expiresIn: 60 });
  return { url, key };
}
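
On the client, the matching piece is a direct PUT to the returned URL. A minimal sketch, assuming an /api/uploads/presign route that wraps presignUpload and returns { url, key } (the route and response shape are assumptions, not part of the code above):

async function uploadFile(file: File): Promise<string> {
  // Ask our API to sign an upload for this file's type.
  const res = await fetch('/api/uploads/presign', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ contentType: file.type })
  });
  if (!res.ok) throw new Error('Could not presign upload');
  const { url, key } = await res.json();

  // Upload straight to S3. The Content-Type header must match what was signed,
  // or S3 rejects the request with a signature mismatch.
  const put = await fetch(url, {
    method: 'PUT',
    headers: { 'Content-Type': file.type },
    body: file
  });
  if (!put.ok) throw new Error(`Upload failed: ${put.status}`);

  // The server only ever needs to persist this key.
  return key;
}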
Large file uploads don’t belong on app servers. My default flow: the server issues a short-lived pre-signed URL, the browser uploads directly to S3, and the server stores the object key. That keeps latency low and costs predictable.

Before signing, I validate the content type and a maximum size, because a pre-signed URL is effectively a temporary credential. One caveat: a plain pre-signed PUT won’t make S3 enforce a size cap for you; if you need that guarantee, a pre-signed POST policy with a content-length-range condition will (see the sketch below). I also namespace keys by user id and append a random UUID, so per-user listing and cleanup are easy later and names never collide.

Finally, I store only the key in the DB (not a full URL), so I can change bucket or CDN settings without rewriting records. This pattern scales cleanly and keeps your API instances focused on application logic instead of proxying file bytes.
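
If S3 itself should reject oversized objects, a pre-signed POST policy is one way to get that, since its content-length-range condition is checked by S3 at upload time. A minimal sketch, reusing the s3 client and crypto import from the code above and assuming a 10 MB cap (the cap and the function name are illustrative):

import { createPresignedPost } from '@aws-sdk/s3-presigned-post';

export async function presignUploadCapped(userId: string, contentType: string) {
  // The same content-type allow-list check as in presignUpload applies here too.
  const key = `uploads/${userId}/${crypto.randomUUID()}`;
  const { url, fields } = await createPresignedPost(s3, {
    Bucket: process.env.S3_BUCKET!,
    Key: key,
    Conditions: [
      ['content-length-range', 0, 10 * 1024 * 1024] // S3 rejects anything over 10 MB
    ],
    Fields: { 'Content-Type': contentType },
    Expires: 60
  });
  // The browser submits `fields` plus the file as multipart/form-data to `url`.
  return { url, fields, key };
}

At read time the stored key goes the other way: sign a GetObjectCommand with getSignedUrl, or hand the key to a CDN, which is exactly why nothing more than the key needs to live in the database.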