Skip to content

Commit

Permalink
feat: add Amazon S3 provider (#121)
Browse files Browse the repository at this point in the history
  • Loading branch information
luwes authored Dec 18, 2023
1 parent b36a6c3 commit 1d56169
Show file tree
Hide file tree
Showing 8 changed files with 291 additions and 21 deletions.
65 changes: 51 additions & 14 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -238,23 +238,60 @@ Supported providers with their required environment variables:
| [`mux`](https://mux.com) (default) | `MUX_TOKEN_ID`<br/>`MUX_TOKEN_SECRET` | | [Pricing](https://www.mux.com/pricing/video) |
| [`vercel-blob`](https://vercel.com/docs/storage/vercel-blob) | `BLOB_READ_WRITE_TOKEN` | | [Pricing](https://vercel.com/docs/storage/vercel-blob/usage-and-pricing) |
| [`backblaze`](https://www.backblaze.com/cloud-storage) | `BACKBLAZE_ACCESS_KEY_ID`<br/>`BACKBLAZE_SECRET_ACCESS_KEY` | `endpoint`<br/>`bucket` (optional) | [Pricing](https://www.backblaze.com/cloud-storage/pricing) |
| More coming... | | |
| [`amazon-s3`](https://aws.amazon.com/s3) | `AWS_ACCESS_KEY_ID`<br/>`AWS_SECRET_ACCESS_KEY` | `endpoint`<br/>`bucket` (optional) | [Pricing](https://aws.amazon.com/s3/pricing/) |
| More coming... | | | |


#### Provider feature set

| | Mux (default) | Vercel Blob | Backblaze |
| ---------------------------- | ------------- | ----------- | ----------- |
| Off-repo storage ||||
| Delivery via CDN ||| - |
| BYO player ||||
| Compressed for streaming || - | - |
| Adapt to slow networks (HLS) || - | - |
| Automatic placeholder poster || - | - |
| Timeline hover thumbnails || - | - |
| Stream any source format || - | - |
| AI captions & subtitles || - | - |
| Video analytics || - | - |
| Pricing | Minutes-based | GB-based | GB-based |
| | Mux (default) | Vercel Blob | Backblaze | Amazon S3 |
| ---------------------------- | ------------- | ----------- | --------- | --------- |
| Off-repo storage |||||
| Delivery via CDN ||| - | - |
| BYO player |||||
| Compressed for streaming || - | - | - |
| Adapt to slow networks (HLS) || - | - | - |
| Automatic placeholder poster || - | - | - |
| Timeline hover thumbnails || - | - | - |
| Stream any source format || - | - | - |
| AI captions & subtitles || - | - | - |
| Video analytics || - | - | - |
| Pricing | Minutes-based | GB-based | GB-based | GB-based |


## Required Permissions for Amazon S3

If you're using Amazon S3 as the provider, you'll need to create a new IAM user with the following permissions:

```json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:ListAllMyBuckets",
"s3:CreateBucket",
"s3:PutBucketOwnershipControls"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:PutBucketPublicAccessBlock",
"s3:PutBucketAcl",
"s3:PutBucketCORS",
"s3:GetObject",
"s3:PutObject",
"s3:PutObjectAcl",
"s3:ListBucket"
],
"Resource": "arn:aws:s3:::next-videos-*"
}
]
}
```

## Roadmap

Expand Down
6 changes: 5 additions & 1 deletion src/config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,11 @@ export type VideoConfigComplete = {
backblaze?: {
endpoint: string;
bucket?: string;
};
},
'amazon-s3'?: {
endpoint: string;
bucket?: string;
},
}
}

Expand Down
191 changes: 191 additions & 0 deletions src/providers/amazon-s3/provider.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,191 @@
import { ReadStream, createReadStream } from 'node:fs';
import { Readable } from 'node:stream';
import fs from 'node:fs/promises';
import path from 'node:path';
import { env } from 'node:process';
import { fetch as uFetch } from 'undici';
import chalk from 'chalk';
import cuid2 from '@paralleldrive/cuid2';
import { S3Client } from '@aws-sdk/client-s3';

import { updateAsset, Asset } from '../../assets.js';
import { getVideoConfig } from '../../config.js';
import { findBucket, createBucket, putBucketCors, putObject, putBucketAcl } from '../../utils/s3.js';
import log from '../../utils/logger.js';

// Provider metadata persisted on each asset after a successful upload;
// consumed by the amazon-s3 transformer to build the public source URL.
export type AmazonS3Metadata = {
  bucket?: string;
  endpoint?: string;
}

// Why 11?
// - Reasonable id length visually in the src URL
// - Familiarity with the length of YouTube IDs
// - It would take more than 300 million buckets to have a 50% chance of a collision.
// - "These go to eleven" https://www.youtube.com/watch?v=F7IZZXQ89Oc
const createId = cuid2.init({ length: 11 });

// Module-level state populated by initS3() and read by the upload helpers below.
let s3: S3Client;
let bucketName: string;
let endpoint: string;

// Lazily creates the shared S3 client and resolves the target bucket:
// prefer the configured bucket, else reuse the first existing `next-videos-`
// bucket, else create a fresh one and open it up for public reads.
async function initS3() {
  const { providerConfig } = await getVideoConfig();
  const amazonS3Config = providerConfig['amazon-s3'];
  bucketName = amazonS3Config?.bucket ?? '';
  endpoint = amazonS3Config?.endpoint ?? '';

  // Derive the region from endpoints like `https://s3.us-east-1.amazonaws.com`.
  // NOTE(review): a trailing slash or a non-AWS endpoint yields an empty
  // region string — confirm the SDK default is acceptable in that case.
  const regionMatch = endpoint.match(/\.([a-z0-9-]+)\.amazonaws\.com$/);
  const region = regionMatch ? regionMatch[1] : '';

  // Create the client only once; subsequent calls reuse it (??=) but still
  // refresh the module-level bucketName/endpoint from config above.
  s3 ??= new S3Client({
    endpoint,
    region,
    credentials: {
      accessKeyId: env.AWS_ACCESS_KEY_ID ?? '',
      secretAccessKey: env.AWS_SECRET_ACCESS_KEY ?? '',
    }
  });

  // No bucket configured: try to reuse a bucket created by a previous run.
  if (!bucketName) {
    try {
      const bucket = await findBucket(s3, bucket => bucket.Name?.startsWith('next-videos-'));

      if (bucket) {
        bucketName = bucket.Name!;
        log.info(log.label('Using existing Amazon S3 bucket:'), bucketName);
      }
    } catch (err) {
      // Best-effort: listing may fail on narrow IAM permissions; fall through
      // to creating a new bucket instead of aborting.
      log.error('Error listing Amazon S3 buckets');
      console.error(err);
    }
  }

  // Still no bucket: create one with a short unique suffix.
  if (!bucketName) {
    bucketName = `next-videos-${createId()}`;
    log.info(log.label('Creating Amazon S3 bucket:'), bucketName);

    try {
      await createBucket(s3, bucketName, {
        // https://aws.amazon.com/blogs/aws/heads-up-amazon-s3-security-changes-are-coming-in-april-of-2023/

        // Can't set ACL here since the security changes, but we can set it after the bucket is created.
        // S3ServiceException [InvalidBucketAclWithBlockPublicAccessError]: Bucket cannot have public ACLs set with BlockPublicAccess enabled
        // ACL: 'public-read',

        // Since the security changes the default ObjectOwnership is BucketOwnerEnforced which doesn't allow ACLs. Change it here.
        // InvalidBucketAclWithObjectOwnership: Bucket cannot have ACLs set with ObjectOwnership's BucketOwnerEnforced setting
        ObjectOwnership: 'ObjectWriter'
      });
      // Order matters: the public-read ACL must be applied after creation (see above).
      await putBucketAcl(s3, bucketName);
      await putBucketCors(s3, bucketName);
    } catch (err) {
      log.error('Error creating Amazon S3 bucket');
      console.error(err);
    }
  }
}

/**
 * Uploads a local video file to Amazon S3.
 * Remote (http/https) paths are delegated to uploadRequestedFile().
 * No-ops when the asset is already marked ready.
 */
export async function uploadLocalFile(asset: Asset) {
  const { originalFilePath: filePath } = asset;

  if (!filePath) {
    log.error('No filePath provided for asset.');
    console.error(asset);
    return;
  }

  // Handle imported remote videos.
  if (/^https?:\/\//.test(filePath)) {
    return uploadRequestedFile(asset);
  }

  if (asset.status === 'ready') return;

  if (asset.status === 'uploading') {
    // Right now this re-starts the upload from the beginning.
    // We should probably do something smarter here.
    log.info(log.label('Resuming upload:'), filePath);
  }

  await updateAsset(filePath, { status: 'uploading' });

  await initS3();

  const { size } = await fs.stat(filePath);
  return putAsset(filePath, size, createReadStream(filePath));
}

/**
 * Fetches a remote video by URL and uploads it to Amazon S3.
 * No-ops when the asset is already marked ready.
 */
export async function uploadRequestedFile(asset: Asset) {
  const filePath = asset.originalFilePath;

  if (!filePath) {
    log.error('No URL provided for asset.');
    console.error(asset);
    return;
  }

  if (asset.status === 'ready') {
    return;
  }

  await updateAsset(filePath, {
    status: 'uploading'
  });

  await initS3();

  const response = await uFetch(filePath);

  // Bail out on HTTP errors so a 404/500 error page is never uploaded as the video.
  if (!response.ok) {
    log.error(`Error fetching the requested file: ${filePath} (HTTP ${response.status})`);
    return;
  }

  // NOTE(review): if the server omits content-length this becomes 0, which
  // would truncate the S3 upload — surface that instead of uploading nothing.
  const size = Number(response.headers.get('content-length'));
  const stream = response.body;

  if (!stream || !size) {
    log.error('Error fetching the requested file:', filePath);
    return;
  }

  return putAsset(filePath, size, Readable.fromWeb(stream));
}

/**
 * Streams a file to the configured S3 bucket and marks the asset ready.
 * Expects initS3() to have populated the module-level `s3`, `bucketName`
 * and `endpoint` before being called.
 */
async function putAsset(filePath: string, size: number, stream: ReadStream | Readable) {
  log.info(log.label('Uploading file:'), `${filePath} (${size} bytes)`);

  try {
    await putObject(s3, {
      ACL: 'public-read',
      Bucket: bucketName,
      // Only the basename is used as the object key, so files with the same
      // name in different folders would collide in the bucket.
      Key: path.basename(filePath),
      Body: stream,
      ContentLength: size,
    });
  } catch (e) {
    log.error('Error uploading to Amazon S3');
    console.error(e);
    return;
  } finally {
    // Close the file handle on failure as well as success — previously it was
    // only closed on the happy path, leaking the descriptor on upload errors.
    if (stream instanceof ReadStream) {
      stream.close();
    }
  }

  log.success(log.label('File uploaded:'), `${filePath} (${size} bytes)`);

  const updatedAsset = await updateAsset(filePath, {
    status: 'ready',
    providerMetadata: {
      'amazon-s3': {
        endpoint,
        bucket: bucketName,
      } as AmazonS3Metadata
    },
  });

  // Optional-chain the element access too: `sources?.[0].src` throws when
  // the transformer returns an empty sources array.
  const url = updatedAsset.sources?.[0]?.src;
  log.space(chalk.gray('>'), log.label('URL:'), url);

  return updatedAsset;
}
19 changes: 19 additions & 0 deletions src/providers/amazon-s3/transformer.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
import type { Asset, AssetSource } from '../../assets.js';

/**
 * Adds a playable source URL to an asset uploaded to Amazon S3.
 * Builds a virtual-hosted-style URL: https://<bucket>.<endpoint-host>/<basename>.
 * Assets without amazon-s3 provider metadata are returned untouched.
 */
export function transform(asset: Asset) {
  const metadata = asset.providerMetadata?.['amazon-s3'];
  if (!metadata) return asset;

  const url = new URL(metadata.endpoint);
  url.hostname = `${metadata.bucket}.${url.hostname}`;

  const basename = asset.originalFilePath.split('/').pop();
  if (basename) url.pathname = basename;

  const source: AssetSource = { src: String(url) };

  return {
    ...asset,
    sources: [source],
  };
}
4 changes: 3 additions & 1 deletion src/providers/backblaze/provider.ts
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,9 @@ async function initS3() {
log.info(log.label('Creating Backblaze bucket:'), bucketName);

try {
await createBucket(s3, bucketName);
await createBucket(s3, bucketName, {
ACL: 'public-read',
});
await putBucketCors(s3, bucketName);
} catch (err) {
log.error('Error creating Backblaze bucket');
Expand Down
1 change: 1 addition & 0 deletions src/providers/providers.ts
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
// Re-exports every storage provider implementation as a namespace.
export * as mux from './mux/provider.js';
export * as vercelBlob from './vercel-blob/provider.js';
export * as backblaze from './backblaze/provider.js';
export * as amazonS3 from './amazon-s3/provider.js';
1 change: 1 addition & 0 deletions src/providers/transformers.ts
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
// Re-exports every provider's asset transformer as a namespace.
export * as mux from './mux/transformer.js';
export * as vercelBlob from './vercel-blob/transformer.js';
export * as backblaze from './backblaze/transformer.js';
export * as amazonS3 from './amazon-s3/transformer.js';
25 changes: 20 additions & 5 deletions src/utils/s3.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,23 +3,38 @@ import {
PutBucketCorsCommand,
CreateBucketCommand,
PutObjectCommand,
PutObjectCommandInput,
ListBucketsCommand
ListBucketsCommand,
DeletePublicAccessBlockCommand,
PutBucketAclCommand,
} from '@aws-sdk/client-s3';

/**
 * Lists the account's buckets and returns the first one matching the
 * predicate, or undefined when none matches (or the account has no buckets).
 */
export async function findBucket(s3: S3Client, callbackFn: (bucket: { Name?: string }) => boolean | void) {
  const result = await s3.send(new ListBucketsCommand({}));
  return result.Buckets?.find(callbackFn);
}

export function createBucket(s3: S3Client, bucketName: string) {
export function createBucket(s3: S3Client, bucketName: string, input?: Partial<CreateBucketCommand['input']>) {
return s3.send(new CreateBucketCommand({
Bucket: bucketName,
ACL: 'public-read',
...input
}));
}

export function putObject(s3: S3Client, input: PutObjectCommandInput) {
/**
 * Applies a bucket ACL (public-read by default, overridable via `input`).
 * First removes the public access block that new buckets get by default,
 * since it would reject any public ACL.
 * https://aws.amazon.com/blogs/aws/heads-up-amazon-s3-security-changes-are-coming-in-april-of-2023/
 */
export async function putBucketAcl(s3: S3Client, bucketName: string, input?: Partial<PutBucketAclCommand['input']>) {
  const removeBlock = new DeletePublicAccessBlockCommand({
    Bucket: bucketName
  });
  await s3.send(removeBlock);

  const aclCommand = new PutBucketAclCommand({
    Bucket: bucketName,
    ACL: input?.ACL ?? 'public-read',
    ...input,
  });
  return s3.send(aclCommand);
}

// Thin wrapper that sends a PutObjectCommand built from the given input.
export function putObject(s3: S3Client, input: PutObjectCommand['input']) {
  return s3.send(new PutObjectCommand(input));
}

Expand Down

0 comments on commit 1d56169

Please sign in to comment.