diff --git a/README.md b/README.md
index d567ed0..7798551 100644
--- a/README.md
+++ b/README.md
@@ -238,23 +238,60 @@ Supported providers with their required environment variables:
| [`mux`](https://mux.com) (default) | `MUX_TOKEN_ID` <br/> `MUX_TOKEN_SECRET` | | [Pricing](https://www.mux.com/pricing/video) |
| [`vercel-blob`](https://vercel.com/docs/storage/vercel-blob) | `BLOB_READ_WRITE_TOKEN` | | [Pricing](https://vercel.com/docs/storage/vercel-blob/usage-and-pricing) |
| [`backblaze`](https://www.backblaze.com/cloud-storage) | `BACKBLAZE_ACCESS_KEY_ID` <br/> `BACKBLAZE_SECRET_ACCESS_KEY` | `endpoint` <br/> `bucket` (optional) | [Pricing](https://www.backblaze.com/cloud-storage/pricing) |
-| More coming... | | |
+| [`amazon-s3`](https://aws.amazon.com/s3) | `AWS_ACCESS_KEY_ID` <br/> `AWS_SECRET_ACCESS_KEY` | `endpoint` <br/> `bucket` (optional) | [Pricing](https://aws.amazon.com/s3/pricing/) |
+| More coming... | | | |
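+
+Credentials for the new provider are read from the environment, e.g. from `.env.local`. A minimal sketch with placeholder values:
+
+```bash
+AWS_ACCESS_KEY_ID=AKIA...
+AWS_SECRET_ACCESS_KEY=...
+```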
+
#### Provider feature set
-| | Mux (default) | Vercel Blob | Backblaze |
-| ---------------------------- | ------------- | ----------- | ----------- |
-| Off-repo storage | ✅ | ✅ | ✅ |
-| Delivery via CDN | ✅ | ✅ | - |
-| BYO player | ✅ | ✅ | ✅ |
-| Compressed for streaming | ✅ | - | - |
-| Adapt to slow networks (HLS) | ✅ | - | - |
-| Automatic placeholder poster | ✅ | - | - |
-| Timeline hover thumbnails | ✅ | - | - |
-| Stream any soure format | ✅ | - | - |
-| AI captions & subtitles | ✅ | - | - |
-| Video analytics | ✅ | - | - |
-| Pricing | Minutes-based | GB-based | GB-based |
+| | Mux (default) | Vercel Blob | Backblaze | Amazon S3 |
+| ---------------------------- | ------------- | ----------- | --------- | --------- |
+| Off-repo storage | ✅ | ✅ | ✅ | ✅ |
+| Delivery via CDN | ✅ | ✅ | - | - |
+| BYO player | ✅ | ✅ | ✅ | ✅ |
+| Compressed for streaming | ✅ | - | - | - |
+| Adapt to slow networks (HLS) | ✅ | - | - | - |
+| Automatic placeholder poster | ✅ | - | - | - |
+| Timeline hover thumbnails | ✅ | - | - | - |
+| Stream any source format | ✅ | - | - | - |
+| AI captions & subtitles | ✅ | - | - | - |
+| Video analytics | ✅ | - | - | - |
+| Pricing | Minutes-based | GB-based | GB-based | GB-based |
+
+#### Required permissions for Amazon S3
+
+If you're using Amazon S3 as the provider, you'll need to create a new IAM user with the following policy. The object-level statement is scoped to buckets prefixed `next-videos-`, which matches the bucket names this provider creates:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:ListAllMyBuckets",
+        "s3:CreateBucket",
+        "s3:PutBucketOwnershipControls"
+      ],
+      "Resource": "*"
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:PutBucketPublicAccessBlock",
+        "s3:PutBucketAcl",
+        "s3:PutBucketCORS",
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:PutObjectAcl",
+        "s3:ListBucket"
+      ],
+      "Resource": "arn:aws:s3:::next-videos-*"
+    }
+  ]
+}
+```
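+
+With the policy in place, select the provider in `next.config.js`. A minimal sketch, assuming the `withNextVideo` wrapper and the `provider` / `providerConfig` options used for the other providers; the endpoint below is a placeholder for your region:
+
+```js
+const { withNextVideo } = require('next-video/process');
+
+/** @type {import('next').NextConfig} */
+const nextConfig = {};
+
+module.exports = withNextVideo(nextConfig, {
+  provider: 'amazon-s3',
+  providerConfig: {
+    'amazon-s3': {
+      // Placeholder endpoint; use the one for your bucket's region.
+      endpoint: 'https://s3.us-east-1.amazonaws.com',
+      // `bucket` is optional; when omitted, an existing next-videos-* bucket
+      // is reused or a new one is created.
+    },
+  },
+});
+```
+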
## Roadmap
diff --git a/src/config.ts b/src/config.ts
index ff748c6..03e5d70 100644
--- a/src/config.ts
+++ b/src/config.ts
@@ -20,7 +20,11 @@ export type VideoConfigComplete = {
     backblaze?: {
       endpoint: string;
       bucket?: string;
     };
+    'amazon-s3'?: {
+      endpoint: string;
+      bucket?: string;
+    };
   }
 }
diff --git a/src/providers/amazon-s3/provider.ts b/src/providers/amazon-s3/provider.ts
new file mode 100644
index 0000000..91a3ddb
--- /dev/null
+++ b/src/providers/amazon-s3/provider.ts
@@ -0,0 +1,191 @@
+import { ReadStream, createReadStream } from 'node:fs';
+import { Readable } from 'node:stream';
+import fs from 'node:fs/promises';
+import path from 'node:path';
+import { env } from 'node:process';
+import { fetch as uFetch } from 'undici';
+import chalk from 'chalk';
+import cuid2 from '@paralleldrive/cuid2';
+import { S3Client } from '@aws-sdk/client-s3';
+
+import { updateAsset, Asset } from '../../assets.js';
+import { getVideoConfig } from '../../config.js';
+import { findBucket, createBucket, putBucketCors, putObject, putBucketAcl } from '../../utils/s3.js';
+import log from '../../utils/logger.js';
+
+export type AmazonS3Metadata = {
+  bucket?: string;
+  endpoint?: string;
+}
+
+// Why 11?
+// - Reasonable id length visually in the src URL
+// - Familiarity with the length of YouTube IDs
+// - It would take more than 300 million buckets to have a 50% chance of a collision.
+// - "These go to eleven" https://www.youtube.com/watch?v=F7IZZXQ89Oc
+const createId = cuid2.init({ length: 11 });
+
+let s3: S3Client;
+let bucketName: string;
+let endpoint: string;
+
+async function initS3() {
+  const { providerConfig } = await getVideoConfig();
+  const amazonS3Config = providerConfig['amazon-s3'];
+  bucketName = amazonS3Config?.bucket ?? '';
+  endpoint = amazonS3Config?.endpoint ?? '';
+
+  const regionMatch = endpoint.match(/\.([a-z0-9-]+)\.amazonaws\.com$/); // e.g. us-east-1 from https://s3.us-east-1.amazonaws.com
+  const region = regionMatch ? regionMatch[1] : '';
+
+  s3 ??= new S3Client({
+    endpoint,
+    region,
+    credentials: {
+      accessKeyId: env.AWS_ACCESS_KEY_ID ?? '',
+      secretAccessKey: env.AWS_SECRET_ACCESS_KEY ?? '',
+    }
+  });
+
+  if (!bucketName) {
+    try {
+      const bucket = await findBucket(s3, bucket => bucket.Name?.startsWith('next-videos-'));
+
+      if (bucket) {
+        bucketName = bucket.Name!;
+        log.info(log.label('Using existing Amazon S3 bucket:'), bucketName);
+      }
+    } catch (err) {
+      log.error('Error listing Amazon S3 buckets');
+      console.error(err);
+    }
+  }
+
+  if (!bucketName) {
+    bucketName = `next-videos-${createId()}`;
+    log.info(log.label('Creating Amazon S3 bucket:'), bucketName);
+
+    try {
+      await createBucket(s3, bucketName, {
+        // https://aws.amazon.com/blogs/aws/heads-up-amazon-s3-security-changes-are-coming-in-april-of-2023/
+
+        // The ACL can't be set here since the security changes; it's applied in putBucketAcl() after the bucket is created.
+        // S3ServiceException [InvalidBucketAclWithBlockPublicAccessError]: Bucket cannot have public ACLs set with BlockPublicAccess enabled
+        // ACL: 'public-read',
+
+        // Since the security changes, the default ObjectOwnership is BucketOwnerEnforced, which doesn't allow ACLs. Change it here.
+        // InvalidBucketAclWithObjectOwnership: Bucket cannot have ACLs set with ObjectOwnership's BucketOwnerEnforced setting
+        ObjectOwnership: 'ObjectWriter'
+      });
+      await putBucketAcl(s3, bucketName);
+      await putBucketCors(s3, bucketName);
+    } catch (err) {
+      log.error('Error creating Amazon S3 bucket');
+      console.error(err);
+    }
+  }
+}
+
+export async function uploadLocalFile(asset: Asset) {
+  const filePath = asset.originalFilePath;
+
+  if (!filePath) {
+    log.error('No filePath provided for asset.');
+    console.error(asset);
+    return;
+  }
+
+  // Handle imported remote videos.
+  if (/^https?:\/\//.test(filePath)) {
+    return uploadRequestedFile(asset);
+  }
+
+  if (asset.status === 'ready') {
+    return;
+  } else if (asset.status === 'uploading') {
+    // Right now this re-starts the upload from the beginning.
+    // We should probably do something smarter here.
+    log.info(log.label('Resuming upload:'), filePath);
+  }
+
+  await updateAsset(filePath, {
+    status: 'uploading'
+  });
+
+  await initS3();
+
+  const fileStats = await fs.stat(filePath);
+  const stream = createReadStream(filePath);
+
+  return putAsset(filePath, fileStats.size, stream);
+}
+
+export async function uploadRequestedFile(asset: Asset) {
+  const filePath = asset.originalFilePath;
+
+  if (!filePath) {
+    log.error('No URL provided for asset.');
+    console.error(asset);
+    return;
+  }
+
+  if (asset.status === 'ready') {
+    return;
+  }
+
+  await updateAsset(filePath, {
+    status: 'uploading'
+  });
+
+  await initS3();
+
+  const response = await uFetch(filePath);
+  const size = Number(response.headers.get('content-length'));
+  const stream = response.body;
+
+  if (!stream) {
+    log.error('Error fetching the requested file:', filePath);
+    return;
+  }
+
+  return putAsset(filePath, size, Readable.fromWeb(stream));
+}
+
+async function putAsset(filePath: string, size: number, stream: ReadStream | Readable) {
+  log.info(log.label('Uploading file:'), `${filePath} (${size} bytes)`);
+
+  try {
+    await putObject(s3, {
+      ACL: 'public-read',
+      Bucket: bucketName,
+      Key: path.basename(filePath),
+      Body: stream,
+      ContentLength: size,
+    });
+
+    if (stream instanceof ReadStream) {
+      stream.close();
+    }
+  } catch (e) {
+    log.error('Error uploading to Amazon S3');
+    console.error(e);
+    return;
+  }
+
+  log.success(log.label('File uploaded:'), `${filePath} (${size} bytes)`);
+
+  const updatedAsset = await updateAsset(filePath, {
+    status: 'ready',
+    providerMetadata: {
+      'amazon-s3': {
+        endpoint,
+        bucket: bucketName,
+      } as AmazonS3Metadata
+    },
+  });
+
+  const url = updatedAsset.sources?.[0]?.src;
+  log.space(chalk.gray('>'), log.label('URL:'), url);
+
+  return updatedAsset;
+}
diff --git a/src/providers/amazon-s3/transformer.ts b/src/providers/amazon-s3/transformer.ts
new file mode 100644
index 0000000..7f8d1af
--- /dev/null
+++ b/src/providers/amazon-s3/transformer.ts
@@ -0,0 +1,19 @@
+import type { Asset, AssetSource } from '../../assets.js';
+
+export function transform(asset: Asset) {
+  const providerMetadata = asset.providerMetadata?.['amazon-s3'];
+  if (!providerMetadata) return asset;
+
+  const src = new URL(providerMetadata.endpoint);
+  src.hostname = `${providerMetadata.bucket}.${src.hostname}`; // virtual-hosted-style URL: https://<bucket>.<endpoint host>
+
+  const basename = asset.originalFilePath.split('/').pop();
+  if (basename) src.pathname = basename;
+
+  const source: AssetSource = { src: `${src}` };
+
+  return {
+    ...asset,
+    sources: [source],
+  };
+}
diff --git a/src/providers/backblaze/provider.ts b/src/providers/backblaze/provider.ts
index 3613b90..58bf1c0 100644
--- a/src/providers/backblaze/provider.ts
+++ b/src/providers/backblaze/provider.ts
@@ -66,7 +66,9 @@ async function initS3() {
     log.info(log.label('Creating Backblaze bucket:'), bucketName);
     try {
-      await createBucket(s3, bucketName);
+      await createBucket(s3, bucketName, {
+        ACL: 'public-read',
+      });
       await putBucketCors(s3, bucketName);
     } catch (err) {
       log.error('Error creating Backblaze bucket');
diff --git a/src/providers/providers.ts b/src/providers/providers.ts
index 6e7be9a..49d53a2 100644
--- a/src/providers/providers.ts
+++ b/src/providers/providers.ts
@@ -1,3 +1,4 @@
export * as mux from './mux/provider.js';
export * as vercelBlob from './vercel-blob/provider.js';
export * as backblaze from './backblaze/provider.js';
+export * as amazonS3 from './amazon-s3/provider.js';
diff --git a/src/providers/transformers.ts b/src/providers/transformers.ts
index d3551f7..1559add 100644
--- a/src/providers/transformers.ts
+++ b/src/providers/transformers.ts
@@ -1,3 +1,4 @@
export * as mux from './mux/transformer.js';
export * as vercelBlob from './vercel-blob/transformer.js';
export * as backblaze from './backblaze/transformer.js';
+export * as amazonS3 from './amazon-s3/transformer.js';
diff --git a/src/utils/s3.ts b/src/utils/s3.ts
index 7cbd5de..605a591 100644
--- a/src/utils/s3.ts
+++ b/src/utils/s3.ts
@@ -3,8 +3,9 @@ import {
   PutBucketCorsCommand,
   CreateBucketCommand,
   PutObjectCommand,
-  PutObjectCommandInput,
-  ListBucketsCommand
+  ListBucketsCommand,
+  DeletePublicAccessBlockCommand,
+  PutBucketAclCommand,
 } from '@aws-sdk/client-s3';
 
 export async function findBucket(s3: S3Client, callbackFn: (bucket: { Name?: string }) => boolean | void) {
@@ -12,14 +13,28 @@ export async function findBucket(s3: S3Client, callbackFn: (bucket: { Name?: str
   return Buckets?.find(callbackFn);
 }
 
-export function createBucket(s3: S3Client, bucketName: string) {
+export function createBucket(s3: S3Client, bucketName: string, input?: Partial<CreateBucketCommand['input']>) {
   return s3.send(new CreateBucketCommand({
     Bucket: bucketName,
-    ACL: 'public-read',
+    ...input
   }));
 }
 
-export function putObject(s3: S3Client, input: PutObjectCommandInput) {
+export async function putBucketAcl(s3: S3Client, bucketName: string, input?: Partial<PutBucketAclCommand['input']>) {
+  // Remove the public access block that is created by default.
+  // https://aws.amazon.com/blogs/aws/heads-up-amazon-s3-security-changes-are-coming-in-april-of-2023/
+  await s3.send(new DeletePublicAccessBlockCommand({
+    Bucket: bucketName
+  }));
+
+  return s3.send(new PutBucketAclCommand({
+    Bucket: bucketName,
+    ACL: input?.ACL ?? 'public-read',
+    ...input,
+  }));
+}
+
+export function putObject(s3: S3Client, input: PutObjectCommand['input']) {
   return s3.send(new PutObjectCommand(input));
 }