-
Notifications
You must be signed in to change notification settings - Fork 140
/
Copy path: multi-tier-ddb-s3.ts
149 lines (141 loc) · 4.92 KB
/
multi-tier-ddb-s3.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
import type { CacheValue, IncrementalCache } from "types/overrides";
import { customFetchClient } from "utils/fetch";
import { LRUCache } from "utils/lru";
import { debug } from "../../adapters/logger";
import S3Cache, { getAwsClient } from "./s3-lite";
// TTL for the local cache in milliseconds.
// 0 (the default) means every local hit is re-validated against DynamoDB.
// NOTE: a non-numeric env value used to parse to NaN, which silently made the
// TTL comparison always false; we now fall back to the default instead.
const parsedLocalCacheTTL = Number.parseInt(
  process.env.OPEN_NEXT_LOCAL_CACHE_TTL_MS ?? "",
  10,
);
const localCacheTTL = Number.isNaN(parsedLocalCacheTTL)
  ? 0
  : parsedLocalCacheTTL;
// Maximum size of the local cache in nb of entries.
// A NaN from a malformed env value would previously be handed straight to the
// LRU constructor; fall back to the default of 1000 instead.
const parsedMaxCacheSize = Number.parseInt(
  process.env.OPEN_NEXT_LOCAL_CACHE_SIZE ?? "",
  10,
);
const maxCacheSize = Number.isNaN(parsedMaxCacheSize)
  ? 1000
  : parsedMaxCacheSize;
// In-memory LRU shared by all requests served by this instance.
// `lastModified` is compared against DynamoDB's `revalidatedAt` metadata to
// decide whether the local entry is still fresh.
const localCache = new LRUCache<{
  value: CacheValue<false>;
  lastModified: number;
}>(maxCacheSize);
/**
 * Sends a signed low-level request to the regional DynamoDB endpoint.
 *
 * @param body JSON-serialized DynamoDB request payload.
 * @param type "get" maps to the GetItem action, "set" to PutItem (default "get").
 * @returns The fetch response promise from the signed client.
 */
const awsFetch = (body: RequestInit["body"], type: "get" | "set" = "get") => {
  const endpoint = `https://dynamodb.${process.env.CACHE_BUCKET_REGION}.amazonaws.com`;
  const action = type === "get" ? "GetItem" : "PutItem";
  const signedFetch = customFetchClient(getAwsClient());
  return signedFetch(endpoint, {
    method: "POST",
    headers: {
      "Content-Type": "application/x-amz-json-1.0",
      "X-Amz-Target": `DynamoDB_20120810.${action}`,
    },
    body,
  });
};
// Namespaces a cache key with the current Next.js build id so that entries
// from different deployments never collide in the shared DynamoDB table.
const buildDynamoKey = (key: string) =>
  `__meta_${process.env.NEXT_BUILD_ID}_${key}`;
/**
 * This cache implementation uses a multi-tier cache with a local cache, a DynamoDB metadata cache and an S3 cache.
 * It uses the same DynamoDB table as the default tag cache and the same S3 bucket as the default incremental cache.
 * It will first check the local cache.
 * If the local cache is expired, it will check the DynamoDB metadata cache to see if the local cache is still valid.
 * Lastly it will check the S3 cache.
 */
const multiTierCache: IncrementalCache = {
  name: "multi-tier-ddb-s3",
  // Reads a cache entry, preferring (in order): fresh local entry,
  // local entry re-validated against DynamoDB metadata, then S3.
  async get<IsFetch extends boolean = false>(key: string, isFetch?: IsFetch) {
    // First we check the local cache
    const localCacheEntry = localCache.get(key) as
      | {
          value: CacheValue<IsFetch>;
          lastModified: number;
        }
      | undefined;
    if (localCacheEntry) {
      // Within the TTL window we trust the local entry outright and skip the
      // DynamoDB round-trip entirely.
      if (Date.now() - localCacheEntry.lastModified < localCacheTTL) {
        debug("Using local cache without checking ddb");
        return localCacheEntry;
      }
      try {
        // Here we'll check ddb metadata to see if the local cache is still valid
        const { CACHE_DYNAMO_TABLE } = process.env;
        const result = await awsFetch(
          JSON.stringify({
            TableName: CACHE_DYNAMO_TABLE,
            // The metadata item uses the same value for both key attributes
            // (matches how `set`/`delete` write it below).
            Key: {
              path: { S: buildDynamoKey(key) },
              tag: { S: buildDynamoKey(key) },
            },
          }),
        );
        if (result.status === 200) {
          const data = await result.json();
          const hasBeenDeleted = data.Item?.deleted?.BOOL;
          if (hasBeenDeleted) {
            // Entry was tombstoned by `delete`: drop the local copy and
            // report a miss without consulting S3.
            localCache.delete(key);
            return { value: undefined, lastModified: 0 };
          }
          // If the metadata is older than the local cache, we can use the local cache
          // If it's not found we assume that no write has been done yet and we can use the local cache
          const lastModified = data.Item?.revalidatedAt?.N
            ? Number.parseInt(data.Item.revalidatedAt.N, 10)
            : 0;
          if (lastModified <= localCacheEntry.lastModified) {
            debug("Using local cache after checking ddb");
            return localCacheEntry;
          }
        }
        // Non-200 or newer remote metadata: fall through to S3 below.
      } catch (e) {
        // Best-effort: a DynamoDB failure degrades to an S3 read rather than
        // surfacing an error to the caller.
        debug("Failed to get metadata from ddb", e);
      }
    }
    // Authoritative read from S3; refresh the local cache on a hit.
    const result = await S3Cache.get(key, isFetch);
    if (result?.value) {
      localCache.set(key, {
        value: result.value,
        lastModified: result.lastModified ?? Date.now(),
      });
    }
    return result;
  },
  // Both for set and delete we choose to do the write to S3 first and then to DynamoDB
  // Which means that if it fails in DynamoDB, instance that don't have local cache will work as expected.
  // But instance that have local cache will have a stale cache until the next working set or delete.
  async set(key, value, isFetch) {
    const revalidatedAt = Date.now();
    await S3Cache.set(key, value, isFetch);
    // Record the revalidation timestamp so other instances can detect that
    // their local copy is stale.
    await awsFetch(
      JSON.stringify({
        TableName: process.env.CACHE_DYNAMO_TABLE,
        Item: {
          tag: { S: buildDynamoKey(key) },
          path: { S: buildDynamoKey(key) },
          revalidatedAt: { N: String(revalidatedAt) },
        },
      }),
      "set",
    );
    // Keep this instance's local cache in sync with the write it just made.
    localCache.set(key, {
      value,
      lastModified: revalidatedAt,
    });
  },
  async delete(key) {
    await S3Cache.delete(key);
    // Write a tombstone (deleted: true) rather than removing the item, so
    // `get` on other instances can distinguish "deleted" from "never written".
    await awsFetch(
      JSON.stringify({
        TableName: process.env.CACHE_DYNAMO_TABLE,
        Item: {
          tag: { S: buildDynamoKey(key) },
          path: { S: buildDynamoKey(key) },
          deleted: { BOOL: true },
        },
      }),
      "set",
    );
    localCache.delete(key);
  },
};
export default multiTierCache;