
Commit abe61a4

Merge branch 'master' into merge-back/1.64.0
2 parents: 9510201 + 451200e


61 files changed (+1191 -489 lines)

.devcontainer.json (+8)

```diff
@@ -0,0 +1,8 @@
+{
+  "name": "Dev Container Definition - AWS CDK",
+  "image": "jsii/superchain",
+  "postCreateCommand": "yarn build --skip-test --no-bail --skip-prereqs --skip-compat",
+  "extensions": [
+
+  ]
+}
```

packages/@aws-cdk/aws-codepipeline-actions/lib/codecommit/source-action.ts (+23 -3)

```diff
@@ -2,7 +2,7 @@ import * as codecommit from '@aws-cdk/aws-codecommit';
 import * as codepipeline from '@aws-cdk/aws-codepipeline';
 import * as targets from '@aws-cdk/aws-events-targets';
 import * as iam from '@aws-cdk/aws-iam';
-import { Construct } from '@aws-cdk/core';
+import { Construct, Token } from '@aws-cdk/core';
 import { Action } from '../action';
 import { sourceArtifactBounds } from '../common';
 
@@ -122,8 +122,8 @@ export class CodeCommitSourceAction extends Action {
     const createEvent = this.props.trigger === undefined ||
       this.props.trigger === CodeCommitTrigger.EVENTS;
     if (createEvent) {
-      const branchIdDisambiguator = this.branch === 'master' ? '' : `-${this.branch}-`;
-      this.props.repository.onCommit(`${stage.pipeline.node.uniqueId}${branchIdDisambiguator}EventRule`, {
+      const eventId = this.generateEventId(stage);
+      this.props.repository.onCommit(eventId, {
         target: new targets.CodePipeline(stage.pipeline),
         branches: [this.branch],
       });
@@ -153,4 +153,24 @@ export class CodeCommitSourceAction extends Action {
       },
     };
   }
+
+  private generateEventId(stage: codepipeline.IStage): string {
+    const baseId = stage.pipeline.node.uniqueId;
+    if (Token.isUnresolved(this.branch)) {
+      let candidate = '';
+      let counter = 0;
+      do {
+        candidate = this.eventIdFromPrefix(`${baseId}${counter}`);
+        counter += 1;
+      } while (this.props.repository.node.tryFindChild(candidate) !== undefined);
+      return candidate;
+    } else {
+      const branchIdDisambiguator = this.branch === 'master' ? '' : `-${this.branch}-`;
+      return this.eventIdFromPrefix(`${baseId}${branchIdDisambiguator}`);
+    }
+  }
+
+  private eventIdFromPrefix(eventIdPrefix: string) {
+    return `${eventIdPrefix}EventRule`;
+  }
 }
```
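Why the counter-based ID above: when the branch name is a deferred value (a Token), it cannot be interpolated into the construct ID, because the ID would then contain an unresolved placeholder instead of the branch name. A minimal sketch of that behavior (not part of the commit), assuming `@aws-cdk/core` at the 1.x API used here; the `MyPipeline` prefix is an illustrative value:

```ts
import { Lazy, Token } from '@aws-cdk/core';

// A branch name that is only produced at synthesis time.
const branch = Lazy.stringValue({ produce: () => 'my-branch' });

// Tokens are not resolved at construction time:
console.log(Token.isUnresolved(branch)); // true

// Interpolating the token into an ID yields an encoded placeholder,
// e.g. 'MyPipeline-${Token[TOKEN.0]}-EventRule' -- not a stable construct
// ID. Hence generateEventId() falls back to a numeric counter and probes
// repository.node.tryFindChild() until it finds a free ID.
console.log(`MyPipeline-${branch}-EventRule`);
```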

packages/@aws-cdk/aws-codepipeline-actions/test/codecommit/test.codecommit-source-action.ts (+44 -1)

```diff
@@ -2,7 +2,7 @@ import { countResources, expect, haveResourceLike, not } from '@aws-cdk/assert';
 import * as codebuild from '@aws-cdk/aws-codebuild';
 import * as codecommit from '@aws-cdk/aws-codecommit';
 import * as codepipeline from '@aws-cdk/aws-codepipeline';
-import { Stack } from '@aws-cdk/core';
+import { Stack, Lazy } from '@aws-cdk/core';
 import { Test } from 'nodeunit';
 import * as cpactions from '../../lib';
 
@@ -224,6 +224,49 @@ export = {
 
     test.done();
   },
+
+  'allows using a Token for the branch name'(test: Test) {
+    const stack = new Stack();
+
+    const sourceOutput = new codepipeline.Artifact();
+    new codepipeline.Pipeline(stack, 'P', {
+      stages: [
+        {
+          stageName: 'Source',
+          actions: [
+            new cpactions.CodeCommitSourceAction({
+              actionName: 'CodeCommit',
+              repository: new codecommit.Repository(stack, 'R', {
+                repositoryName: 'repository',
+              }),
+              branch: Lazy.stringValue({ produce: () => 'my-branch' }),
+              output: sourceOutput,
+            }),
+          ],
+        },
+        {
+          stageName: 'Build',
+          actions: [
+            new cpactions.CodeBuildAction({
+              actionName: 'Build',
+              project: new codebuild.PipelineProject(stack, 'CodeBuild'),
+              input: sourceOutput,
+            }),
+          ],
+        },
+      ],
+    });
+
+    expect(stack).to(haveResourceLike('AWS::Events::Rule', {
+      EventPattern: {
+        detail: {
+          referenceName: ['my-branch'],
+        },
+      },
+    }));
+
+    test.done();
+  },
 },
};
```
packages/@aws-cdk/aws-eks/README.md

+34-34
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,8 @@ cluster.addManifest('mypod', {
4747
});
4848
```
4949

50+
> **NOTE: You can only create 1 cluster per stack.** If you have a use-case for multiple clusters per stack, > or would like to understand more about this limitation, see https://github.com/aws/aws-cdk/issues/10073.
51+
5052
In order to interact with your cluster through `kubectl`, you can use the `aws
5153
eks update-kubeconfig` [AWS CLI command](https://docs.aws.amazon.com/cli/latest/reference/eks/update-kubeconfig.html)
5254
to configure your local kubeconfig.
@@ -98,7 +100,8 @@ const cluster = new eks.Cluster(this, 'hello-eks', {
98100
});
99101
```
100102

101-
The default value is `eks.EndpointAccess.PUBLIC_AND_PRIVATE`. Which means the cluster endpoint is accessible from outside of your VPC, and worker node traffic to the endpoint will stay within your VPC.
103+
The default value is `eks.EndpointAccess.PUBLIC_AND_PRIVATE`. Which means the cluster endpoint is accessible from outside of your VPC, but worker node traffic as well as `kubectl` commands
104+
to the endpoint will stay within your VPC.
102105

103106
### Capacity
104107

@@ -139,16 +142,12 @@ new eks.Cluster(this, 'cluster-with-no-capacity', {
139142
});
140143
```
141144

142-
The `cluster.defaultCapacity` property will reference the `AutoScalingGroup`
143-
resource for the default capacity. It will be `undefined` if `defaultCapacity`
144-
is set to `0` or `defaultCapacityType` is either `NODEGROUP` or undefined.
145+
When creating a cluster with default capacity (i.e `defaultCapacity !== 0` or is undefined), you can access the allocated capacity using:
145146

146-
And the `cluster.defaultNodegroup` property will reference the `Nodegroup`
147-
resource for the default capacity. It will be `undefined` if `defaultCapacity`
148-
is set to `0` or `defaultCapacityType` is `EC2`.
147+
- `cluster.defaultCapacity` will reference the `AutoScalingGroup` resource in case `defaultCapacityType` is set to `EC2` or is undefined.
148+
- `cluster.defaultNodegroup` will reference the `Nodegroup` resource in case `defaultCapacityType` is set to `NODEGROUP`.
149149

150-
You can add `AutoScalingGroup` resource as customized capacity through `cluster.addCapacity()` or
151-
`cluster.addAutoScalingGroup()`:
150+
You can add customized capacity in the form of an `AutoScalingGroup` resource through `cluster.addCapacity()` or `cluster.addAutoScalingGroup()`:
152151

153152
```ts
154153
cluster.addCapacity('frontend-nodes', {
@@ -167,7 +166,7 @@ for Amazon EKS Kubernetes clusters. By default, `eks.Nodegroup` create a nodegro
167166
new eks.Nodegroup(stack, 'nodegroup', { cluster });
168167
```
169168

170-
You can add customized node group through `cluster.addNodegroup()`:
169+
You can add customized node groups through `cluster.addNodegroup()`:
171170

172171
```ts
173172
cluster.addNodegroup('nodegroup', {
@@ -206,14 +205,13 @@ this.cluster.addNodegroup('extra-ng', {
206205

207206
### ARM64 Support
208207

209-
Instance types with `ARM64` architecture are supported in both managed nodegroup and self-managed capacity. Simply specify an ARM64 `instanceType` (such as `m6g.medium`), and the latest
208+
Instance types with `ARM64` architecture are supported in both managed nodegroup and self-managed capacity. Simply specify an ARM64 `instanceType` (such as `m6g.medium`), and the latest
210209
Amazon Linux 2 AMI for ARM64 will be automatically selected.
211210

212211
```ts
213-
// create a cluster with a default managed nodegroup
212+
// create a cluster with a default managed nodegroup
214213
cluster = new eks.Cluster(this, 'Cluster', {
215214
vpc,
216-
mastersRole,
217215
version: eks.KubernetesVersion.V1_17,
218216
});
219217

@@ -298,12 +296,9 @@ can cause your EC2 instance to become unavailable, such as [EC2 maintenance even
298296
and [EC2 Spot interruptions](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-interruptions.html) and helps gracefully stop all pods running on spot nodes that are about to be
299297
terminated.
300298

301-
Current version:
302-
303-
| name | version |
304-
|------------|---------|
305-
| Helm Chart | 0.9.5 |
306-
| App | 1.7.0 |
299+
> Handler Version: [1.7.0](https://github.com/aws/aws-node-termination-handler/releases/tag/v1.7.0)
300+
>
301+
> Chart Version: [0.9.5](https://github.com/aws/eks-charts/blob/v0.0.28/stable/aws-node-termination-handler/Chart.yaml)
307302
308303
### Bootstrapping
309304

@@ -327,7 +322,7 @@ cluster.addCapacity('spot', {
327322
To disable bootstrapping altogether (i.e. to fully customize user-data), set `bootstrapEnabled` to `false` when you add
328323
the capacity.
329324

330-
### Kubernetes Resources
325+
### Kubernetes Manifests
331326

332327
The `KubernetesManifest` construct or `cluster.addManifest` method can be used
333328
to apply Kubernetes resource manifests to this cluster.
@@ -387,7 +382,7 @@ cluster.addManifest('hello-kub', service, deployment);
387382

388383
#### Kubectl Layer and Environment
389384

390-
The resources are created in the cluster by running `kubectl apply` from a python lambda function. You can configure the environment of this function by specifying it at cluster instantiation. For example, this can useful in order to configure an http proxy:
385+
The resources are created in the cluster by running `kubectl apply` from a python lambda function. You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:
391386

392387
```typescript
393388
const cluster = new eks.Cluster(this, 'hello-eks', {
@@ -450,10 +445,14 @@ const manifest = yaml.safeLoadAll(request('GET', manifestUrl).getBody());
450445
cluster.addManifest('my-resource', ...manifest);
451446
```
452447

453-
Since Kubernetes resources are implemented as CloudFormation resources in the
454-
CDK. This means that if the resource is deleted from your code (or the stack is
448+
Since Kubernetes manifests are implemented as CloudFormation resources in the
449+
CDK. This means that if the manifest is deleted from your code (or the stack is
455450
deleted), the next `cdk deploy` will issue a `kubectl delete` command and the
456-
Kubernetes resources will be deleted.
451+
Kubernetes resources in that manifest will be deleted.
452+
453+
#### Caveat
454+
455+
If you have multiple resources in a single `KubernetesManifest`, and one of those **resources** is removed from the manifest, it will not be deleted and will remain orphan. See [Support Object pruning](https://github.com/aws/aws-cdk/issues/10495) for more details.
457456

458457
#### Dependencies
459458

@@ -482,9 +481,9 @@ const service = cluster.addManifest('my-service', {
482481
service.node.addDependency(namespace); // will apply `my-namespace` before `my-service`.
483482
```
484483

485-
NOTE: when a `KubernetesManifest` includes multiple resources (either directly
484+
**NOTE:** when a `KubernetesManifest` includes multiple resources (either directly
486485
or through `cluster.addManifest()`) (e.g. `cluster.addManifest('foo', r1, r2,
487-
r3,...))`), these resources will be applied as a single manifest via `kubectl`
486+
r3,...)`), these resources will be applied as a single manifest via `kubectl`
488487
and will be applied sequentially (the standard behavior in `kubectl`).
489488

490489
### Patching Kubernetes Resources
@@ -582,7 +581,7 @@ If the cluster is configured with private-only or private and restricted public
582581
Kubernetes [endpoint access](#endpoint-access), you must also specify:
583582

584583
- `kubectlSecurityGroupId` - the ID of an EC2 security group that is allowed
585-
connections to the cluster's control security group.
584+
connections to the cluster's control security group. For example, the EKS managed [cluster security group](#cluster-security-group).
586585
- `kubectlPrivateSubnetIds` - a list of private VPC subnets IDs that will be used
587586
to access the Kubernetes endpoint.
588587

@@ -598,7 +597,7 @@ users, roles and accounts.
598597
Furthermore, when auto-scaling capacity is added to the cluster (through
599598
`cluster.addCapacity` or `cluster.addAutoScalingGroup`), the IAM instance role
600599
of the auto-scaling group will be automatically mapped to RBAC so nodes can
601-
connect to the cluster. No manual mapping is required any longer.
600+
connect to the cluster. No manual mapping is required.
602601

603602
For example, let's say you want to grant an IAM user administrative privileges
604603
on your cluster:
@@ -657,11 +656,10 @@ const clusterEncryptionConfigKeyArn = cluster.clusterEncryptionConfigKeyArn;
657656
### Node ssh Access
658657

659658
If you want to be able to SSH into your worker nodes, you must already
660-
have an SSH key in the region you're connecting to and pass it, and you must
661-
be able to connect to the hosts (meaning they must have a public IP and you
659+
have an SSH key in the region you're connecting to and pass it when you add capacity to the cluster. You must also be able to connect to the hosts (meaning they must have a public IP and you
662660
should be allowed to connect to them on port 22):
663661

664-
[ssh into nodes example](test/example.ssh-into-nodes.lit.ts)
662+
See [SSH into nodes](test/example.ssh-into-nodes.lit.ts) for a code example.
665663

666664
If you want to SSH into nodes in a private subnet, you should set up a
667665
bastion host in a public subnet. That setup is recommended, but is
@@ -699,7 +697,7 @@ cluster.addChart('NginxIngress', {
699697
Helm charts will be installed and updated using `helm upgrade --install`, where a few parameters
700698
are being passed down (such as `repo`, `values`, `version`, `namespace`, `wait`, `timeout`, etc).
701699
This means that if the chart is added to CDK with the same release name, it will try to update
702-
the chart in the cluster. The chart will exists as CloudFormation resource.
700+
the chart in the cluster.
703701

704702
Helm charts are implemented as CloudFormation resources in CDK.
705703
This means that if the chart is deleted from your code (or the stack is
@@ -775,9 +773,11 @@ const mypod = cluster.addManifest('mypod', {
775773
}
776774
});
777775

778-
// create the resource after the service account
776+
// create the resource after the service account.
777+
// note that using `sa.serviceAccountName` above **does not** translate into a dependency.
778+
// this is why an explicit dependency is needed. See https://github.com/aws/aws-cdk/issues/9910 for more details.
779779
mypod.node.addDependency(sa);
780780

781781
// print the IAM role arn for this service account
782782
new cdk.CfnOutput(this, 'ServiceAccountIamRole', { value: sa.role.roleArn })
783-
```
783+
```
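The default-capacity wording change above maps to two accessors on the cluster object. A minimal sketch of how they behave (not part of the commit), assuming `@aws-cdk/aws-eks` at the 1.x API this commit documents; the stack and construct names are illustrative:

```ts
import * as cdk from '@aws-cdk/core';
import * as eks from '@aws-cdk/aws-eks';

class EksCapacityStack extends cdk.Stack {
  constructor(scope: cdk.App, id: string) {
    super(scope, id);

    // Default capacity is provisioned as a managed node group here:
    const cluster = new eks.Cluster(this, 'Cluster', {
      version: eks.KubernetesVersion.V1_17,
      defaultCapacityType: eks.DefaultCapacityType.NODEGROUP,
    });

    // Per the README change above: with NODEGROUP, defaultNodegroup is set
    // and defaultCapacity (the AutoScalingGroup accessor) is undefined;
    // with EC2 (or undefined) it is the other way around.
    const nodegroup = cluster.defaultNodegroup; // Nodegroup | undefined
    const asg = cluster.defaultCapacity;        // undefined in this case
  }
}

new EksCapacityStack(new cdk.App(), 'EksCapacityStack');
```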

packages/@aws-cdk/aws-lambda-nodejs/lib/bundling.ts (+1 -1)

```diff
@@ -212,7 +212,7 @@ export class Bundling {
     });
 
     return lambda.Code.fromAsset(projectRoot, {
-      assetHashType: cdk.AssetHashType.BUNDLE,
+      assetHashType: cdk.AssetHashType.OUTPUT,
       bundling: {
         local: localBundler,
         ...dockerBundler.bundlingOptions,
```
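For context on the `BUNDLE` → `OUTPUT` switch: with `AssetHashType.OUTPUT` the asset hash is computed from the bundled output directory, whichever bundler (local or Docker) produced it. A minimal usage sketch (not part of the commit), assuming `@aws-cdk/aws-lambda` and `@aws-cdk/core` at the 1.x APIs used here; the path, image, and command are illustrative:

```ts
import * as lambda from '@aws-cdk/aws-lambda';
import * as cdk from '@aws-cdk/core';

const code = lambda.Code.fromAsset('/project', {
  // Hash the bundling *output* rather than the source directory, so local
  // and Docker bundling yield the same hash for the same produced output.
  assetHashType: cdk.AssetHashType.OUTPUT,
  bundling: {
    image: cdk.BundlingDockerImage.fromRegistry('node:12'),
    // Illustrative no-op "bundling": copy the staged input to the output dir.
    command: ['bash', '-c', 'cp -R /asset-input/. /asset-output/'],
  },
});
```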

packages/@aws-cdk/aws-lambda-nodejs/test/bundling.test.ts (+6 -6)

```diff
@@ -40,7 +40,7 @@ test('Parcel bundling', () => {
 
   // Correctly bundles with parcel
   expect(Code.fromAsset).toHaveBeenCalledWith('/project', {
-    assetHashType: AssetHashType.BUNDLE,
+    assetHashType: AssetHashType.OUTPUT,
     bundling: expect.objectContaining({
       local: {
         props: expect.objectContaining({
@@ -93,7 +93,7 @@ test('Parcel bundling with handler named index.ts', () => {
 
   // Correctly bundles with parcel
   expect(Code.fromAsset).toHaveBeenCalledWith('/project', {
-    assetHashType: AssetHashType.BUNDLE,
+    assetHashType: AssetHashType.OUTPUT,
     bundling: expect.objectContaining({
       command: [
         'bash', '-c',
@@ -112,7 +112,7 @@ test('Parcel bundling with tsx handler', () => {
 
   // Correctly bundles with parcel
   expect(Code.fromAsset).toHaveBeenCalledWith('/project', {
-    assetHashType: AssetHashType.BUNDLE,
+    assetHashType: AssetHashType.OUTPUT,
     bundling: expect.objectContaining({
       command: [
         'bash', '-c',
@@ -152,7 +152,7 @@ test('Parcel bundling with externals and dependencies', () => {
 
   // Correctly bundles with parcel
   expect(Code.fromAsset).toHaveBeenCalledWith('/project', {
-    assetHashType: AssetHashType.BUNDLE,
+    assetHashType: AssetHashType.OUTPUT,
     bundling: expect.objectContaining({
       command: [
         'bash', '-c',
@@ -199,7 +199,7 @@ test('Detects yarn.lock', () => {
 
   // Correctly bundles with parcel
   expect(Code.fromAsset).toHaveBeenCalledWith('/project', {
-    assetHashType: AssetHashType.BUNDLE,
+    assetHashType: AssetHashType.OUTPUT,
     bundling: expect.objectContaining({
       command: expect.arrayContaining([
         expect.stringMatching(/yarn\.lock.+yarn install/),
@@ -316,7 +316,7 @@ test('Custom bundling docker image', () => {
   });
 
   expect(Code.fromAsset).toHaveBeenCalledWith('/project', {
-    assetHashType: AssetHashType.BUNDLE,
+    assetHashType: AssetHashType.OUTPUT,
     bundling: expect.objectContaining({
       image: { image: 'my-custom-image' },
     }),
```
