diff --git a/packages/@aws-cdk/aws-backup/README.md b/packages/@aws-cdk/aws-backup/README.md index 6eaae31c19aa2..5cf5bc4c1aa1a 100644 --- a/packages/@aws-cdk/aws-backup/README.md +++ b/packages/@aws-cdk/aws-backup/README.md @@ -32,7 +32,8 @@ const plan = backup.BackupPlan.dailyWeeklyMonthly5YearRetention(this, 'Plan'); Assigning resources to a plan can be done with `addSelection()`: -```ts fixture=with-plan +```ts +declare const plan: backup.BackupPlan; const myTable = dynamodb.Table.fromTableName(this, 'Table', 'myTableName'); const myCoolConstruct = new Construct(this, 'MyCoolConstruct'); @@ -50,16 +51,17 @@ created for the selection. The `BackupSelection` implements `IGrantable`. To add rules to a plan, use `addRule()`: -```ts fixture=with-plan +```ts +declare const plan: backup.BackupPlan; plan.addRule(new backup.BackupPlanRule({ completionWindow: Duration.hours(2), startWindow: Duration.hours(1), scheduleExpression: events.Schedule.cron({ // Only cron expressions are supported day: '15', hour: '3', - minute: '30' + minute: '30', }), - moveToColdStorageAfter: Duration.days(30) + moveToColdStorageAfter: Duration.days(30), })); ``` @@ -69,7 +71,8 @@ If no value is specified, the retention period is set to 35 days which is the ma Property `moveToColdStorageAfter` must not be specified because PITR does not support this option. 
This example defines an AWS Backup rule with PITR and a retention period set to 14 days: -```ts fixture=with-plan +```ts +declare const plan: backup.BackupPlan; plan.addRule(new backup.BackupPlanRule({ enableContinuousBackup: true, deleteAfter: Duration.days(14), @@ -78,7 +81,8 @@ plan.addRule(new backup.BackupPlanRule({ Ready-made rules are also available: -```ts fixture=with-plan +```ts +declare const plan: backup.BackupPlan; plan.addRule(backup.BackupPlanRule.daily()); plan.addRule(backup.BackupPlanRule.weekly()); ``` @@ -152,7 +156,7 @@ const vault = new backup.BackupVault(this, 'Vault', { }, }), ], - }); + }), }) ``` @@ -166,8 +170,8 @@ new backup.BackupVault(this, 'Vault', { blockRecoveryPointDeletion: true, }); -const plan = backup.BackupPlan.dailyMonthly1YearRetention(this, 'Plan'); -plan.backupVault.blockRecoveryPointDeletion(); +declare const backupVault: backup.BackupVault; +backupVault.blockRecoveryPointDeletion(); ``` By default access is not restricted. diff --git a/packages/@aws-cdk/aws-backup/package.json b/packages/@aws-cdk/aws-backup/package.json index 0e1e552e15db4..a29e6be543645 100644 --- a/packages/@aws-cdk/aws-backup/package.json +++ b/packages/@aws-cdk/aws-backup/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", diff --git a/packages/@aws-cdk/aws-backup/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-backup/rosetta/default.ts-fixture index cff23bb514119..5f28d8bba18e2 100644 --- a/packages/@aws-cdk/aws-backup/rosetta/default.ts-fixture +++ b/packages/@aws-cdk/aws-backup/rosetta/default.ts-fixture @@ -3,6 +3,8 @@ import { Duration, RemovalPolicy, Stack } from '@aws-cdk/core'; import { Construct } from 'constructs'; import * as backup from '@aws-cdk/aws-backup'; import * as iam from '@aws-cdk/aws-iam'; +import * as dynamodb from '@aws-cdk/aws-dynamodb'; +import * as events from 
'@aws-cdk/aws-events'; import * as kms from '@aws-cdk/aws-kms'; import * as sns from '@aws-cdk/aws-sns'; diff --git a/packages/@aws-cdk/aws-backup/rosetta/with-plan.ts-fixture b/packages/@aws-cdk/aws-backup/rosetta/with-plan.ts-fixture deleted file mode 100644 index 8dbfd6ac72c89..0000000000000 --- a/packages/@aws-cdk/aws-backup/rosetta/with-plan.ts-fixture +++ /dev/null @@ -1,16 +0,0 @@ -// Fixture with packages imported, but nothing else -import { Duration, RemovalPolicy, Stack } from '@aws-cdk/core'; -import { Construct } from 'constructs'; -import * as backup from '@aws-cdk/aws-backup'; -import * as dynamodb from '@aws-cdk/aws-dynamodb'; -import * as events from '@aws-cdk/aws-events'; - -class Fixture extends Stack { - constructor(scope: Construct, id: string) { - super(scope, id); - - const plan = backup.BackupPlan.dailyWeeklyMonthly5YearRetention(this, 'Plan'); - - /// here - } -} diff --git a/packages/@aws-cdk/aws-cloudtrail/README.md b/packages/@aws-cdk/aws-cloudtrail/README.md index 3deccd47545ea..46c5c0a95bd22 100644 --- a/packages/@aws-cdk/aws-cloudtrail/README.md +++ b/packages/@aws-cdk/aws-cloudtrail/README.md @@ -68,6 +68,8 @@ default retention setting. The following code enables sending CloudWatch logs bu period for the created Log Group. 
```ts +import * as logs from '@aws-cdk/aws-logs'; + const trail = new cloudtrail.Trail(this, 'CloudTrail', { sendToCloudWatchLogs: true, cloudWatchLogsRetention: logs.RetentionDays.FOUR_MONTHS, @@ -88,18 +90,18 @@ The following code filters events for S3 from a specific AWS account and trigger ```ts const myFunctionHandler = new lambda.Function(this, 'MyFunction', { - code: lambda.Code.fromAsset('resource/myfunction'); + code: lambda.Code.fromAsset('resource/myfunction'), runtime: lambda.Runtime.NODEJS_12_X, handler: 'index.handler', }); -const eventRule = Trail.onEvent(this, 'MyCloudWatchEvent', { - target: new eventTargets.LambdaFunction(myFunctionHandler), +const eventRule = cloudtrail.Trail.onEvent(this, 'MyCloudWatchEvent', { + target: new targets.LambdaFunction(myFunctionHandler), }); eventRule.addEventPattern({ - account: '123456789012', - source: 'aws.s3', + account: ['123456789012'], + source: ['aws.s3'], }); ``` @@ -141,7 +143,7 @@ The following code configures the `Trail` to only track management events that a ```ts const trail = new cloudtrail.Trail(this, 'CloudTrail', { // ... - managementEvents: ReadWriteType.READ_ONLY, + managementEvents: cloudtrail.ReadWriteType.READ_ONLY, }); ``` @@ -157,13 +159,14 @@ be used to configure logging of S3 data events for specific buckets and specific configures logging of S3 data events for `fooBucket` and with object prefix `bar/`. ```ts -import * as cloudtrail from '@aws-cdk/aws-cloudtrail'; +import * as s3 from '@aws-cdk/aws-s3'; const trail = new cloudtrail.Trail(this, 'MyAmazingCloudTrail'); +declare const bucket: s3.Bucket; // Adds an event selector to the bucket foo trail.addS3EventSelector([{ - bucket: fooBucket, // 'fooBucket' is of type s3.IBucket + bucket, objectPrefix: 'bar/', }]); ``` @@ -174,12 +177,12 @@ configures logging of Lambda data events for a specific Function. 
```ts const trail = new cloudtrail.Trail(this, 'MyAmazingCloudTrail'); -const amazingFunction = new lambda.Function(stack, 'AnAmazingFunction', { +const amazingFunction = new lambda.Function(this, 'AnAmazingFunction', { runtime: lambda.Runtime.NODEJS_12_X, handler: "hello.handler", code: lambda.Code.fromAsset("lambda"), }); // Add an event selector to log data events for the provided Lambda functions. -trail.addLambdaEventSelector([ lambdaFunction ]); +trail.addLambdaEventSelector([ amazingFunction ]); ``` diff --git a/packages/@aws-cdk/aws-cloudtrail/package.json b/packages/@aws-cdk/aws-cloudtrail/package.json index 3a81482d9fd44..b6b6fbaa232f0 100644 --- a/packages/@aws-cdk/aws-cloudtrail/package.json +++ b/packages/@aws-cdk/aws-cloudtrail/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", diff --git a/packages/@aws-cdk/aws-cloudtrail/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-cloudtrail/rosetta/default.ts-fixture new file mode 100644 index 0000000000000..b6440cd045f44 --- /dev/null +++ b/packages/@aws-cdk/aws-cloudtrail/rosetta/default.ts-fixture @@ -0,0 +1,14 @@ +// Fixture with packages imported, but nothing else +import { Stack } from '@aws-cdk/core'; +import { Construct } from 'constructs'; +import * as cloudtrail from '@aws-cdk/aws-cloudtrail'; +import * as sns from '@aws-cdk/aws-sns'; +import * as lambda from '@aws-cdk/aws-lambda'; +import * as targets from '@aws-cdk/aws-events-targets'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + /// here + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-config/README.md b/packages/@aws-cdk/aws-config/README.md index 0a8219a8c3f53..3a66934a43d69 100644 --- a/packages/@aws-cdk/aws-config/README.md +++ b/packages/@aws-cdk/aws-config/README.md @@ -59,16 +59,15 @@ For example, you could create 
a managed rule that checks whether active access k within the number of days specified. ```ts -import * as config from '@aws-cdk/aws-config'; -import * as cdk from '@aws-cdk/core'; - // https://docs.aws.amazon.com/config/latest/developerguide/access-keys-rotated.html new config.ManagedRule(this, 'AccessKeysRotated', { identifier: config.ManagedRuleIdentifiers.ACCESS_KEYS_ROTATED, inputParameters: { - maxAccessKeyAge: 60 // default is 90 days + maxAccessKeyAge: 60, // default is 90 days }, - maximumExecutionFrequency: config.MaximumExecutionFrequency.TWELVE_HOURS // default is 24 hours + + // default is 24 hours + maximumExecutionFrequency: config.MaximumExecutionFrequency.TWELVE_HOURS, }); ``` @@ -82,9 +81,6 @@ The following higher level constructs for AWS managed rules are available. Checks whether your active access keys are rotated within the number of days specified. ```ts -import * as config from '@aws-cdk/aws-config'; -import * as cdk from '@aws-cdk/aws-cdk'; - // compliant if access keys have been rotated within the last 90 days new config.AccessKeysRotated(this, 'AccessKeyRotated'); ``` @@ -95,12 +91,9 @@ Checks whether your CloudFormation stack's actual configuration differs, or has from it's expected configuration. ```ts -import * as config from '@aws-cdk/aws-config'; -import * as cdk from '@aws-cdk/aws-cdk'; - // compliant if stack's status is 'IN_SYNC' // non-compliant if the stack's drift status is 'DRIFTED' -new config.CloudFormationStackDriftDetectionCheck(stack, 'Drift', { +new config.CloudFormationStackDriftDetectionCheck(this, 'Drift', { ownStackOnly: true, // checks only the stack containing the rule }); ``` @@ -110,17 +103,14 @@ new config.CloudFormationStackDriftDetectionCheck(stack, 'Drift', { Checks whether your CloudFormation stacks are sending event notifications to a SNS topic. 
```ts -import * as config from '@aws-cdk/aws-config'; -import * as cdk from '@aws-cdk/aws-cdk'; - // topics to which CloudFormation stacks may send event notifications -const topic1 = new sns.Topic(stack, 'AllowedTopic1'); -const topic2 = new sns.Topic(stack, 'AllowedTopic2'); +const topic1 = new sns.Topic(this, 'AllowedTopic1'); +const topic2 = new sns.Topic(this, 'AllowedTopic2'); // non-compliant if CloudFormation stack does not send notifications to 'topic1' or 'topic2' new config.CloudFormationStackNotificationCheck(this, 'NotificationCheck', { topics: [topic1, topic2], -}) +}); ``` ### Custom rules @@ -140,13 +130,15 @@ To create a custom rule, define a `CustomRule` and specify the Lambda Function to run and the trigger types. ```ts -import * as config from '@aws-cdk/aws-config'; +declare const evalComplianceFn: lambda.Function; new config.CustomRule(this, 'CustomRule', { lambdaFunction: evalComplianceFn, configurationChanges: true, periodic: true, - maximumExecutionFrequency: config.MaximumExecutionFrequency.SIX_HOURS, // default is 24 hours + + // default is 24 hours + maximumExecutionFrequency: config.MaximumExecutionFrequency.SIX_HOURS, }); ``` @@ -165,22 +157,21 @@ Use the `RuleScope` APIs (`fromResource()`, `fromResources()` or `fromTag()`) to the scope of both managed and custom rules: ```ts -import * as config from '@aws-cdk/aws-config'; - const sshRule = new config.ManagedRule(this, 'SSH', { identifier: config.ManagedRuleIdentifiers.EC2_SECURITY_GROUPS_INCOMING_SSH_DISABLED, ruleScope: config.RuleScope.fromResource(config.ResourceType.EC2_SECURITY_GROUP, 'sg-1234567890abcdefgh'), // restrict to specific security group }); +declare const evalComplianceFn: lambda.Function; const customRule = new config.CustomRule(this, 'Lambda', { lambdaFunction: evalComplianceFn, - configurationChanges: true + configurationChanges: true, ruleScope: config.RuleScope.fromResources([config.ResourceType.CLOUDFORMATION_STACK, config.ResourceType.S3_BUCKET]), // restrict to 
all CloudFormation stacks and S3 buckets }); const tagRule = new config.CustomRule(this, 'CostCenterTagRule', { lambdaFunction: evalComplianceFn, - configurationChanges: true + configurationChanges: true, ruleScope: config.RuleScope.fromTag('Cost Center', 'MyApp'), // restrict to a specific tag }); ``` @@ -194,10 +185,6 @@ Use the `onComplianceChange()` APIs to trigger an EventBridge event when a compl of your AWS Config Rule fails: ```ts -import * as config from '@aws-cdk/aws-config'; -import * as sns from '@aws-cdk/aws-sns'; -import * as targets from '@aws-cdk/aws-events-targets'; - // Topic to which compliance notification events will be published const complianceTopic = new sns.Topic(this, 'ComplianceTopic'); @@ -211,15 +198,13 @@ Use the `onReEvaluationStatus()` status to trigger an EventBridge event when an rule is re-evaluated. ```ts -import * as config from '@aws-cdk/aws-config'; -import * as sns from '@aws-cdk/aws-sns'; -import * as targets from '@aws-cdk/aws-events-targets'; - // Topic to which re-evaluation notification events will be published const reEvaluationTopic = new sns.Topic(this, 'ComplianceTopic'); + +const rule = new config.CloudFormationStackDriftDetectionCheck(this, 'Drift'); rule.onReEvaluationStatus('ReEvaluationEvent', { target: new targets.SnsTopic(reEvaluationTopic), -}) +}); ``` ### Example @@ -228,11 +213,6 @@ The following example creates a custom rule that evaluates whether EC2 instances Compliance events are published to an SNS topic. ```ts -import * as config from '@aws-cdk/aws-config'; -import * as lambda from '@aws-cdk/aws-lambda'; -import * as sns from '@aws-cdk/aws-sns'; -import * as targets from '@aws-cdk/aws-events-targets'; - // Lambda function containing logic that evaluates compliance with the rule. 
const evalComplianceFn = new lambda.Function(this, 'CustomFunction', { code: lambda.AssetCode.fromInline('exports.handler = (event) => console.log(event);'), @@ -244,7 +224,7 @@ const evalComplianceFn = new lambda.Function(this, 'CustomFunction', { const customRule = new config.CustomRule(this, 'Custom', { configurationChanges: true, lambdaFunction: evalComplianceFn, - ruleScope: config.RuleScope.fromResource([config.ResourceType.EC2_INSTANCE]), + ruleScope: config.RuleScope.fromResource(config.ResourceType.EC2_INSTANCE), }); // A rule to detect stack drifts diff --git a/packages/@aws-cdk/aws-config/package.json b/packages/@aws-cdk/aws-config/package.json index 3579910453b7a..0f2cc93d5f9ce 100644 --- a/packages/@aws-cdk/aws-config/package.json +++ b/packages/@aws-cdk/aws-config/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", diff --git a/packages/@aws-cdk/aws-config/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-config/rosetta/default.ts-fixture new file mode 100644 index 0000000000000..f644a3f9c8157 --- /dev/null +++ b/packages/@aws-cdk/aws-config/rosetta/default.ts-fixture @@ -0,0 +1,14 @@ +// Fixture with packages imported, but nothing else +import { Stack } from '@aws-cdk/core'; +import { Construct } from 'constructs'; +import * as config from '@aws-cdk/aws-config'; +import * as targets from '@aws-cdk/aws-events-targets'; +import * as sns from '@aws-cdk/aws-sns'; +import * as lambda from '@aws-cdk/aws-lambda'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + /// here + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-s3-assets/README.md b/packages/@aws-cdk/aws-s3-assets/README.md index a73cbf0919642..bf005eba020d4 100644 --- a/packages/@aws-cdk/aws-s3-assets/README.md +++ b/packages/@aws-cdk/aws-s3-assets/README.md @@ -95,18 +95,21 @@ 
method `tryBundle()` which should return `true` if local bundling was performed. If `false` is returned, docker bundling will be done: ```ts +class MyBundle implements ILocalBundling { + public tryBundle(outputDir: string, options: BundlingOptions) { + const canRunLocally = true // replace with actual logic + if (canRunLocally) { + // perform local bundling here + return true; + } + return false; + } +} + new assets.Asset(this, 'BundledAsset', { path: '/path/to/asset', bundling: { - local: { - tryBundle(outputDir: string, options: BundlingOptions) { - if (canRunLocally) { - // perform local bundling here - return true; - } - return false; - }, - }, + local: new MyBundle(), // Docker bundling fallback image: DockerImage.fromRegistry('alpine'), entrypoint: ['/bin/sh', '-c'], diff --git a/packages/@aws-cdk/aws-s3-assets/lib/asset.ts b/packages/@aws-cdk/aws-s3-assets/lib/asset.ts index 484e04e4a9cb2..2f04f4532b36e 100644 --- a/packages/@aws-cdk/aws-s3-assets/lib/asset.ts +++ b/packages/@aws-cdk/aws-s3-assets/lib/asset.ts @@ -76,13 +76,13 @@ export class Asset extends CoreConstruct implements cdk.IAsset { /** * Attribute which represents the S3 HTTP URL of this asset. - * @example https://s3.us-west-1.amazonaws.com/bucket/key + * For example, `https://s3.us-west-1.amazonaws.com/bucket/key` */ public readonly httpUrl: string; /** * Attribute which represents the S3 URL of this asset. 
- * @example s3://bucket/key + * For example, `s3://bucket/key` */ public readonly s3ObjectUrl: string; diff --git a/packages/@aws-cdk/aws-s3-assets/package.json b/packages/@aws-cdk/aws-s3-assets/package.json index 084b0968e7306..45163b32641f7 100644 --- a/packages/@aws-cdk/aws-s3-assets/package.json +++ b/packages/@aws-cdk/aws-s3-assets/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", diff --git a/packages/@aws-cdk/aws-s3-assets/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-s3-assets/rosetta/default.ts-fixture new file mode 100644 index 0000000000000..52f4c907d8b07 --- /dev/null +++ b/packages/@aws-cdk/aws-s3-assets/rosetta/default.ts-fixture @@ -0,0 +1,12 @@ +// Fixture with packages imported, but nothing else +import { Construct } from 'constructs'; +import { BundlingOptions, BundlingOutput, DockerImage, ILocalBundling, Stack } from '@aws-cdk/core'; +import * as assets from '@aws-cdk/aws-s3-assets'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + + /// here + } +} diff --git a/packages/@aws-cdk/aws-s3-deployment/README.md b/packages/@aws-cdk/aws-s3-deployment/README.md index 37a571b075f40..f9bff70495c1d 100644 --- a/packages/@aws-cdk/aws-s3-deployment/README.md +++ b/packages/@aws-cdk/aws-s3-deployment/README.md @@ -20,13 +20,13 @@ enabled and populates it from a local directory on disk. 
```ts const websiteBucket = new s3.Bucket(this, 'WebsiteBucket', { websiteIndexDocument: 'index.html', - publicReadAccess: true + publicReadAccess: true, }); new s3deploy.BucketDeployment(this, 'DeployWebsite', { sources: [s3deploy.Source.asset('./website-dist')], destinationBucket: websiteBucket, - destinationKeyPrefix: 'web/static' // optional prefix in destination bucket + destinationKeyPrefix: 'web/static', // optional prefix in destination bucket }); ``` @@ -110,6 +110,7 @@ when the `BucketDeployment` resource is created or updated. You can use the opti this behavior, in which case the files will not be deleted. ```ts +declare const destinationBucket: s3.Bucket; new s3deploy.BucketDeployment(this, 'DeployMeWithoutDeletingFilesOnDestination', { sources: [s3deploy.Source.asset(path.join(__dirname, 'my-website'))], destinationBucket, @@ -122,17 +123,18 @@ each with its own characteristics. For example, you can set different cache-cont based on file extensions: ```ts -new BucketDeployment(this, 'BucketDeployment', { - sources: [Source.asset('./website', { exclude: ['index.html'] })], - destinationBucket: bucket, - cacheControl: [CacheControl.fromString('max-age=31536000,public,immutable')], +declare const destinationBucket: s3.Bucket; +new s3deploy.BucketDeployment(this, 'BucketDeployment', { + sources: [s3deploy.Source.asset('./website', { exclude: ['index.html'] })], + destinationBucket, + cacheControl: [s3deploy.CacheControl.fromString('max-age=31536000,public,immutable')], prune: false, }); -new BucketDeployment(this, 'HTMLBucketDeployment', { - sources: [Source.asset('./website', { exclude: ['*', '!index.html'] })], - destinationBucket: bucket, - cacheControl: [CacheControl.fromString('max-age=0,no-cache,no-store,must-revalidate')], +new s3deploy.BucketDeployment(this, 'HTMLBucketDeployment', { + sources: [s3deploy.Source.asset('./website', { exclude: ['*', '!index.html'] })], + destinationBucket, + cacheControl: 
[s3deploy.CacheControl.fromString('max-age=0,no-cache,no-store,must-revalidate')], prune: false, }); ``` @@ -142,19 +144,21 @@ new BucketDeployment(this, 'HTMLBucketDeployment', { There are two points at which filters are evaluated in a deployment: asset bundling and the actual deployment. If you simply want to exclude files in the asset bundling process, you should leverage the `exclude` property of `AssetOptions` when defining your source: ```ts -new BucketDeployment(this, 'HTMLBucketDeployment', { - sources: [Source.asset('./website', { exclude: ['*', '!index.html'] })], - destinationBucket: bucket, +declare const destinationBucket: s3.Bucket; +new s3deploy.BucketDeployment(this, 'HTMLBucketDeployment', { + sources: [s3deploy.Source.asset('./website', { exclude: ['*', '!index.html'] })], + destinationBucket, }); ``` If you want to specify filters to be used in the deployment process, you can use the `exclude` and `include` filters on `BucketDeployment`. If excluded, these files will not be deployed to the destination bucket. 
In addition, if the file already exists in the destination bucket, it will not be deleted if you are using the `prune` option: ```ts +declare const destinationBucket: s3.Bucket; new s3deploy.BucketDeployment(this, 'DeployButExcludeSpecificFiles', { sources: [s3deploy.Source.asset(path.join(__dirname, 'my-website'))], destinationBucket, - exclude: ['*.txt'] + exclude: ['*.txt'], }); ``` @@ -189,7 +193,7 @@ and [`aws s3 sync` documentation](https://docs.aws.amazon.com/cli/latest/referen ```ts const websiteBucket = new s3.Bucket(this, 'WebsiteBucket', { websiteIndexDocument: 'index.html', - publicReadAccess: true + publicReadAccess: true, }); new s3deploy.BucketDeployment(this, 'DeployWebsite', { @@ -201,9 +205,12 @@ new s3deploy.BucketDeployment(this, 'DeployWebsite', { // system-defined metadata contentType: "text/html", contentLanguage: "en", - storageClass: StorageClass.INTELLIGENT_TIERING, - serverSideEncryption: ServerSideEncryption.AES_256, - cacheControl: [CacheControl.setPublic(), CacheControl.maxAge(cdk.Duration.hours(1))], + storageClass: s3deploy.StorageClass.INTELLIGENT_TIERING, + serverSideEncryption: s3deploy.ServerSideEncryption.AES_256, + cacheControl: [ + s3deploy.CacheControl.setPublic(), + s3deploy.CacheControl.maxAge(Duration.hours(1)), + ], accessControl: s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL, }); ``` @@ -250,13 +257,16 @@ Please note that creating VPC inline may cause stack deletion failures. It is sh To avoid such condition, keep your network infra (VPC) in a separate stack and pass as props. 
```ts +declare const destinationBucket: s3.Bucket; +declare const vpc: ec2.Vpc; + new s3deploy.BucketDeployment(this, 'DeployMeWithEfsStorage', { - sources: [s3deploy.Source.asset(path.join(__dirname, 'my-website'))], - destinationBucket, - destinationKeyPrefix: 'efs/', - useEfs: true, - vpc: new ec2.Vpc(this, 'Vpc'), - retainOnDelete: false, + sources: [s3deploy.Source.asset(path.join(__dirname, 'my-website'))], + destinationBucket, + destinationKeyPrefix: 'efs/', + useEfs: true, + vpc, + retainOnDelete: false, }); ``` diff --git a/packages/@aws-cdk/aws-s3-deployment/package.json b/packages/@aws-cdk/aws-s3-deployment/package.json index df08ee72901ab..2f5fd80f65066 100644 --- a/packages/@aws-cdk/aws-s3-deployment/package.json +++ b/packages/@aws-cdk/aws-s3-deployment/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", diff --git a/packages/@aws-cdk/aws-s3-deployment/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-s3-deployment/rosetta/default.ts-fixture new file mode 100644 index 0000000000000..75a435a142566 --- /dev/null +++ b/packages/@aws-cdk/aws-s3-deployment/rosetta/default.ts-fixture @@ -0,0 +1,15 @@ +// Fixture with packages imported, but nothing else +import { Duration, Stack } from '@aws-cdk/core'; +import { Construct } from 'constructs'; +import * as s3deploy from '@aws-cdk/aws-s3-deployment'; +import * as s3 from '@aws-cdk/aws-s3'; +import * as ec2 from'@aws-cdk/aws-ec2'; +import * as path from 'path'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + + /// here + } +} diff --git a/packages/@aws-cdk/aws-s3-notifications/README.md b/packages/@aws-cdk/aws-s3-notifications/README.md index f054708f437fb..0b57126001cf8 100644 --- a/packages/@aws-cdk/aws-s3-notifications/README.md +++ b/packages/@aws-cdk/aws-s3-notifications/README.md @@ -18,10 +18,10 @@ The 
following example shows how to send a notification to an SNS topic when an object is created in an S3 bucket: ```ts -import * as s3n from '@aws-cdk/aws-s3-notifications'; +import * as sns from '@aws-cdk/aws-sns'; -const bucket = new s3.Bucket(stack, 'Bucket'); -const topic = new sns.Topic(stack, 'Topic'); +const bucket = new s3.Bucket(this, 'Bucket'); +const topic = new sns.Topic(this, 'Topic'); bucket.addEventNotification(s3.EventType.OBJECT_CREATED_PUT, new s3n.SnsDestination(topic)); ``` @@ -29,13 +29,13 @@ bucket.addEventNotification(s3.EventType.OBJECT_CREATED_PUT, new s3n.SnsDestinat The following example shows how to send a notification to a Lambda function when an object is created in an S3 bucket: ```ts -import * as s3n from '@aws-cdk/aws-s3-notifications'; +import * as lambda from '@aws-cdk/aws-lambda'; -const bucket = new s3.Bucket(stack, 'Bucket'); -const fn = new Function(this, 'MyFunction', { - runtime: Runtime.NODEJS_12_X, +const bucket = new s3.Bucket(this, 'Bucket'); +const fn = new lambda.Function(this, 'MyFunction', { + runtime: lambda.Runtime.NODEJS_12_X, handler: 'index.handler', - code: Code.fromAsset(path.join(__dirname, 'lambda-handler')), + code: lambda.Code.fromAsset(path.join(__dirname, 'lambda-handler')), }); bucket.addEventNotification(s3.EventType.OBJECT_CREATED, new s3n.LambdaDestination(fn)); diff --git a/packages/@aws-cdk/aws-s3-notifications/package.json b/packages/@aws-cdk/aws-s3-notifications/package.json index 7c04d633bee5d..ccc6d603d1f5a 100644 --- a/packages/@aws-cdk/aws-s3-notifications/package.json +++ b/packages/@aws-cdk/aws-s3-notifications/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", diff --git a/packages/@aws-cdk/aws-s3-notifications/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-s3-notifications/rosetta/default.ts-fixture new file mode 100644 index 
0000000000000..36e2218e03d06 --- /dev/null +++ b/packages/@aws-cdk/aws-s3-notifications/rosetta/default.ts-fixture @@ -0,0 +1,14 @@ +// Fixture with packages imported, but nothing else +import { Stack } from '@aws-cdk/core'; +import { Construct } from 'constructs'; +import * as s3n from '@aws-cdk/aws-s3-notifications'; +import * as s3 from '@aws-cdk/aws-s3'; +import * as path from 'path'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + + /// here + } +} diff --git a/packages/@aws-cdk/custom-resources/README.md b/packages/@aws-cdk/custom-resources/README.md index 890255ee7bb12..cb30aada65d7f 100644 --- a/packages/@aws-cdk/custom-resources/README.md +++ b/packages/@aws-cdk/custom-resources/README.md @@ -31,14 +31,9 @@ with a `CustomResource` and a user-provided AWS Lambda function which implements the actual handler. ```ts -import { CustomResource } from '@aws-cdk/core'; -import * as logs from '@aws-cdk/aws-logs'; -import * as iam from '@aws-cdk/aws-iam'; -import * as cr from '@aws-cdk/custom-resources'; - -const onEvent = new lambda.Function(this, 'MyHandler', { /* ... */ }); - -const myRole = new iam.Role(this, 'MyRole', { /* ... */ }); +declare const onEvent: lambda.Function; +declare const isComplete: lambda.Function; +declare const myRole: iam.Role; const myProvider = new cr.Provider(this, 'MyProvider', { onEventHandler: onEvent, @@ -275,10 +270,12 @@ to all buckets: ```ts new lambda.Function(this, 'OnEventHandler', { - // ... + runtime: lambda.Runtime.NODEJS_14_X, + handler: 'index.handler', + code: lambda.Code.fromInline('my code'), initialPolicy: [ - new iam.PolicyStatement({ actions: [ 's3:GetObject*' ], resources: [ '*' ] }) - ] + new iam.PolicyStatement({ actions: [ 's3:GetObject*' ], resources: [ '*' ] }), + ], }); ``` @@ -309,12 +306,15 @@ The following example will create the file `folder/file1.txt` inside `myBucket` with the contents `hello!`. 
-```ts -new S3File(this, 'MyFile', { +```plaintext +// This example exists only for TypeScript + +declare const myBucket: s3.Bucket; +new cr.S3File(this, 'MyFile', { bucket: myBucket, objectKey: 'folder/file1.txt', // optional content: 'hello!', - public: true // optional + public: true, // optional }); ``` @@ -334,11 +334,14 @@ Checks that the textual contents of an S3 object matches a certain value. The ch The following example defines an `S3Assert` resource which waits until `myfile.txt` in `myBucket` exists and includes the contents `foo bar`: -```ts -new S3Assert(this, 'AssertMyFile', { +```plaintext +// This example exists only for TypeScript + +declare const myBucket: s3.Bucket; +new cr.S3Assert(this, 'AssertMyFile', { bucket: myBucket, objectKey: 'myfile.txt', - expectedContent: 'foo bar' + expectedContent: 'foo bar', }); ``` @@ -356,7 +359,9 @@ stacks it may be useful to manually set a name for the Provider Function Lambda have a predefined service token ARN. ```ts - +declare const onEvent: lambda.Function; +declare const isComplete: lambda.Function; +declare const myRole: iam.Role; const myProvider = new cr.Provider(this, 'MyProvider', { onEventHandler: onEvent, isCompleteHandler: isComplete, @@ -409,26 +414,30 @@ resources. Chained API calls can be achieved by creating dependencies: ```ts -const awsCustom1 = new AwsCustomResource(this, 'API1', { +const awsCustom1 = new cr.AwsCustomResource(this, 'API1', { onCreate: { service: '...', action: '...', - physicalResourceId: PhysicalResourceId.of('...') + physicalResourceId: cr.PhysicalResourceId.of('...'), }, - policy: AwsCustomResourcePolicy.fromSdkCalls({resources: AwsCustomResourcePolicy.ANY_RESOURCE}) + policy: cr.AwsCustomResourcePolicy.fromSdkCalls({ + resources: cr.AwsCustomResourcePolicy.ANY_RESOURCE, + }), }); -const awsCustom2 = new AwsCustomResource(this, 'API2', { +const awsCustom2 = new cr.AwsCustomResource(this, 'API2', { onCreate: { service: '...', - action: '...' 
+ action: '...', parameters: { - text: awsCustom1.getResponseField('Items.0.text') + text: awsCustom1.getResponseField('Items.0.text'), }, - physicalResourceId: PhysicalResourceId.of('...') + physicalResourceId: cr.PhysicalResourceId.of('...'), }, - policy: AwsCustomResourcePolicy.fromSdkCalls({resources: AwsCustomResourcePolicy.ANY_RESOURCE}) -}) + policy: cr.AwsCustomResourcePolicy.fromSdkCalls({ + resources: cr.AwsCustomResourcePolicy.ANY_RESOURCE, + }), +}); ``` ### Physical Resource Id Parameter @@ -436,24 +445,26 @@ const awsCustom2 = new AwsCustomResource(this, 'API2', { Some AWS APIs may require passing the physical resource id in as a parameter for doing updates and deletes. You can pass it by using `PhysicalResourceIdReference`. ```ts -const awsCustom = new AwsCustomResource(this, '...', { +const awsCustom = new cr.AwsCustomResource(this, 'aws-custom', { onCreate: { service: '...', - action: '...' + action: '...', parameters: { - text: '...' + text: '...', }, - physicalResourceId: PhysicalResourceId.of('...') + physicalResourceId: cr.PhysicalResourceId.of('...'), }, onUpdate: { service: '...', - action: '...'. 
+ action: '...', parameters: { text: '...', - resourceId: new PhysicalResourceIdReference() - } + resourceId: new cr.PhysicalResourceIdReference(), + }, }, - policy: AwsCustomResourcePolicy.fromSdkCalls({resources: AwsCustomResourcePolicy.ANY_RESOURCE}) + policy: cr.AwsCustomResourcePolicy.fromSdkCalls({ + resources: cr.AwsCustomResourcePolicy.ANY_RESOURCE, + }), }) ``` @@ -476,13 +487,16 @@ Use the `role`, `timeout`, `logRetention` and `functionName` properties to custo the Lambda function implementing the custom resource: ```ts -new AwsCustomResource(this, 'Customized', { - // other props here +declare const myRole: iam.Role; +new cr.AwsCustomResource(this, 'Customized', { role: myRole, // must be assumable by the `lambda.amazonaws.com` service principal - timeout: cdk.Duration.minutes(10) // defaults to 2 minutes - logRetention: logs.RetentionDays.ONE_WEEK // defaults to never delete logs + timeout: Duration.minutes(10), // defaults to 2 minutes + logRetention: logs.RetentionDays.ONE_WEEK, // defaults to never delete logs functionName: 'my-custom-name', // defaults to a CloudFormation generated name -}) + policy: cr.AwsCustomResourcePolicy.fromSdkCalls({ + resources: cr.AwsCustomResourcePolicy.ANY_RESOURCE, + }), +}); ``` ### Restricting the output of the Custom Resource @@ -492,17 +506,19 @@ objects. 
If your API call returns an object that exceeds this limit, you can restrict the data returned by the custom resource to specific paths in the API response: ```ts -new AwsCustomResource(stack, 'ListObjects', { +new cr.AwsCustomResource(this, 'ListObjects', { onCreate: { service: 's3', action: 'listObjectsV2', parameters: { Bucket: 'my-bucket', }, - physicalResourceId: PhysicalResourceId.of('id'), + physicalResourceId: cr.PhysicalResourceId.of('id'), outputPaths: ['Contents.0.Key', 'Contents.1.Key'], // Output only the two first keys }, - policy: AwsCustomResourcePolicy.fromSdkCalls({ resources: AwsCustomResourcePolicy.ANY_RESOURCE }), + policy: cr.AwsCustomResourcePolicy.fromSdkCalls({ + resources: cr.AwsCustomResourcePolicy.ANY_RESOURCE, + }), }); ``` @@ -514,49 +530,56 @@ path in `PhysicalResourceId.fromResponse()`. #### Verify a domain with SES ```ts -const verifyDomainIdentity = new AwsCustomResource(this, 'VerifyDomainIdentity', { +import * as route53 from '@aws-cdk/aws-route53'; + +const verifyDomainIdentity = new cr.AwsCustomResource(this, 'VerifyDomainIdentity', { onCreate: { service: 'SES', action: 'verifyDomainIdentity', parameters: { - Domain: 'example.com' + Domain: 'example.com', }, - physicalResourceId: PhysicalResourceId.fromResponse('VerificationToken') // Use the token returned by the call as physical id + physicalResourceId: cr.PhysicalResourceId.fromResponse('VerificationToken'), // Use the token returned by the call as physical id }, - policy: AwsCustomResourcePolicy.fromSdkCalls({resources: AwsCustomResourcePolicy.ANY_RESOURCE}) + policy: cr.AwsCustomResourcePolicy.fromSdkCalls({ + resources: cr.AwsCustomResourcePolicy.ANY_RESOURCE, + }), }); +declare const zone: route53.HostedZone; new route53.TxtRecord(this, 'SESVerificationRecord', { zone, recordName: `_amazonses.example.com`, - values: [verifyDomainIdentity.getResponseField('VerificationToken')] + values: [verifyDomainIdentity.getResponseField('VerificationToken')], }); ``` #### Get the latest
version of a secure SSM parameter ```ts -const getParameter = new AwsCustomResource(this, 'GetParameter', { +const getParameter = new cr.AwsCustomResource(this, 'GetParameter', { onUpdate: { // will also be called for a CREATE event service: 'SSM', action: 'getParameter', parameters: { Name: 'my-parameter', - WithDecryption: true + WithDecryption: true, }, - physicalResourceId: PhysicalResourceId.of(Date.now().toString()) // Update physical id to always fetch the latest version + physicalResourceId: cr.PhysicalResourceId.of(Date.now().toString()), // Update physical id to always fetch the latest version }, - policy: AwsCustomResourcePolicy.fromSdkCalls({resources: AwsCustomResourcePolicy.ANY_RESOURCE}) + policy: cr.AwsCustomResourcePolicy.fromSdkCalls({ + resources: cr.AwsCustomResourcePolicy.ANY_RESOURCE, + }), }); // Use the value in another construct with -getParameter.getResponseField('Parameter.Value') +getParameter.getResponseField('Parameter.Value'); ``` #### Associate a PrivateHostedZone with VPC shared from another account ```ts -const getParameter = new AwsCustomResource(this, 'AssociateVPCWithHostedZone', { +const getParameter = new cr.AwsCustomResource(this, 'AssociateVPCWithHostedZone', { onCreate: { assumedRoleArn: 'arn:aws:iam::OTHERACCOUNT:role/CrossAccount/ManageHostedZoneConnections', service: 'Route53', @@ -564,16 +587,17 @@ const getParameter = new AwsCustomResource(this, 'AssociateVPCWithHostedZone', { parameters: { HostedZoneId: 'hz-123', VPC: { - VPCId: 'vpc-123', - VPCRegion: 'region-for-vpc' - } + VPCId: 'vpc-123', + VPCRegion: 'region-for-vpc', + }, }, - physicalResourceId: PhysicalResourceId.of('${vpcStack.SharedVpc.VpcId}-${vpcStack.Region}-${PrivateHostedZone.HostedZoneId}') + physicalResourceId: cr.PhysicalResourceId.of('${vpcStack.SharedVpc.VpcId}-${vpcStack.Region}-${PrivateHostedZone.HostedZoneId}'), }, //Will ignore any resource and use the assumedRoleArn as resource and 'sts:AssumeRole' for service:action - policy: 
AwsCustomResourcePolicy.fromSdkCalls({resources: AwsCustomResourcePolicy.ANY_RESOURCE}) + policy: cr.AwsCustomResourcePolicy.fromSdkCalls({ + resources: cr.AwsCustomResourcePolicy.ANY_RESOURCE, + }), }); - ``` --- diff --git a/packages/@aws-cdk/custom-resources/package.json b/packages/@aws-cdk/custom-resources/package.json index e14d766ee5367..7a6064ef86ecb 100644 --- a/packages/@aws-cdk/custom-resources/package.json +++ b/packages/@aws-cdk/custom-resources/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", diff --git a/packages/@aws-cdk/custom-resources/rosetta/default.ts-fixture b/packages/@aws-cdk/custom-resources/rosetta/default.ts-fixture new file mode 100644 index 0000000000000..b80888ebeedd0 --- /dev/null +++ b/packages/@aws-cdk/custom-resources/rosetta/default.ts-fixture @@ -0,0 +1,16 @@ +// Fixture with packages imported, but nothing else +import { Construct } from 'constructs'; +import { CustomResource, Duration, Stack } from '@aws-cdk/core'; +import * as lambda from '@aws-cdk/aws-lambda'; +import * as iam from '@aws-cdk/aws-iam'; +import * as cr from '@aws-cdk/custom-resources'; +import * as s3 from '@aws-cdk/aws-s3'; +import * as logs from '@aws-cdk/aws-logs'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + + /// here + } +}