From 918b70669a049a24a104e80a796072f8e935876c Mon Sep 17 00:00:00 2001 From: Isaiah Vita <82135527+isaiahvita@users.noreply.github.com> Date: Tue, 9 May 2023 07:34:50 -0700 Subject: [PATCH 1/7] fix release notes (#4831) --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5408e2be020..5e42b597d76 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,7 @@ Release v1.44.259 (2023-05-08) ### Service Client Updates * `service/glue`: Updates service API and documentation - * We don't do release notes https://w.amazon.com/bin/view/AWSDocs/common-tasks/release-notes + * Support large worker types G.4x and G.8x for Glue Spark * `service/guardduty`: Updates service API and documentation * Add AccessDeniedException 403 Error message code to support 3 Tagging related APIs * `service/iotsitewise`: Updates service API and documentation From 9d4285e4b352b5adc7e42680cf12e38edf66853b Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com> Date: Tue, 9 May 2023 11:27:57 -0700 Subject: [PATCH 2/7] Release v1.44.260 (2023-05-09) (#4833) Release v1.44.260 (2023-05-09) === ### Service Client Updates * `service/application-autoscaling`: Updates service API, documentation, and examples * `service/glue`: Updates service API and documentation * This release adds AmazonRedshift Source and Target nodes in addition to DynamicTransform OutputSchemas * `service/sagemaker`: Updates service API and documentation * This release includes support for (1) Provisioned Concurrency for Amazon SageMaker Serverless Inference and (2) UpdateEndpointWeightsAndCapacities API for Serverless endpoints. 
--- CHANGELOG.md | 10 + aws/endpoints/defaults.go | 12 + aws/version.go | 2 +- .../2016-02-06/api-2.json | 6 +- .../2016-02-06/docs-2.json | 68 +-- .../2016-02-06/endpoint-tests-1.json | 220 +++++--- .../2016-02-06/examples-1.json | 3 + models/apis/glue/2017-03-31/api-2.json | 76 ++- models/apis/glue/2017-03-31/docs-2.json | 76 +++ models/apis/sagemaker/2017-07-24/api-2.json | 18 +- models/apis/sagemaker/2017-07-24/docs-2.json | 22 +- models/endpoints/endpoints.json | 4 + service/applicationautoscaling/api.go | 102 +++- service/applicationautoscaling/doc.go | 2 + service/glue/api.go | 504 ++++++++++++++++++ service/sagemaker/api.go | 97 +++- 16 files changed, 1088 insertions(+), 134 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5e42b597d76..f1bb5a62085 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,13 @@ +Release v1.44.260 (2023-05-09) +=== + +### Service Client Updates +* `service/application-autoscaling`: Updates service API, documentation, and examples +* `service/glue`: Updates service API and documentation + * This release adds AmazonRedshift Source and Target nodes in addition to DynamicTransform OutputSchemas +* `service/sagemaker`: Updates service API and documentation + * This release includes support for (1) Provisioned Concurrency for Amazon SageMaker Serverless Inference and (2) UpdateEndpointWeightsAndCapacities API for Serverless endpoints. 
+ Release v1.44.259 (2023-05-08) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 26716a5ca65..0e8f4c6063f 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -3264,6 +3264,12 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3282,6 +3288,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, diff --git a/aws/version.go b/aws/version.go index 78d27b76cc5..af753a49130 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.259" +const SDKVersion = "1.44.260" diff --git a/models/apis/application-autoscaling/2016-02-06/api-2.json b/models/apis/application-autoscaling/2016-02-06/api-2.json index 3652c7cc986..f9115bff7c1 100644 --- a/models/apis/application-autoscaling/2016-02-06/api-2.json +++ b/models/apis/application-autoscaling/2016-02-06/api-2.json @@ -508,7 +508,8 @@ "ElastiCachePrimaryEngineCPUUtilization", "ElastiCacheReplicaEngineCPUUtilization", "ElastiCacheDatabaseMemoryUsageCountedForEvictPercentage", - "NeptuneReaderAverageCPUUtilization" + "NeptuneReaderAverageCPUUtilization", + "SageMakerVariantProvisionedConcurrencyUtilization" ] }, "MetricUnit":{"type":"string"}, @@ -682,7 +683,8 @@ "kafka:broker-storage:VolumeSize", "elasticache:replication-group:NodeGroups", "elasticache:replication-group:Replicas", - "neptune:cluster:ReadReplicaCount" + "neptune:cluster:ReadReplicaCount", + "sagemaker:variant:DesiredProvisionedConcurrency" ] }, "ScalableTarget":{ diff --git 
a/models/apis/application-autoscaling/2016-02-06/docs-2.json b/models/apis/application-autoscaling/2016-02-06/docs-2.json index 7cad809b8c1..9409852249d 100644 --- a/models/apis/application-autoscaling/2016-02-06/docs-2.json +++ b/models/apis/application-autoscaling/2016-02-06/docs-2.json @@ -1,6 +1,6 @@ { "version": "2.0", - "service": "

With Application Auto Scaling, you can configure automatic scaling for the following resources:

To learn more about Application Auto Scaling, see the Application Auto Scaling User Guide.

API Summary

The Application Auto Scaling service API includes three key sets of actions:

", + "service": "

With Application Auto Scaling, you can configure automatic scaling for the following resources:

To learn more about Application Auto Scaling, see the Application Auto Scaling User Guide.

API Summary

The Application Auto Scaling service API includes three key sets of actions:

", "operations": { "DeleteScalingPolicy": "

Deletes the specified scaling policy for an Application Auto Scaling scalable target.

Deleting a step scaling policy deletes the underlying alarm action, but does not delete the CloudWatch alarm associated with the scaling policy, even if it no longer has an associated action.

For more information, see Delete a step scaling policy and Delete a target tracking scaling policy in the Application Auto Scaling User Guide.

", "DeleteScheduledAction": "

Deletes the specified scheduled action for an Application Auto Scaling scalable target.

For more information, see Delete a scheduled action in the Application Auto Scaling User Guide.

", @@ -54,9 +54,9 @@ "Cooldown": { "base": null, "refs": { - "StepScalingPolicyConfiguration$Cooldown": "

The amount of time, in seconds, to wait for a previous scaling activity to take effect.

With scale-out policies, the intention is to continuously (but not excessively) scale out. After Application Auto Scaling successfully scales out using a step scaling policy, it starts to calculate the cooldown time. The scaling policy won't increase the desired capacity again unless either a larger scale out is triggered or the cooldown period ends. While the cooldown period is in effect, capacity added by the initiating scale-out activity is calculated as part of the desired capacity for the next scale-out activity. For example, when an alarm triggers a step scaling policy to increase the capacity by 2, the scaling activity completes successfully, and a cooldown period starts. If the alarm triggers again during the cooldown period but at a more aggressive step adjustment of 3, the previous increase of 2 is considered part of the current capacity. Therefore, only 1 is added to the capacity.

With scale-in policies, the intention is to scale in conservatively to protect your application’s availability, so scale-in activities are blocked until the cooldown period has expired. However, if another alarm triggers a scale-out activity during the cooldown period after a scale-in activity, Application Auto Scaling scales out the target immediately. In this case, the cooldown period for the scale-in activity stops and doesn't complete.

Application Auto Scaling provides a default value of 600 for Amazon ElastiCache replication groups and a default value of 300 for the following scalable targets:

For all other scalable targets, the default value is 0:

", - "TargetTrackingScalingPolicyConfiguration$ScaleOutCooldown": "

The amount of time, in seconds, to wait for a previous scale-out activity to take effect.

With the scale-out cooldown period, the intention is to continuously (but not excessively) scale out. After Application Auto Scaling successfully scales out using a target tracking scaling policy, it starts to calculate the cooldown time. The scaling policy won't increase the desired capacity again unless either a larger scale out is triggered or the cooldown period ends. While the cooldown period is in effect, the capacity added by the initiating scale-out activity is calculated as part of the desired capacity for the next scale-out activity.

Application Auto Scaling provides a default value of 600 for Amazon ElastiCache replication groups and a default value of 300 for the following scalable targets:

For all other scalable targets, the default value is 0:

", - "TargetTrackingScalingPolicyConfiguration$ScaleInCooldown": "

The amount of time, in seconds, after a scale-in activity completes before another scale-in activity can start.

With the scale-in cooldown period, the intention is to scale in conservatively to protect your application’s availability, so scale-in activities are blocked until the cooldown period has expired. However, if another alarm triggers a scale-out activity during the scale-in cooldown period, Application Auto Scaling scales out the target immediately. In this case, the scale-in cooldown period stops and doesn't complete.

Application Auto Scaling provides a default value of 600 for Amazon ElastiCache replication groups and a default value of 300 for the following scalable targets:

For all other scalable targets, the default value is 0:

" + "StepScalingPolicyConfiguration$Cooldown": "

The amount of time, in seconds, to wait for a previous scaling activity to take effect.

With scale-out policies, the intention is to continuously (but not excessively) scale out. After Application Auto Scaling successfully scales out using a step scaling policy, it starts to calculate the cooldown time. The scaling policy won't increase the desired capacity again unless either a larger scale out is triggered or the cooldown period ends. While the cooldown period is in effect, capacity added by the initiating scale-out activity is calculated as part of the desired capacity for the next scale-out activity. For example, when an alarm triggers a step scaling policy to increase the capacity by 2, the scaling activity completes successfully, and a cooldown period starts. If the alarm triggers again during the cooldown period but at a more aggressive step adjustment of 3, the previous increase of 2 is considered part of the current capacity. Therefore, only 1 is added to the capacity.

With scale-in policies, the intention is to scale in conservatively to protect your application’s availability, so scale-in activities are blocked until the cooldown period has expired. However, if another alarm triggers a scale-out activity during the cooldown period after a scale-in activity, Application Auto Scaling scales out the target immediately. In this case, the cooldown period for the scale-in activity stops and doesn't complete.

Application Auto Scaling provides a default value of 600 for Amazon ElastiCache replication groups and a default value of 300 for the following scalable targets:

For all other scalable targets, the default value is 0:

", + "TargetTrackingScalingPolicyConfiguration$ScaleOutCooldown": "

The amount of time, in seconds, to wait for a previous scale-out activity to take effect.

With the scale-out cooldown period, the intention is to continuously (but not excessively) scale out. After Application Auto Scaling successfully scales out using a target tracking scaling policy, it starts to calculate the cooldown time. The scaling policy won't increase the desired capacity again unless either a larger scale out is triggered or the cooldown period ends. While the cooldown period is in effect, the capacity added by the initiating scale-out activity is calculated as part of the desired capacity for the next scale-out activity.

Application Auto Scaling provides a default value of 600 for Amazon ElastiCache replication groups and a default value of 300 for the following scalable targets:

For all other scalable targets, the default value is 0:

", + "TargetTrackingScalingPolicyConfiguration$ScaleInCooldown": "

The amount of time, in seconds, after a scale-in activity completes before another scale-in activity can start.

With the scale-in cooldown period, the intention is to scale in conservatively to protect your application’s availability, so scale-in activities are blocked until the cooldown period has expired. However, if another alarm triggers a scale-out activity during the scale-in cooldown period, Application Auto Scaling scales out the target immediately. In this case, the scale-in cooldown period stops and doesn't complete.

Application Auto Scaling provides a default value of 600 for Amazon ElastiCache replication groups and a default value of 300 for the following scalable targets:

For all other scalable targets, the default value is 0:

" } }, "CustomizedMetricSpecification": { @@ -364,7 +364,7 @@ "NotScaledReason$MaxCapacity": "

The maximum capacity.

", "NotScaledReason$MinCapacity": "

The minimum capacity.

", "NotScaledReason$CurrentCapacity": "

The current capacity.

", - "RegisterScalableTargetRequest$MinCapacity": "

The minimum value that you plan to scale in to. When a scaling policy is in effect, Application Auto Scaling can scale in (contract) as needed to the minimum capacity limit in response to changing demand. This property is required when registering a new scalable target.

For the following resources, the minimum value allowed is 0.

It's strongly recommended that you specify a value greater than 0. A value greater than 0 means that data points are continuously reported to CloudWatch that scaling policies can use to scale on a metric like average CPU utilization.

For all other resources, the minimum allowed value depends on the type of resource that you are using. If you provide a value that is lower than what a resource can accept, an error occurs. In which case, the error message will provide the minimum value that the resource can accept.

", + "RegisterScalableTargetRequest$MinCapacity": "

The minimum value that you plan to scale in to. When a scaling policy is in effect, Application Auto Scaling can scale in (contract) as needed to the minimum capacity limit in response to changing demand. This property is required when registering a new scalable target.

For the following resources, the minimum value allowed is 0.

It's strongly recommended that you specify a value greater than 0. A value greater than 0 means that data points are continuously reported to CloudWatch that scaling policies can use to scale on a metric like average CPU utilization.

For all other resources, the minimum allowed value depends on the type of resource that you are using. If you provide a value that is lower than what a resource can accept, an error occurs. In which case, the error message will provide the minimum value that the resource can accept.

", "RegisterScalableTargetRequest$MaxCapacity": "

The maximum value that you plan to scale out to. When a scaling policy is in effect, Application Auto Scaling can scale out (expand) as needed to the maximum capacity limit in response to changing demand. This property is required when registering a new scalable target.

Although you can specify a large maximum capacity, note that service quotas might impose lower limits. Each service has its own default quotas for the maximum capacity of the resource. If you want to specify a higher limit, you can request an increase. For more information, consult the documentation for that service. For information about the default quotas for each service, see Service endpoints and quotas in the Amazon Web Services General Reference.

", "ScalableTarget$MinCapacity": "

The minimum value to scale to in response to a scale-in activity.

", "ScalableTarget$MaxCapacity": "

The maximum value to scale to in response to a scale-out activity.

", @@ -384,36 +384,36 @@ "base": null, "refs": { "DeleteScalingPolicyRequest$PolicyName": "

The name of the scaling policy.

", - "DeleteScalingPolicyRequest$ResourceId": "

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

", + "DeleteScalingPolicyRequest$ResourceId": "

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

", "DeleteScheduledActionRequest$ScheduledActionName": "

The name of the scheduled action.

", - "DeleteScheduledActionRequest$ResourceId": "

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

", - "DeregisterScalableTargetRequest$ResourceId": "

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

", - "DescribeScalingActivitiesRequest$ResourceId": "

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier.

", - "DescribeScalingPoliciesRequest$ResourceId": "

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

", - "DescribeScheduledActionsRequest$ResourceId": "

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

", - "PutScalingPolicyRequest$ResourceId": "

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

", + "DeleteScheduledActionRequest$ResourceId": "

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

", + "DeregisterScalableTargetRequest$ResourceId": "

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

", + "DescribeScalingActivitiesRequest$ResourceId": "

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier.

", + "DescribeScalingPoliciesRequest$ResourceId": "

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

", + "DescribeScheduledActionsRequest$ResourceId": "

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

", + "PutScalingPolicyRequest$ResourceId": "

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

", "PutScalingPolicyResponse$PolicyARN": "

The Amazon Resource Name (ARN) of the resulting scaling policy.

", "PutScheduledActionRequest$Schedule": "

The schedule for this action. The following formats are supported:

At expressions are useful for one-time schedules. Cron expressions are useful for scheduled actions that run periodically at a specified date and time, and rate expressions are useful for scheduled actions that run at a regular interval.

At and cron expressions use Universal Coordinated Time (UTC) by default.

The cron format consists of six fields separated by white spaces: [Minutes] [Hours] [Day_of_Month] [Month] [Day_of_Week] [Year].

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information and examples, see Example scheduled actions for Application Auto Scaling in the Application Auto Scaling User Guide.

", "PutScheduledActionRequest$Timezone": "

Specifies the time zone used when setting a scheduled action by using an at or cron expression. If a time zone is not provided, UTC is used by default.

Valid values are the canonical names of the IANA time zones supported by Joda-Time (such as Etc/GMT+9 or Pacific/Tahiti). For more information, see https://www.joda.org/joda-time/timezones.html.

", - "PutScheduledActionRequest$ResourceId": "

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

", - "RegisterScalableTargetRequest$ResourceId": "

The identifier of the resource that is associated with the scalable target. This string consists of the resource type and unique identifier.

", + "PutScheduledActionRequest$ResourceId": "

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

", + "RegisterScalableTargetRequest$ResourceId": "

The identifier of the resource that is associated with the scalable target. This string consists of the resource type and unique identifier.

", "RegisterScalableTargetRequest$RoleARN": "

This parameter is required for services that do not support service-linked roles (such as Amazon EMR), and it must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

If the service supports service-linked roles, Application Auto Scaling uses a service-linked role, which it creates if it does not yet exist. For more information, see Application Auto Scaling IAM roles.

", "ResourceIdsMaxLen1600$member": null, - "ScalableTarget$ResourceId": "

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

", + "ScalableTarget$ResourceId": "

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

", "ScalableTarget$RoleARN": "

The ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

", - "ScalingActivity$ResourceId": "

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier.

", + "ScalingActivity$ResourceId": "

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier.

", "ScalingPolicy$PolicyARN": "

The Amazon Resource Name (ARN) of the scaling policy.

", - "ScalingPolicy$ResourceId": "

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

", + "ScalingPolicy$ResourceId": "

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

", "ScheduledAction$ScheduledActionARN": "

The Amazon Resource Name (ARN) of the scheduled action.

", "ScheduledAction$Schedule": "

The schedule for this action. The following formats are supported:

At expressions are useful for one-time schedules. Cron expressions are useful for scheduled actions that run periodically at a specified date and time, and rate expressions are useful for scheduled actions that run at a regular interval.

At and cron expressions use Universal Coordinated Time (UTC) by default.

The cron format consists of six fields separated by white spaces: [Minutes] [Hours] [Day_of_Month] [Month] [Day_of_Week] [Year].

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information and examples, see Example scheduled actions for Application Auto Scaling in the Application Auto Scaling User Guide.

", "ScheduledAction$Timezone": "

The time zone used when referring to the date and time of a scheduled action, when the scheduled action uses an at or cron expression.

", - "ScheduledAction$ResourceId": "

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

" + "ScheduledAction$ResourceId": "

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

" } }, "ResourceIdsMaxLen1600": { "base": null, "refs": { - "DescribeScalableTargetsRequest$ResourceIds": "

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

", + "DescribeScalableTargetsRequest$ResourceIds": "

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

", "DescribeScalingPoliciesRequest$PolicyNames": "

The names of the scaling policies to describe.

", "DescribeScheduledActionsRequest$ScheduledActionNames": "

The names of the scheduled actions to describe.

" } @@ -438,20 +438,20 @@ "ScalableDimension": { "base": null, "refs": { - "DeleteScalingPolicyRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", - "DeleteScheduledActionRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", - "DeregisterScalableTargetRequest$ScalableDimension": "

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

", - "DescribeScalableTargetsRequest$ScalableDimension": "

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

", - "DescribeScalingActivitiesRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

", - "DescribeScalingPoliciesRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

", - "DescribeScheduledActionsRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

", - "PutScalingPolicyRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", - "PutScheduledActionRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", - "RegisterScalableTargetRequest$ScalableDimension": "

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

", - "ScalableTarget$ScalableDimension": "

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

", - "ScalingActivity$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", - "ScalingPolicy$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", - "ScheduledAction$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" + "DeleteScalingPolicyRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", + "DeleteScheduledActionRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", + "DeregisterScalableTargetRequest$ScalableDimension": "

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

", + "DescribeScalableTargetsRequest$ScalableDimension": "

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

", + "DescribeScalingActivitiesRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

", + "DescribeScalingPoliciesRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

", + "DescribeScheduledActionsRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

", + "PutScalingPolicyRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", + "PutScheduledActionRequest$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", + "RegisterScalableTargetRequest$ScalableDimension": "

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

", + "ScalableTarget$ScalableDimension": "

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

", + "ScalingActivity$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", + "ScalingPolicy$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

", + "ScheduledAction$ScalableDimension": "

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" } }, "ScalableTarget": { @@ -599,7 +599,7 @@ "refs": { "ListTagsForResourceResponse$Tags": "

A list of tags. Each tag consists of a tag key and a tag value.

", "RegisterScalableTargetRequest$Tags": "

Assigns one or more tags to the scalable target. Use this parameter to tag the scalable target when it is created. To tag an existing scalable target, use the TagResource operation.

Each tag consists of a tag key and a tag value. Both the tag key and the tag value are required. You cannot have more than one tag on a scalable target with the same tag key.

Use tags to control access to a scalable target. For more information, see Tagging support for Application Auto Scaling in the Application Auto Scaling User Guide.

", - "TagResourceRequest$Tags": "

The tags assigned to the resource. A tag is a label that you assign to an AWS resource.

Each tag consists of a tag key and a tag value.

You cannot have more than one tag on an Application Auto Scaling scalable target with the same tag key. If you specify an existing tag key with a different tag value, Application Auto Scaling replaces the current tag value with the specified one.

For information about the rules that apply to tag keys and tag values, see User-defined tag restrictions in the Amazon Web Services Billing and Cost Management User Guide.

" + "TagResourceRequest$Tags": "

The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource.

Each tag consists of a tag key and a tag value.

You cannot have more than one tag on an Application Auto Scaling scalable target with the same tag key. If you specify an existing tag key with a different tag value, Application Auto Scaling replaces the current tag value with the specified one.

For information about the rules that apply to tag keys and tag values, see User-defined tag restrictions in the Amazon Web Services Billing and Cost Management User Guide.

" } }, "TagResourceRequest": { diff --git a/models/apis/application-autoscaling/2016-02-06/endpoint-tests-1.json b/models/apis/application-autoscaling/2016-02-06/endpoint-tests-1.json index b8bca66a613..66cc57110ad 100644 --- a/models/apis/application-autoscaling/2016-02-06/endpoint-tests-1.json +++ b/models/apis/application-autoscaling/2016-02-06/endpoint-tests-1.json @@ -8,9 +8,9 @@ } }, "params": { + "Region": "af-south-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "af-south-1" + "UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { + "Region": "ap-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-east-1" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { + "Region": "ap-northeast-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { @@ -47,9 +47,9 @@ } }, "params": { + "Region": "ap-northeast-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { + "Region": "ap-northeast-3", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-3" + "UseDualStack": false } }, { @@ -73,9 +73,9 @@ } }, "params": { + "Region": "ap-south-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-south-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { + "Region": "ap-southeast-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { @@ -99,9 +99,9 @@ } }, "params": { + "Region": "ap-southeast-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { @@ -112,9 +112,9 @@ } }, "params": { + "Region": "ap-southeast-3", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-3" + "UseDualStack": false } }, { @@ -125,9 +125,9 @@ } }, "params": { + "Region": "ca-central-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ca-central-1" + 
"UseDualStack": false } }, { @@ -138,9 +138,9 @@ } }, "params": { + "Region": "eu-central-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { @@ -151,9 +151,9 @@ } }, "params": { + "Region": "eu-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-north-1" + "UseDualStack": false } }, { @@ -164,9 +164,9 @@ } }, "params": { + "Region": "eu-south-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-south-1" + "UseDualStack": false } }, { @@ -177,9 +177,9 @@ } }, "params": { + "Region": "eu-west-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { @@ -190,9 +190,9 @@ } }, "params": { + "Region": "eu-west-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-2" + "UseDualStack": false } }, { @@ -203,9 +203,9 @@ } }, "params": { + "Region": "eu-west-3", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-3" + "UseDualStack": false } }, { @@ -216,9 +216,9 @@ } }, "params": { + "Region": "me-south-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "me-south-1" + "UseDualStack": false } }, { @@ -229,9 +229,9 @@ } }, "params": { + "Region": "sa-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "sa-east-1" + "UseDualStack": false } }, { @@ -242,9 +242,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -255,9 +255,9 @@ } }, "params": { + "Region": "us-east-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-2" + "UseDualStack": false } }, { @@ -268,9 +268,9 @@ } }, "params": { + "Region": "us-west-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-west-1" + "UseDualStack": false } }, { @@ -281,9 +281,9 @@ } }, "params": { + "Region": "us-west-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -294,9 +294,9 @@ } }, "params": { + 
"Region": "us-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -307,9 +307,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -320,9 +320,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -333,9 +333,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -346,9 +346,9 @@ } }, "params": { + "Region": "cn-northwest-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-northwest-1" + "UseDualStack": false } }, { @@ -359,9 +359,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -372,9 +372,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -385,9 +385,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -398,9 +398,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -411,9 +411,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -424,9 +424,9 @@ } }, "params": { + "Region": "us-gov-west-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { @@ -437,9 +437,9 @@ } }, "params": { + "Region": "us-gov-west-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { @@ -450,9 +450,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, 
- "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -463,9 +463,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -476,9 +476,9 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -489,9 +489,20 @@ } }, "params": { + "Region": "us-iso-west-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-west-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -502,9 +513,20 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -515,9 +537,20 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -528,9 +561,20 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false + } + }, + { + 
"documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -541,9 +585,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -566,9 +610,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -578,11 +622,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/models/apis/application-autoscaling/2016-02-06/examples-1.json b/models/apis/application-autoscaling/2016-02-06/examples-1.json index 86eca052d86..ccf44495e62 100644 --- a/models/apis/application-autoscaling/2016-02-06/examples-1.json +++ b/models/apis/application-autoscaling/2016-02-06/examples-1.json @@ -319,6 +319,9 @@ "ScalableDimension": "ecs:service:DesiredCount", "ServiceNamespace": "ecs" }, + "output": { + "ScalableTargetARN": "arn:aws:application-autoscaling:us-east-1:123456789012:scalable-target/1234abcd56ab78cd901ef1234567890ab123" + }, "comments": { "input": { }, diff --git a/models/apis/glue/2017-03-31/api-2.json b/models/apis/glue/2017-03-31/api-2.json index daeabd64536..d9cdd7a67fa 100644 --- a/models/apis/glue/2017-03-31/api-2.json +++ b/models/apis/glue/2017-03-31/api-2.json @@ -3264,6 +3264,63 @@ }, "exception":true }, + "AmazonRedshiftAdvancedOption":{ + "type":"structure", + 
"members":{ + "Key":{"shape":"GenericString"}, + "Value":{"shape":"GenericString"} + } + }, + "AmazonRedshiftAdvancedOptions":{ + "type":"list", + "member":{"shape":"AmazonRedshiftAdvancedOption"} + }, + "AmazonRedshiftNodeData":{ + "type":"structure", + "members":{ + "AccessType":{"shape":"GenericLimitedString"}, + "SourceType":{"shape":"GenericLimitedString"}, + "Connection":{"shape":"Option"}, + "Schema":{"shape":"Option"}, + "Table":{"shape":"Option"}, + "CatalogDatabase":{"shape":"Option"}, + "CatalogTable":{"shape":"Option"}, + "CatalogRedshiftSchema":{"shape":"GenericString"}, + "CatalogRedshiftTable":{"shape":"GenericString"}, + "TempDir":{"shape":"EnclosedInStringProperty"}, + "IamRole":{"shape":"Option"}, + "AdvancedOptions":{"shape":"AmazonRedshiftAdvancedOptions"}, + "SampleQuery":{"shape":"GenericString"}, + "PreAction":{"shape":"GenericString"}, + "PostAction":{"shape":"GenericString"}, + "Action":{"shape":"GenericString"}, + "TablePrefix":{"shape":"GenericLimitedString"}, + "Upsert":{"shape":"BooleanValue"}, + "MergeAction":{"shape":"GenericLimitedString"}, + "MergeWhenMatched":{"shape":"GenericLimitedString"}, + "MergeWhenNotMatched":{"shape":"GenericLimitedString"}, + "MergeClause":{"shape":"GenericString"}, + "CrawlerConnection":{"shape":"GenericString"}, + "TableSchema":{"shape":"OptionList"}, + "StagingTable":{"shape":"GenericString"}, + "SelectedColumns":{"shape":"OptionList"} + } + }, + "AmazonRedshiftSource":{ + "type":"structure", + "members":{ + "Name":{"shape":"NodeName"}, + "Data":{"shape":"AmazonRedshiftNodeData"} + } + }, + "AmazonRedshiftTarget":{ + "type":"structure", + "members":{ + "Name":{"shape":"NodeName"}, + "Data":{"shape":"AmazonRedshiftNodeData"}, + "Inputs":{"shape":"OneInput"} + } + }, "ApplyMapping":{ "type":"structure", "required":[ @@ -4204,7 +4261,9 @@ "CatalogDeltaSource":{"shape":"CatalogDeltaSource"}, "S3DeltaSource":{"shape":"S3DeltaSource"}, "S3DeltaCatalogTarget":{"shape":"S3DeltaCatalogTarget"}, - 
"S3DeltaDirectTarget":{"shape":"S3DeltaDirectTarget"} + "S3DeltaDirectTarget":{"shape":"S3DeltaDirectTarget"}, + "AmazonRedshiftSource":{"shape":"AmazonRedshiftSource"}, + "AmazonRedshiftTarget":{"shape":"AmazonRedshiftTarget"} } }, "CodeGenConfigurationNodes":{ @@ -6328,7 +6387,8 @@ "Parameters":{"shape":"TransformConfigParameterList"}, "FunctionName":{"shape":"EnclosedInStringProperty"}, "Path":{"shape":"EnclosedInStringProperty"}, - "Version":{"shape":"EnclosedInStringProperty"} + "Version":{"shape":"EnclosedInStringProperty"}, + "OutputSchemas":{"shape":"GlueSchemas"} } }, "DynamoDBCatalogSource":{ @@ -9364,6 +9424,18 @@ }, "exception":true }, + "Option":{ + "type":"structure", + "members":{ + "Value":{"shape":"EnclosedInStringProperty"}, + "Label":{"shape":"EnclosedInStringProperty"}, + "Description":{"shape":"EnclosedInStringProperty"} + } + }, + "OptionList":{ + "type":"list", + "member":{"shape":"Option"} + }, "OracleSQLCatalogSource":{ "type":"structure", "required":[ diff --git a/models/apis/glue/2017-03-31/docs-2.json b/models/apis/glue/2017-03-31/docs-2.json index 49b651ceef1..3f819883df8 100644 --- a/models/apis/glue/2017-03-31/docs-2.json +++ b/models/apis/glue/2017-03-31/docs-2.json @@ -284,6 +284,37 @@ "refs": { } }, + "AmazonRedshiftAdvancedOption": { + "base": "

Specifies an optional value when connecting to the Redshift cluster.

", + "refs": { + "AmazonRedshiftAdvancedOptions$member": null + } + }, + "AmazonRedshiftAdvancedOptions": { + "base": null, + "refs": { + "AmazonRedshiftNodeData$AdvancedOptions": "

Optional values when connecting to the Redshift cluster.

" + } + }, + "AmazonRedshiftNodeData": { + "base": "

Specifies an Amazon Redshift node.

", + "refs": { + "AmazonRedshiftSource$Data": "

Specifies the data of the Amazon Redshift source node.

", + "AmazonRedshiftTarget$Data": "

Specifies the data of the Amazon Redshift target node.

" + } + }, + "AmazonRedshiftSource": { + "base": "

Specifies an Amazon Redshift source.

", + "refs": { + "CodeGenConfigurationNode$AmazonRedshiftSource": "

Specifies a source that reads from a data source in Amazon Redshift.

" + } + }, + "AmazonRedshiftTarget": { + "base": "

Specifies an Amazon Redshift target.

", + "refs": { + "CodeGenConfigurationNode$AmazonRedshiftTarget": "

Specifies a target that writes to a data target in Amazon Redshift.

" + } + }, "ApplyMapping": { "base": "

Specifies a transform that maps data property keys in the data source to data property keys in the data target. You can rename keys, modify the data types for keys, and choose which keys to drop from the dataset.

", "refs": { @@ -726,6 +757,7 @@ "BooleanValue": { "base": null, "refs": { + "AmazonRedshiftNodeData$Upsert": "

The action used on Redshift sinks when doing an APPEND.

", "CreateTriggerRequest$StartOnCreation": "

Set to true to start SCHEDULED and CONDITIONAL triggers when created. True is not supported for ON_DEMAND triggers.

", "GetJobRunRequest$PredecessorsIncluded": "

True if a list of predecessor runs should be returned.

", "S3CsvSource$OptimizePerformance": "

A Boolean value that specifies whether to use the advanced SIMD CSV reader along with Apache Arrow based columnar memory formats. Only available in Glue version 3.0.

", @@ -2707,6 +2739,7 @@ "refs": { "AdditionalOptions$key": null, "AdditionalOptions$value": null, + "AmazonRedshiftNodeData$TempDir": "

The Amazon S3 path where temporary data can be staged when copying out of the database.

", "AthenaConnectorSource$ConnectionName": "

The name of the connection that is associated with the connector.

", "AthenaConnectorSource$ConnectorName": "

The name of a connector that assists with accessing the data store in Glue Studio.

", "AthenaConnectorSource$ConnectionType": "

The type of connection, such as marketplace.athena or custom.athena, designating a connection to an Amazon Athena data store.

", @@ -2790,6 +2823,9 @@ "MySQLCatalogTarget$Database": "

The name of the database to write to.

", "MySQLCatalogTarget$Table": "

The name of the table in the database to write to.

", "NullValueField$Value": "

The value of the null placeholder.

", + "Option$Value": "

Specifies the value of the option.

", + "Option$Label": "

Specifies the label of the option.

", + "Option$Description": "

Specifies the description of the option.

", "OracleSQLCatalogSource$Database": "

The name of the database to read from.

", "OracleSQLCatalogSource$Table": "

The name of the table in the database to read from.

", "OracleSQLCatalogTarget$Database": "

The name of the database to write to.

", @@ -3176,6 +3212,12 @@ "GenericLimitedString": { "base": null, "refs": { + "AmazonRedshiftNodeData$AccessType": "

The access type for the Redshift connection. Can be a direct connection or a catalog connection.

", + "AmazonRedshiftNodeData$SourceType": "

The source type to specify whether a specific table is the source or a custom query.

", + "AmazonRedshiftNodeData$TablePrefix": "

Specifies the prefix to a table.

", + "AmazonRedshiftNodeData$MergeAction": "

The action used to determine how a MERGE in a Redshift sink will be handled.

", + "AmazonRedshiftNodeData$MergeWhenMatched": "

The action used to determine how a MERGE in a Redshift sink will be handled when an existing record matches a new record.

", + "AmazonRedshiftNodeData$MergeWhenNotMatched": "

The action used to determine how a MERGE in a Redshift sink will be handled when an existing record doesn't match a new record.

", "DQResultsPublishingOptions$EvaluationContext": "

The context of the evaluation.

", "Datatype$Id": "

The datatype of the value.

", "Datatype$Label": "

A label assigned to the datatype.

", @@ -3201,6 +3243,17 @@ "refs": { "AdditionalPlanOptionsMap$key": null, "AdditionalPlanOptionsMap$value": null, + "AmazonRedshiftAdvancedOption$Key": "

The key when specifying a key-value pair.

", + "AmazonRedshiftAdvancedOption$Value": "

The value when specifying a key-value pair.

", + "AmazonRedshiftNodeData$CatalogRedshiftSchema": "

The Redshift schema name when working with a data catalog.

", + "AmazonRedshiftNodeData$CatalogRedshiftTable": "

The database table to read from.

", + "AmazonRedshiftNodeData$SampleQuery": "

The SQL used to fetch the data from a Redshift source when the SourceType is 'query'.

", + "AmazonRedshiftNodeData$PreAction": "

The SQL used before a MERGE or APPEND with upsert is run.

", + "AmazonRedshiftNodeData$PostAction": "

The SQL used after a MERGE or APPEND with upsert is run.

", + "AmazonRedshiftNodeData$Action": "

Specifies how writing to a Redshift cluster will occur.

", + "AmazonRedshiftNodeData$MergeClause": "

The SQL used in a custom merge to deal with matching records.

", + "AmazonRedshiftNodeData$CrawlerConnection": "

Specifies the name of the connection that is associated with the catalog table used.

", + "AmazonRedshiftNodeData$StagingTable": "

The name of the temporary staging table that is used when doing a MERGE or APPEND with upsert.

", "Blueprint$BlueprintLocation": "

Specifies the path in Amazon S3 where the blueprint is published.

", "Blueprint$BlueprintServiceLocation": "

Specifies a path in Amazon S3 where the blueprint is copied when you call CreateBlueprint/UpdateBlueprint to register the blueprint in Glue.

", "CrawlsFilter$FieldValue": "

The value provided for comparison on the crawl field.

", @@ -4030,6 +4083,7 @@ "CatalogDeltaSource$OutputSchemas": "

Specifies the data schema for the Delta Lake source.

", "CatalogHudiSource$OutputSchemas": "

Specifies the data schema for the Hudi source.

", "CustomCode$OutputSchemas": "

Specifies the data schema for the custom code transform.

", + "DynamicTransform$OutputSchemas": "

Specifies the data schema for the dynamic transform.

", "JDBCConnectorSource$OutputSchemas": "

Specifies the data schema for the custom JDBC source.

", "JDBCConnectorTarget$OutputSchemas": "

Specifies the data schema for the JDBC target.

", "S3CatalogDeltaSource$OutputSchemas": "

Specifies the data schema for the Delta Lake source.

", @@ -5527,6 +5581,8 @@ "base": null, "refs": { "Aggregate$Name": "

The name of the transform node.

", + "AmazonRedshiftSource$Name": "

The name of the Amazon Redshift source.

", + "AmazonRedshiftTarget$Name": "

The name of the Amazon Redshift target.

", "ApplyMapping$Name": "

The name of the transform node.

", "AthenaConnectorSource$Name": "

The name of the data source.

", "BasicCatalogTarget$Name": "

The name of your data target.

", @@ -5767,6 +5823,7 @@ "base": null, "refs": { "Aggregate$Inputs": "

Specifies the fields and rows to use as inputs for the aggregate transform.

", + "AmazonRedshiftTarget$Inputs": "

The nodes that are inputs to the data target.

", "ApplyMapping$Inputs": "

The data inputs identified by their node names.

", "BasicCatalogTarget$Inputs": "

The nodes that are inputs to the data target.

", "DropDuplicates$Inputs": "

The data inputs identified by their node names.

", @@ -5804,6 +5861,25 @@ "refs": { } }, + "Option": { + "base": "

Specifies an option value.

", + "refs": { + "AmazonRedshiftNodeData$Connection": "

The Glue connection to the Redshift cluster.

", + "AmazonRedshiftNodeData$Schema": "

The Redshift schema name when working with a direct connection.

", + "AmazonRedshiftNodeData$Table": "

The Redshift table name when working with a direct connection.

", + "AmazonRedshiftNodeData$CatalogDatabase": "

The name of the Glue Data Catalog database when working with a data catalog.

", + "AmazonRedshiftNodeData$CatalogTable": "

The Glue Data Catalog table name when working with a data catalog.

", + "AmazonRedshiftNodeData$IamRole": "

Optional. The role name used when connecting to S3. The IAM role will default to the role on the job when left blank.

", + "OptionList$member": null + } + }, + "OptionList": { + "base": null, + "refs": { + "AmazonRedshiftNodeData$TableSchema": "

The array of schema output for a given node.

", + "AmazonRedshiftNodeData$SelectedColumns": "

The list of column names used to determine a matching record when doing a MERGE or APPEND with upsert.

" + } + }, "OracleSQLCatalogSource": { "base": "

Specifies an Oracle data source in the Glue Data Catalog.

", "refs": { diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index ceef7bf01bc..b3f82288ca8 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -9356,7 +9356,8 @@ "members":{ "VariantName":{"shape":"VariantName"}, "DesiredWeight":{"shape":"VariantWeight"}, - "DesiredInstanceCount":{"shape":"TaskCount"} + "DesiredInstanceCount":{"shape":"TaskCount"}, + "ServerlessUpdateConfig":{"shape":"ProductionVariantServerlessUpdateConfig"} } }, "DesiredWeightAndCapacityList":{ @@ -16969,7 +16970,15 @@ ], "members":{ "MemorySizeInMB":{"shape":"ServerlessMemorySizeInMB"}, - "MaxConcurrency":{"shape":"ServerlessMaxConcurrency"} + "MaxConcurrency":{"shape":"ServerlessMaxConcurrency"}, + "ProvisionedConcurrency":{"shape":"ServerlessProvisionedConcurrency"} + } + }, + "ProductionVariantServerlessUpdateConfig":{ + "type":"structure", + "members":{ + "MaxConcurrency":{"shape":"ServerlessMaxConcurrency"}, + "ProvisionedConcurrency":{"shape":"ServerlessProvisionedConcurrency"} } }, "ProductionVariantStatus":{ @@ -18202,6 +18211,11 @@ "max":6144, "min":1024 }, + "ServerlessProvisionedConcurrency":{ + "type":"integer", + "max":200, + "min":1 + }, "ServiceCatalogEntityId":{ "type":"string", "max":100, diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index 04ea6b4543a..ddf70c774fb 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -617,9 +617,9 @@ "AppSecurityGroupManagement": { "base": null, "refs": { - "CreateDomainRequest$AppSecurityGroupManagement": "

The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided.

", + "CreateDomainRequest$AppSecurityGroupManagement": "

The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided. If setting up the domain for use with RStudio, this value must be set to Service.

", "DescribeDomainResponse$AppSecurityGroupManagement": "

The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided.

", - "UpdateDomainRequest$AppSecurityGroupManagement": "

The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided.

" + "UpdateDomainRequest$AppSecurityGroupManagement": "

The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided. If setting up the domain for use with RStudio, this value must be set to Service.

" } }, "AppSortKey": { @@ -10812,6 +10812,12 @@ "ProductionVariantSummary$DesiredServerlessConfig": "

The serverless configuration requested for the endpoint update.

" } }, + "ProductionVariantServerlessUpdateConfig": { + "base": "

Specifies the serverless update concurrency configuration for an endpoint variant.

", + "refs": { + "DesiredWeightAndCapacity$ServerlessUpdateConfig": "

Specifies the serverless update concurrency configuration for an endpoint variant.

" + } + }, "ProductionVariantStatus": { "base": "

Describes the status of the production variant.

", "refs": { @@ -11140,7 +11146,7 @@ "RStudioServerProDomainSettingsForUpdate": { "base": "

A collection of settings that update the current configuration for the RStudioServerPro Domain-level app.

", "refs": { - "DomainSettingsForUpdate$RStudioServerProDomainSettingsForUpdate": "

A collection of RStudioServerPro Domain-level app settings to update.

" + "DomainSettingsForUpdate$RStudioServerProDomainSettingsForUpdate": "

A collection of RStudioServerPro Domain-level app settings to update. A single RStudioServerPro application is created for a domain.

" } }, "RStudioServerProUserGroup": { @@ -11962,7 +11968,8 @@ "ServerlessMaxConcurrency": { "base": null, "refs": { - "ProductionVariantServerlessConfig$MaxConcurrency": "

The maximum number of concurrent invocations your serverless endpoint can process.

" + "ProductionVariantServerlessConfig$MaxConcurrency": "

The maximum number of concurrent invocations your serverless endpoint can process.

", + "ProductionVariantServerlessUpdateConfig$MaxConcurrency": "

The updated maximum number of concurrent invocations your serverless endpoint can process.

" } }, "ServerlessMemorySizeInMB": { @@ -11971,6 +11978,13 @@ "ProductionVariantServerlessConfig$MemorySizeInMB": "

The memory size of your serverless endpoint. Valid values are in 1 GB increments: 1024 MB, 2048 MB, 3072 MB, 4096 MB, 5120 MB, or 6144 MB.

" } }, + "ServerlessProvisionedConcurrency": { + "base": null, + "refs": { + "ProductionVariantServerlessConfig$ProvisionedConcurrency": "

The amount of provisioned concurrency to allocate for the serverless endpoint. Should be less than or equal to MaxConcurrency.

", + "ProductionVariantServerlessUpdateConfig$ProvisionedConcurrency": "

The updated amount of provisioned concurrency to allocate for the serverless endpoint. Should be less than or equal to MaxConcurrency.

" + } + }, "ServiceCatalogEntityId": { "base": null, "refs": { diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 6695f1b75e7..f3e84a7d000 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -1817,12 +1817,16 @@ }, "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } diff --git a/service/applicationautoscaling/api.go b/service/applicationautoscaling/api.go index 4bad768eca6..84c1dd92c40 100644 --- a/service/applicationautoscaling/api.go +++ b/service/applicationautoscaling/api.go @@ -1909,6 +1909,9 @@ type DeleteScalingPolicyInput struct { // * Neptune cluster - The resource type is cluster and the unique identifier // is the cluster name. Example: cluster:mycluster. // + // * SageMaker Serverless endpoint - The resource type is variant and the + // unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -1976,6 +1979,9 @@ type DeleteScalingPolicyInput struct { // * neptune:cluster:ReadReplicaCount - The count of read replicas in an // Amazon Neptune DB cluster. // + // * sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency + // for a SageMaker Serverless endpoint. + // // ScalableDimension is a required field ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` @@ -2137,6 +2143,9 @@ type DeleteScheduledActionInput struct { // * Neptune cluster - The resource type is cluster and the unique identifier // is the cluster name. Example: cluster:mycluster. 
// + // * SageMaker Serverless endpoint - The resource type is variant and the + // unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -2204,6 +2213,9 @@ type DeleteScheduledActionInput struct { // * neptune:cluster:ReadReplicaCount - The count of read replicas in an // Amazon Neptune DB cluster. // + // * sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency + // for a SageMaker Serverless endpoint. + // // ScalableDimension is a required field ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` @@ -2370,6 +2382,9 @@ type DeregisterScalableTargetInput struct { // * Neptune cluster - The resource type is cluster and the unique identifier // is the cluster name. Example: cluster:mycluster. // + // * SageMaker Serverless endpoint - The resource type is variant and the + // unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -2437,6 +2452,9 @@ type DeregisterScalableTargetInput struct { // * neptune:cluster:ReadReplicaCount - The count of read replicas in an // Amazon Neptune DB cluster. // + // * sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency + // for a SageMaker Serverless endpoint. + // // ScalableDimension is a required field ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` @@ -2597,6 +2615,9 @@ type DescribeScalableTargetsInput struct { // // * Neptune cluster - The resource type is cluster and the unique identifier // is the cluster name. Example: cluster:mycluster. + // + // * SageMaker Serverless endpoint - The resource type is variant and the + // unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. 
ResourceIds []*string `type:"list"` // The scalable dimension associated with the scalable target. This string consists @@ -2663,6 +2684,9 @@ type DescribeScalableTargetsInput struct { // // * neptune:cluster:ReadReplicaCount - The count of read replicas in an // Amazon Neptune DB cluster. + // + // * sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency + // for a SageMaker Serverless endpoint. ScalableDimension *string `type:"string" enum:"ScalableDimension"` // The namespace of the Amazon Web Services service that provides the resource. @@ -2851,6 +2875,9 @@ type DescribeScalingActivitiesInput struct { // // * Neptune cluster - The resource type is cluster and the unique identifier // is the cluster name. Example: cluster:mycluster. + // + // * SageMaker Serverless endpoint - The resource type is variant and the + // unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. ResourceId *string `min:"1" type:"string"` // The scalable dimension. This string consists of the service namespace, resource @@ -2917,6 +2944,9 @@ type DescribeScalingActivitiesInput struct { // // * neptune:cluster:ReadReplicaCount - The count of read replicas in an // Amazon Neptune DB cluster. + // + // * sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency + // for a SageMaker Serverless endpoint. ScalableDimension *string `type:"string" enum:"ScalableDimension"` // The namespace of the Amazon Web Services service that provides the resource. @@ -3110,6 +3140,9 @@ type DescribeScalingPoliciesInput struct { // // * Neptune cluster - The resource type is cluster and the unique identifier // is the cluster name. Example: cluster:mycluster. + // + // * SageMaker Serverless endpoint - The resource type is variant and the + // unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. ResourceId *string `min:"1" type:"string"` // The scalable dimension. 
This string consists of the service namespace, resource @@ -3176,6 +3209,9 @@ type DescribeScalingPoliciesInput struct { // // * neptune:cluster:ReadReplicaCount - The count of read replicas in an // Amazon Neptune DB cluster. + // + // * sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency + // for a SageMaker Serverless endpoint. ScalableDimension *string `type:"string" enum:"ScalableDimension"` // The namespace of the Amazon Web Services service that provides the resource. @@ -3366,6 +3402,9 @@ type DescribeScheduledActionsInput struct { // // * Neptune cluster - The resource type is cluster and the unique identifier // is the cluster name. Example: cluster:mycluster. + // + // * SageMaker Serverless endpoint - The resource type is variant and the + // unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. ResourceId *string `min:"1" type:"string"` // The scalable dimension. This string consists of the service namespace, resource @@ -3432,6 +3471,9 @@ type DescribeScheduledActionsInput struct { // // * neptune:cluster:ReadReplicaCount - The count of read replicas in an // Amazon Neptune DB cluster. + // + // * sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency + // for a SageMaker Serverless endpoint. ScalableDimension *string `type:"string" enum:"ScalableDimension"` // The names of the scheduled actions to describe. @@ -4278,6 +4320,9 @@ type PutScalingPolicyInput struct { // * Neptune cluster - The resource type is cluster and the unique identifier // is the cluster name. Example: cluster:mycluster. // + // * SageMaker Serverless endpoint - The resource type is variant and the + // unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. 
+ // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -4345,6 +4390,9 @@ type PutScalingPolicyInput struct { // * neptune:cluster:ReadReplicaCount - The count of read replicas in an // Amazon Neptune DB cluster. // + // * sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency + // for a SageMaker Serverless endpoint. + // // ScalableDimension is a required field ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` @@ -4570,6 +4618,9 @@ type PutScheduledActionInput struct { // * Neptune cluster - The resource type is cluster and the unique identifier // is the cluster name. Example: cluster:mycluster. // + // * SageMaker Serverless endpoint - The resource type is variant and the + // unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -4637,6 +4688,9 @@ type PutScheduledActionInput struct { // * neptune:cluster:ReadReplicaCount - The count of read replicas in an // Amazon Neptune DB cluster. // + // * sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency + // for a SageMaker Serverless endpoint. + // // ScalableDimension is a required field ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` @@ -4860,6 +4914,8 @@ type RegisterScalableTargetInput struct { // // * Lambda provisioned concurrency // + // * SageMaker Serverless endpoint provisioned concurrency + // // * SageMaker endpoint variants // // * Spot Fleets @@ -4931,6 +4987,9 @@ type RegisterScalableTargetInput struct { // * Neptune cluster - The resource type is cluster and the unique identifier // is the cluster name. Example: cluster:mycluster. // + // * SageMaker Serverless endpoint - The resource type is variant and the + // unique identifier is the resource ID. 
Example: endpoint/my-end-point/variant/KMeansClustering. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -5007,6 +5066,9 @@ type RegisterScalableTargetInput struct { // * neptune:cluster:ReadReplicaCount - The count of read replicas in an // Amazon Neptune DB cluster. // + // * sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency + // for a SageMaker Serverless endpoint. + // // ScalableDimension is a required field ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` @@ -5315,6 +5377,9 @@ type ScalableTarget struct { // * Neptune cluster - The resource type is cluster and the unique identifier // is the cluster name. Example: cluster:mycluster. // + // * SageMaker Serverless endpoint - The resource type is variant and the + // unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -5388,6 +5453,9 @@ type ScalableTarget struct { // * neptune:cluster:ReadReplicaCount - The count of read replicas in an // Amazon Neptune DB cluster. // + // * sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency + // for a SageMaker Serverless endpoint. + // // ScalableDimension is a required field ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` @@ -5615,6 +5683,9 @@ type ScalingActivity struct { // * Neptune cluster - The resource type is cluster and the unique identifier // is the cluster name. Example: cluster:mycluster. // + // * SageMaker Serverless endpoint - The resource type is variant and the + // unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. 
+ // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -5682,6 +5753,9 @@ type ScalingActivity struct { // * neptune:cluster:ReadReplicaCount - The count of read replicas in an // Amazon Neptune DB cluster. // + // * sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency + // for a SageMaker Serverless endpoint. + // // ScalableDimension is a required field ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` @@ -5889,6 +5963,9 @@ type ScalingPolicy struct { // * Neptune cluster - The resource type is cluster and the unique identifier // is the cluster name. Example: cluster:mycluster. // + // * SageMaker Serverless endpoint - The resource type is variant and the + // unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -5956,6 +6033,9 @@ type ScalingPolicy struct { // * neptune:cluster:ReadReplicaCount - The count of read replicas in an // Amazon Neptune DB cluster. // + // * sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency + // for a SageMaker Serverless endpoint. + // // ScalableDimension is a required field ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` @@ -6117,6 +6197,9 @@ type ScheduledAction struct { // * Neptune cluster - The resource type is cluster and the unique identifier // is the cluster name. Example: cluster:mycluster. // + // * SageMaker Serverless endpoint - The resource type is variant and the + // unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -6183,6 +6266,9 @@ type ScheduledAction struct { // // * neptune:cluster:ReadReplicaCount - The count of read replicas in an // Amazon Neptune DB cluster. 
+ // + // * sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency + // for a SageMaker Serverless endpoint. ScalableDimension *string `type:"string" enum:"ScalableDimension"` // The new minimum and maximum capacity. You can set both values or just one. @@ -6482,6 +6568,8 @@ type StepScalingPolicyConfiguration struct { // // * Neptune clusters // + // * SageMaker Serverless endpoint provisioned concurrency + // // * SageMaker endpoint variants // // * Spot Fleets @@ -6661,7 +6749,7 @@ type TagResourceInput struct { ResourceARN *string `min:"1" type:"string" required:"true"` // The tags assigned to the resource. A tag is a label that you assign to an - // AWS resource. + // Amazon Web Services resource. // // Each tag consists of a tag key and a tag value. // @@ -7162,6 +7250,8 @@ type TargetTrackingScalingPolicyConfiguration struct { // // * Neptune clusters // + // * SageMaker Serverless endpoint provisioned concurrency + // // * SageMaker endpoint variants // // * Spot Fleets @@ -7207,6 +7297,8 @@ type TargetTrackingScalingPolicyConfiguration struct { // // * Neptune clusters // + // * SageMaker Serverless endpoint provisioned concurrency + // // * SageMaker endpoint variants // // * Spot Fleets @@ -7672,6 +7764,9 @@ const ( // MetricTypeNeptuneReaderAverageCpuutilization is a MetricType enum value MetricTypeNeptuneReaderAverageCpuutilization = "NeptuneReaderAverageCPUUtilization" + + // MetricTypeSageMakerVariantProvisionedConcurrencyUtilization is a MetricType enum value + MetricTypeSageMakerVariantProvisionedConcurrencyUtilization = "SageMakerVariantProvisionedConcurrencyUtilization" ) // MetricType_Values returns all elements of the MetricType enum @@ -7698,6 +7793,7 @@ func MetricType_Values() []string { MetricTypeElastiCacheReplicaEngineCpuutilization, MetricTypeElastiCacheDatabaseMemoryUsageCountedForEvictPercentage, MetricTypeNeptuneReaderAverageCpuutilization, + MetricTypeSageMakerVariantProvisionedConcurrencyUtilization, } } @@ 
-7777,6 +7873,9 @@ const ( // ScalableDimensionNeptuneClusterReadReplicaCount is a ScalableDimension enum value ScalableDimensionNeptuneClusterReadReplicaCount = "neptune:cluster:ReadReplicaCount" + + // ScalableDimensionSagemakerVariantDesiredProvisionedConcurrency is a ScalableDimension enum value + ScalableDimensionSagemakerVariantDesiredProvisionedConcurrency = "sagemaker:variant:DesiredProvisionedConcurrency" ) // ScalableDimension_Values returns all elements of the ScalableDimension enum @@ -7802,6 +7901,7 @@ func ScalableDimension_Values() []string { ScalableDimensionElasticacheReplicationGroupNodeGroups, ScalableDimensionElasticacheReplicationGroupReplicas, ScalableDimensionNeptuneClusterReadReplicaCount, + ScalableDimensionSagemakerVariantDesiredProvisionedConcurrency, } } diff --git a/service/applicationautoscaling/doc.go b/service/applicationautoscaling/doc.go index 51caf93cb74..de978102cdc 100644 --- a/service/applicationautoscaling/doc.go +++ b/service/applicationautoscaling/doc.go @@ -28,6 +28,8 @@ // // - Amazon Neptune clusters // +// - Amazon SageMaker Serverless endpoint provisioned concurrency +// // - Amazon SageMaker endpoint variants // // - Spot Fleets (Amazon EC2) diff --git a/service/glue/api.go b/service/glue/api.go index aa6dd0cead4..2a95c0e6cdd 100644 --- a/service/glue/api.go +++ b/service/glue/api.go @@ -21160,6 +21160,418 @@ func (s *AlreadyExistsException) RequestID() string { return s.RespMetadata.RequestID } +// Specifies an Amazon Redshift data store. +type AmazonRedshiftAdvancedOption struct { + _ struct{} `type:"structure"` + + // The key when specifying a key-value pair. + Key *string `type:"string"` + + // The value when specifying a key-value pair. + Value *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AmazonRedshiftAdvancedOption) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AmazonRedshiftAdvancedOption) GoString() string {
+	return s.String()
+}
+
+// SetKey sets the Key field's value.
+func (s *AmazonRedshiftAdvancedOption) SetKey(v string) *AmazonRedshiftAdvancedOption {
+	s.Key = &v
+	return s
+}
+
+// SetValue sets the Value field's value.
+func (s *AmazonRedshiftAdvancedOption) SetValue(v string) *AmazonRedshiftAdvancedOption {
+	s.Value = &v
+	return s
+}
+
+// Specifies an Amazon Redshift node.
+type AmazonRedshiftNodeData struct {
+	_ struct{} `type:"structure"`
+
+	// The access type for the Redshift connection. Can be a direct connection or
+	// catalog connections.
+	AccessType *string `type:"string"`
+
+	// Specifies how writing to a Redshift cluster will occur.
+	Action *string `type:"string"`
+
+	// Optional values when connecting to the Redshift cluster.
+	AdvancedOptions []*AmazonRedshiftAdvancedOption `type:"list"`
+
+	// The name of the Glue Data Catalog database when working with a data catalog.
+	CatalogDatabase *Option `type:"structure"`
+
+	// The Redshift schema name when working with a data catalog.
+	CatalogRedshiftSchema *string `type:"string"`
+
+	// The database table to read from.
+	CatalogRedshiftTable *string `type:"string"`
+
+	// The Glue Data Catalog table name when working with a data catalog.
+	CatalogTable *Option `type:"structure"`
+
+	// The Glue connection to the Redshift cluster.
+	Connection *Option `type:"structure"`
+
+	// Specifies the name of the connection that is associated with the catalog
+	// table used.
+	CrawlerConnection *string `type:"string"`
+
+	// Optional. The role name to use when connecting to S3. The IAM role will default
+	// to the role on the job when left blank.
+	IamRole *Option `type:"structure"`
+
+	// The action used to determine how a MERGE in a Redshift sink will be handled.
+	MergeAction *string `type:"string"`
+
+	// The SQL used in a custom merge to deal with matching records.
+	MergeClause *string `type:"string"`
+
+	// The action used to determine how a MERGE in a Redshift sink will be handled
+	// when an existing record matches a new record.
+	MergeWhenMatched *string `type:"string"`
+
+	// The action used to determine how a MERGE in a Redshift sink will be handled
+	// when an existing record doesn't match a new record.
+	MergeWhenNotMatched *string `type:"string"`
+
+	// The SQL used after a MERGE or APPEND with upsert is run.
+	PostAction *string `type:"string"`
+
+	// The SQL used before a MERGE or APPEND with upsert is run.
+	PreAction *string `type:"string"`
+
+	// The SQL used to fetch the data from Redshift sources when the SourceType
+	// is 'query'.
+	SampleQuery *string `type:"string"`
+
+	// The Redshift schema name when working with a direct connection.
+	Schema *Option `type:"structure"`
+
+	// The list of column names used to determine a matching record when doing a
+	// MERGE or APPEND with upsert.
+	SelectedColumns []*Option `type:"list"`
+
+	// The source type to specify whether a specific table is the source or a custom
+	// query.
+	SourceType *string `type:"string"`
+
+	// The name of the temporary staging table that is used when doing a MERGE or
+	// APPEND with upsert.
+	StagingTable *string `type:"string"`
+
+	// The Redshift table name when working with a direct connection.
+	Table *Option `type:"structure"`
+
+	// Specifies the prefix to a table.
+	TablePrefix *string `type:"string"`
+
+	// The array of schema output for a given node.
+ TableSchema []*Option `type:"list"` + + // The Amazon S3 path where temporary data can be staged when copying out of + // the database. + TempDir *string `type:"string"` + + // The action used on Redshift sinks when doing an APPEND. + Upsert *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AmazonRedshiftNodeData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AmazonRedshiftNodeData) GoString() string { + return s.String() +} + +// SetAccessType sets the AccessType field's value. +func (s *AmazonRedshiftNodeData) SetAccessType(v string) *AmazonRedshiftNodeData { + s.AccessType = &v + return s +} + +// SetAction sets the Action field's value. +func (s *AmazonRedshiftNodeData) SetAction(v string) *AmazonRedshiftNodeData { + s.Action = &v + return s +} + +// SetAdvancedOptions sets the AdvancedOptions field's value. +func (s *AmazonRedshiftNodeData) SetAdvancedOptions(v []*AmazonRedshiftAdvancedOption) *AmazonRedshiftNodeData { + s.AdvancedOptions = v + return s +} + +// SetCatalogDatabase sets the CatalogDatabase field's value. +func (s *AmazonRedshiftNodeData) SetCatalogDatabase(v *Option) *AmazonRedshiftNodeData { + s.CatalogDatabase = v + return s +} + +// SetCatalogRedshiftSchema sets the CatalogRedshiftSchema field's value. +func (s *AmazonRedshiftNodeData) SetCatalogRedshiftSchema(v string) *AmazonRedshiftNodeData { + s.CatalogRedshiftSchema = &v + return s +} + +// SetCatalogRedshiftTable sets the CatalogRedshiftTable field's value. 
+func (s *AmazonRedshiftNodeData) SetCatalogRedshiftTable(v string) *AmazonRedshiftNodeData { + s.CatalogRedshiftTable = &v + return s +} + +// SetCatalogTable sets the CatalogTable field's value. +func (s *AmazonRedshiftNodeData) SetCatalogTable(v *Option) *AmazonRedshiftNodeData { + s.CatalogTable = v + return s +} + +// SetConnection sets the Connection field's value. +func (s *AmazonRedshiftNodeData) SetConnection(v *Option) *AmazonRedshiftNodeData { + s.Connection = v + return s +} + +// SetCrawlerConnection sets the CrawlerConnection field's value. +func (s *AmazonRedshiftNodeData) SetCrawlerConnection(v string) *AmazonRedshiftNodeData { + s.CrawlerConnection = &v + return s +} + +// SetIamRole sets the IamRole field's value. +func (s *AmazonRedshiftNodeData) SetIamRole(v *Option) *AmazonRedshiftNodeData { + s.IamRole = v + return s +} + +// SetMergeAction sets the MergeAction field's value. +func (s *AmazonRedshiftNodeData) SetMergeAction(v string) *AmazonRedshiftNodeData { + s.MergeAction = &v + return s +} + +// SetMergeClause sets the MergeClause field's value. +func (s *AmazonRedshiftNodeData) SetMergeClause(v string) *AmazonRedshiftNodeData { + s.MergeClause = &v + return s +} + +// SetMergeWhenMatched sets the MergeWhenMatched field's value. +func (s *AmazonRedshiftNodeData) SetMergeWhenMatched(v string) *AmazonRedshiftNodeData { + s.MergeWhenMatched = &v + return s +} + +// SetMergeWhenNotMatched sets the MergeWhenNotMatched field's value. +func (s *AmazonRedshiftNodeData) SetMergeWhenNotMatched(v string) *AmazonRedshiftNodeData { + s.MergeWhenNotMatched = &v + return s +} + +// SetPostAction sets the PostAction field's value. +func (s *AmazonRedshiftNodeData) SetPostAction(v string) *AmazonRedshiftNodeData { + s.PostAction = &v + return s +} + +// SetPreAction sets the PreAction field's value. 
+func (s *AmazonRedshiftNodeData) SetPreAction(v string) *AmazonRedshiftNodeData {
+	s.PreAction = &v
+	return s
+}
+
+// SetSampleQuery sets the SampleQuery field's value.
+func (s *AmazonRedshiftNodeData) SetSampleQuery(v string) *AmazonRedshiftNodeData {
+	s.SampleQuery = &v
+	return s
+}
+
+// SetSchema sets the Schema field's value.
+func (s *AmazonRedshiftNodeData) SetSchema(v *Option) *AmazonRedshiftNodeData {
+	s.Schema = v
+	return s
+}
+
+// SetSelectedColumns sets the SelectedColumns field's value.
+func (s *AmazonRedshiftNodeData) SetSelectedColumns(v []*Option) *AmazonRedshiftNodeData {
+	s.SelectedColumns = v
+	return s
+}
+
+// SetSourceType sets the SourceType field's value.
+func (s *AmazonRedshiftNodeData) SetSourceType(v string) *AmazonRedshiftNodeData {
+	s.SourceType = &v
+	return s
+}
+
+// SetStagingTable sets the StagingTable field's value.
+func (s *AmazonRedshiftNodeData) SetStagingTable(v string) *AmazonRedshiftNodeData {
+	s.StagingTable = &v
+	return s
+}
+
+// SetTable sets the Table field's value.
+func (s *AmazonRedshiftNodeData) SetTable(v *Option) *AmazonRedshiftNodeData {
+	s.Table = v
+	return s
+}
+
+// SetTablePrefix sets the TablePrefix field's value.
+func (s *AmazonRedshiftNodeData) SetTablePrefix(v string) *AmazonRedshiftNodeData {
+	s.TablePrefix = &v
+	return s
+}
+
+// SetTableSchema sets the TableSchema field's value.
+func (s *AmazonRedshiftNodeData) SetTableSchema(v []*Option) *AmazonRedshiftNodeData {
+	s.TableSchema = v
+	return s
+}
+
+// SetTempDir sets the TempDir field's value.
+func (s *AmazonRedshiftNodeData) SetTempDir(v string) *AmazonRedshiftNodeData {
+	s.TempDir = &v
+	return s
+}
+
+// SetUpsert sets the Upsert field's value.
+func (s *AmazonRedshiftNodeData) SetUpsert(v bool) *AmazonRedshiftNodeData {
+	s.Upsert = &v
+	return s
+}
+
+// Specifies an Amazon Redshift source.
+type AmazonRedshiftSource struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the data of the Amazon Redshift source node.
+	Data *AmazonRedshiftNodeData `type:"structure"`
+
+	// The name of the Amazon Redshift source.
+	Name *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AmazonRedshiftSource) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AmazonRedshiftSource) GoString() string {
+	return s.String()
+}
+
+// SetData sets the Data field's value.
+func (s *AmazonRedshiftSource) SetData(v *AmazonRedshiftNodeData) *AmazonRedshiftSource {
+	s.Data = v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *AmazonRedshiftSource) SetName(v string) *AmazonRedshiftSource {
+	s.Name = &v
+	return s
+}
+
+// Specifies an Amazon Redshift target.
+type AmazonRedshiftTarget struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the data of the Amazon Redshift target node.
+	Data *AmazonRedshiftNodeData `type:"structure"`
+
+	// The nodes that are inputs to the data target.
+	Inputs []*string `min:"1" type:"list"`
+
+	// The name of the Amazon Redshift target.
+	Name *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AmazonRedshiftTarget) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AmazonRedshiftTarget) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AmazonRedshiftTarget) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AmazonRedshiftTarget"}
+	if s.Inputs != nil && len(s.Inputs) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Inputs", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetData sets the Data field's value.
+func (s *AmazonRedshiftTarget) SetData(v *AmazonRedshiftNodeData) *AmazonRedshiftTarget {
+	s.Data = v
+	return s
+}
+
+// SetInputs sets the Inputs field's value.
+func (s *AmazonRedshiftTarget) SetInputs(v []*string) *AmazonRedshiftTarget {
+	s.Inputs = v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *AmazonRedshiftTarget) SetName(v string) *AmazonRedshiftTarget {
+	s.Name = &v
+	return s
+}
+
 // Specifies a transform that maps data property keys in the data source to
 // data property keys in the data target. You can rename keys, modify the data
 // types for keys, and choose which keys to drop from the dataset.
@@ -25306,6 +25718,12 @@ type CodeGenConfigurationNode struct {
 	// aggregated value by specified function.
 	Aggregate *Aggregate `type:"structure"`
 
+	// Specifies a source that reads from a data source in Amazon Redshift.
+	AmazonRedshiftSource *AmazonRedshiftSource `type:"structure"`
+
+	// Specifies a target that writes to a data target in Amazon Redshift.
+	AmazonRedshiftTarget *AmazonRedshiftTarget `type:"structure"`
+
 	// Specifies a transform that maps data property keys in the data source to
 	// data property keys in the data target.
You can rename keys, modify the data // types for keys, and choose which keys to drop from the dataset. @@ -25545,6 +25963,11 @@ func (s *CodeGenConfigurationNode) Validate() error { invalidParams.AddNested("Aggregate", err.(request.ErrInvalidParams)) } } + if s.AmazonRedshiftTarget != nil { + if err := s.AmazonRedshiftTarget.Validate(); err != nil { + invalidParams.AddNested("AmazonRedshiftTarget", err.(request.ErrInvalidParams)) + } + } if s.ApplyMapping != nil { if err := s.ApplyMapping.Validate(); err != nil { invalidParams.AddNested("ApplyMapping", err.(request.ErrInvalidParams)) @@ -25868,6 +26291,18 @@ func (s *CodeGenConfigurationNode) SetAggregate(v *Aggregate) *CodeGenConfigurat return s } +// SetAmazonRedshiftSource sets the AmazonRedshiftSource field's value. +func (s *CodeGenConfigurationNode) SetAmazonRedshiftSource(v *AmazonRedshiftSource) *CodeGenConfigurationNode { + s.AmazonRedshiftSource = v + return s +} + +// SetAmazonRedshiftTarget sets the AmazonRedshiftTarget field's value. +func (s *CodeGenConfigurationNode) SetAmazonRedshiftTarget(v *AmazonRedshiftTarget) *CodeGenConfigurationNode { + s.AmazonRedshiftTarget = v + return s +} + // SetApplyMapping sets the ApplyMapping field's value. func (s *CodeGenConfigurationNode) SetApplyMapping(v *ApplyMapping) *CodeGenConfigurationNode { s.ApplyMapping = v @@ -38402,6 +38837,9 @@ type DynamicTransform struct { // Name is a required field Name *string `type:"string" required:"true"` + // Specifies the data schema for the dynamic transform. + OutputSchemas []*GlueSchema `type:"list"` + // Specifies the parameters of the dynamic transform. 
Parameters []*TransformConfigParameter `type:"list"` @@ -38459,6 +38897,16 @@ func (s *DynamicTransform) Validate() error { if s.TransformName == nil { invalidParams.Add(request.NewErrParamRequired("TransformName")) } + if s.OutputSchemas != nil { + for i, v := range s.OutputSchemas { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputSchemas", i), err.(request.ErrInvalidParams)) + } + } + } if s.Parameters != nil { for i, v := range s.Parameters { if v == nil { @@ -38494,6 +38942,12 @@ func (s *DynamicTransform) SetName(v string) *DynamicTransform { return s } +// SetOutputSchemas sets the OutputSchemas field's value. +func (s *DynamicTransform) SetOutputSchemas(v []*GlueSchema) *DynamicTransform { + s.OutputSchemas = v + return s +} + // SetParameters sets the Parameters field's value. func (s *DynamicTransform) SetParameters(v []*TransformConfigParameter) *DynamicTransform { s.Parameters = v @@ -55730,6 +56184,56 @@ func (s *OperationTimeoutException) RequestID() string { return s.RespMetadata.RequestID } +// Specifies an option value. +type Option struct { + _ struct{} `type:"structure"` + + // Specifies the description of the option. + Description *string `type:"string"` + + // Specifies the label of the option. + Label *string `type:"string"` + + // Specifies the value of the option. + Value *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Option) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s Option) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *Option) SetDescription(v string) *Option { + s.Description = &v + return s +} + +// SetLabel sets the Label field's value. +func (s *Option) SetLabel(v string) *Option { + s.Label = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Option) SetValue(v string) *Option { + s.Value = &v + return s +} + // Specifies an Oracle data source in the Glue Data Catalog. type OracleSQLCatalogSource struct { _ struct{} `type:"structure"` diff --git a/service/sagemaker/api.go b/service/sagemaker/api.go index bc2b94875d9..55ecb22db8d 100644 --- a/service/sagemaker/api.go +++ b/service/sagemaker/api.go @@ -38478,7 +38478,8 @@ type CreateDomainInput struct { // The entity that creates and manages the required security groups for inter-app // communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType // is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn - // is provided. + // is provided. If setting up the domain for use with RStudio, this value must + // be set to Service. AppSecurityGroupManagement *string `type:"string" enum:"AppSecurityGroupManagement"` // The mode of authentication that members use to access the domain. @@ -63453,6 +63454,10 @@ type DesiredWeightAndCapacity struct { // The variant's weight. DesiredWeight *float64 `type:"float"` + // Specifies the serverless update concurrency configuration for an endpoint + // variant. + ServerlessUpdateConfig *ProductionVariantServerlessUpdateConfig `type:"structure"` + // The name of the variant to update. 
// // VariantName is a required field @@ -63483,6 +63488,11 @@ func (s *DesiredWeightAndCapacity) Validate() error { if s.VariantName == nil { invalidParams.Add(request.NewErrParamRequired("VariantName")) } + if s.ServerlessUpdateConfig != nil { + if err := s.ServerlessUpdateConfig.Validate(); err != nil { + invalidParams.AddNested("ServerlessUpdateConfig", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -63502,6 +63512,12 @@ func (s *DesiredWeightAndCapacity) SetDesiredWeight(v float64) *DesiredWeightAnd return s } +// SetServerlessUpdateConfig sets the ServerlessUpdateConfig field's value. +func (s *DesiredWeightAndCapacity) SetServerlessUpdateConfig(v *ProductionVariantServerlessUpdateConfig) *DesiredWeightAndCapacity { + s.ServerlessUpdateConfig = v + return s +} + // SetVariantName sets the VariantName field's value. func (s *DesiredWeightAndCapacity) SetVariantName(v string) *DesiredWeightAndCapacity { s.VariantName = &v @@ -64320,7 +64336,8 @@ type DomainSettingsForUpdate struct { // or Pending state. ExecutionRoleIdentityConfig *string `type:"string" enum:"ExecutionRoleIdentityConfig"` - // A collection of RStudioServerPro Domain-level app settings to update. + // A collection of RStudioServerPro Domain-level app settings to update. A single + // RStudioServerPro application is created for a domain. RStudioServerProDomainSettingsForUpdate *RStudioServerProDomainSettingsForUpdate `type:"structure"` // The security groups for the Amazon Virtual Private Cloud that the Domain @@ -97160,6 +97177,10 @@ type ProductionVariantServerlessConfig struct { // // MemorySizeInMB is a required field MemorySizeInMB *int64 `min:"1024" type:"integer" required:"true"` + + // The amount of provisioned concurrency to allocate for the serverless endpoint. + // Should be less than or equal to MaxConcurrency. + ProvisionedConcurrency *int64 `min:"1" type:"integer"` } // String returns the string representation. 
@@ -97195,6 +97216,9 @@ func (s *ProductionVariantServerlessConfig) Validate() error { if s.MemorySizeInMB != nil && *s.MemorySizeInMB < 1024 { invalidParams.Add(request.NewErrParamMinValue("MemorySizeInMB", 1024)) } + if s.ProvisionedConcurrency != nil && *s.ProvisionedConcurrency < 1 { + invalidParams.Add(request.NewErrParamMinValue("ProvisionedConcurrency", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -97214,6 +97238,72 @@ func (s *ProductionVariantServerlessConfig) SetMemorySizeInMB(v int64) *Producti return s } +// SetProvisionedConcurrency sets the ProvisionedConcurrency field's value. +func (s *ProductionVariantServerlessConfig) SetProvisionedConcurrency(v int64) *ProductionVariantServerlessConfig { + s.ProvisionedConcurrency = &v + return s +} + +// Specifies the serverless update concurrency configuration for an endpoint +// variant. +type ProductionVariantServerlessUpdateConfig struct { + _ struct{} `type:"structure"` + + // The updated maximum number of concurrent invocations your serverless endpoint + // can process. + MaxConcurrency *int64 `min:"1" type:"integer"` + + // The updated amount of provisioned concurrency to allocate for the serverless + // endpoint. Should be less than or equal to MaxConcurrency. + ProvisionedConcurrency *int64 `min:"1" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProductionVariantServerlessUpdateConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s ProductionVariantServerlessUpdateConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProductionVariantServerlessUpdateConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProductionVariantServerlessUpdateConfig"} + if s.MaxConcurrency != nil && *s.MaxConcurrency < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxConcurrency", 1)) + } + if s.ProvisionedConcurrency != nil && *s.ProvisionedConcurrency < 1 { + invalidParams.Add(request.NewErrParamMinValue("ProvisionedConcurrency", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxConcurrency sets the MaxConcurrency field's value. +func (s *ProductionVariantServerlessUpdateConfig) SetMaxConcurrency(v int64) *ProductionVariantServerlessUpdateConfig { + s.MaxConcurrency = &v + return s +} + +// SetProvisionedConcurrency sets the ProvisionedConcurrency field's value. +func (s *ProductionVariantServerlessUpdateConfig) SetProvisionedConcurrency(v int64) *ProductionVariantServerlessUpdateConfig { + s.ProvisionedConcurrency = &v + return s +} + // Describes the status of the production variant. type ProductionVariantStatus struct { _ struct{} `type:"structure"` @@ -109889,7 +109979,8 @@ type UpdateDomainInput struct { // The entity that creates and manages the required security groups for inter-app // communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType // is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn - // is provided. + // is provided. If setting up the domain for use with RStudio, this value must + // be set to Service. AppSecurityGroupManagement *string `type:"string" enum:"AppSecurityGroupManagement"` // The default settings used to create a space within the Domain. 
From 225097cc889350c51e00536bfc5ed30d89f0ff35 Mon Sep 17 00:00:00 2001 From: Isaiah Vita <82135527+isaiahvita@users.noreply.github.com> Date: Wed, 10 May 2023 09:04:22 -0700 Subject: [PATCH 3/7] remove deprecated service AWS SMS integ tests (#4834) --- CHANGELOG_PENDING.md | 3 ++ private/model/cli/gen-api/main.go | 4 +- service/sms/integ_test.go | 66 ------------------------------- 3 files changed, 6 insertions(+), 67 deletions(-) delete mode 100644 service/sms/integ_test.go diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 8a1927a39ca..a7039eda5b2 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -3,3 +3,6 @@ ### SDK Enhancements ### SDK Bugs + +* `service/sms`: Remove deprecated services (SMS) integration tests. + * SMS integration tests will fail because SMS deprecated their service. \ No newline at end of file diff --git a/private/model/cli/gen-api/main.go b/private/model/cli/gen-api/main.go index 5bb58b08a34..12a7995dae4 100644 --- a/private/model/cli/gen-api/main.go +++ b/private/model/cli/gen-api/main.go @@ -187,7 +187,9 @@ func writeServiceFiles(g *generateInfo, pkgDir string) { Must(writeS3ManagerUploadInputFile(g)) } - if len(g.API.SmokeTests.TestCases) > 0 { + // SMS service is deprecated and endpoints are turned off, so dont generate + // integration tests for that service. + if len(g.API.SmokeTests.TestCases) > 0 && g.API.PackageName() != "sms" { Must(writeAPISmokeTestsFile(g)) } } diff --git a/service/sms/integ_test.go b/service/sms/integ_test.go deleted file mode 100644 index 3378a300663..00000000000 --- a/service/sms/integ_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
- -//go:build go1.16 && integration -// +build go1.16,integration - -package sms_test - -import ( - "context" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/awstesting/integration" - "github.com/aws/aws-sdk-go/service/sms" -) - -var _ aws.Config -var _ awserr.Error -var _ request.Request - -func TestInteg_00_GetConnectors(t *testing.T) { - ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) - defer cancelFn() - - sess := integration.SessionWithDefaultRegion("us-west-2") - svc := sms.New(sess) - params := &sms.GetConnectorsInput{} - _, err := svc.GetConnectorsWithContext(ctx, params, func(r *request.Request) { - r.Handlers.Validate.RemoveByName("core.ValidateParametersHandler") - }) - if err != nil { - t.Errorf("expect no error, got %v", err) - } -} -func TestInteg_01_DeleteReplicationJob(t *testing.T) { - ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) - defer cancelFn() - - sess := integration.SessionWithDefaultRegion("us-west-2") - svc := sms.New(sess) - params := &sms.DeleteReplicationJobInput{ - ReplicationJobId: aws.String("invalidId"), - } - _, err := svc.DeleteReplicationJobWithContext(ctx, params, func(r *request.Request) { - r.Handlers.Validate.RemoveByName("core.ValidateParametersHandler") - }) - if err == nil { - t.Fatalf("expect request to fail") - } - aerr, ok := err.(awserr.RequestFailure) - if !ok { - t.Fatalf("expect awserr, was %T", err) - } - if len(aerr.Code()) == 0 { - t.Errorf("expect non-empty error code") - } - if len(aerr.Message()) == 0 { - t.Errorf("expect non-empty error message") - } - if v := aerr.Code(); v == request.ErrCodeSerialization { - t.Errorf("expect API error code got serialization failure") - } -} From c741f7708a53e32659fc0fdd4c039966f64eb26a Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com> Date: 
Wed, 10 May 2023 11:26:21 -0700 Subject: [PATCH 4/7] Release v1.44.261 (2023-05-10) (#4836) Release v1.44.261 (2023-05-10) === ### Service Client Updates * `service/elasticmapreduce`: Updates service API and documentation * EMR Studio now supports programmatically executing a Notebooks on an EMR on EKS cluster. In addition, notebooks can now be executed by specifying its location in S3. * `service/rds`: Updates service API, documentation, waiters, paginators, and examples * Amazon Relational Database Service (RDS) updates for the new Aurora I/O-Optimized storage type for Amazon Aurora DB clusters * `service/swf`: Updates service API and documentation * This release adds a new API parameter to exclude old history events from decision tasks. ### SDK Bugs * `service/sms`: Remove deprecated services (SMS) integration tests. * SMS integration tests will fail because SMS deprecated their service. --- CHANGELOG.md | 16 + CHANGELOG_PENDING.md | 3 - aws/version.go | 2 +- .../elasticmapreduce/2009-03-31/api-2.json | 73 +- .../elasticmapreduce/2009-03-31/docs-2.json | 346 +++--- models/apis/rds/2014-10-31/api-2.json | 29 +- models/apis/rds/2014-10-31/docs-2.json | 31 +- models/apis/swf/2012-01-25/api-2.json | 4 +- models/apis/swf/2012-01-25/docs-2.json | 52 +- .../swf/2012-01-25/endpoint-rule-set-1.json | 436 ++++---- .../apis/swf/2012-01-25/endpoint-tests-1.json | 413 ++++---- models/apis/swf/2012-01-25/smoke.json | 20 + service/emr/api.go | 983 ++++++++++++------ service/rds/api.go | 139 ++- service/rds/errors.go | 9 +- service/rds/examples_test.go | 4 + service/swf/api.go | 102 +- service/swf/integ_test.go | 68 ++ 18 files changed, 1720 insertions(+), 1010 deletions(-) create mode 100644 models/apis/swf/2012-01-25/smoke.json create mode 100644 service/swf/integ_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index f1bb5a62085..cfd37b8c6d1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,19 @@ +Release v1.44.261 (2023-05-10) +=== + +### Service Client Updates +* 
`service/elasticmapreduce`: Updates service API and documentation + * EMR Studio now supports programmatically executing a Notebooks on an EMR on EKS cluster. In addition, notebooks can now be executed by specifying its location in S3. +* `service/rds`: Updates service API, documentation, waiters, paginators, and examples + * Amazon Relational Database Service (RDS) updates for the new Aurora I/O-Optimized storage type for Amazon Aurora DB clusters +* `service/swf`: Updates service API and documentation + * This release adds a new API parameter to exclude old history events from decision tasks. + +### SDK Bugs + +* `service/sms`: Remove deprecated services (SMS) integration tests. + * SMS integration tests will fail because SMS deprecated their service. + Release v1.44.260 (2023-05-09) === diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index a7039eda5b2..8a1927a39ca 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -3,6 +3,3 @@ ### SDK Enhancements ### SDK Bugs - -* `service/sms`: Remove deprecated services (SMS) integration tests. - * SMS integration tests will fail because SMS deprecated their service. 
\ No newline at end of file diff --git a/aws/version.go b/aws/version.go index af753a49130..51450cc09bc 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.260" +const SDKVersion = "1.44.261" diff --git a/models/apis/elasticmapreduce/2009-03-31/api-2.json b/models/apis/elasticmapreduce/2009-03-31/api-2.json index c3d097b797c..8f1e2a5c71f 100644 --- a/models/apis/elasticmapreduce/2009-03-31/api-2.json +++ b/models/apis/elasticmapreduce/2009-03-31/api-2.json @@ -1370,6 +1370,11 @@ "AdditionalSlaveSecurityGroups":{"shape":"StringList"} } }, + "EnvironmentVariablesMap":{ + "type":"map", + "key":{"shape":"XmlStringMaxLen256"}, + "value":{"shape":"XmlString"} + }, "ErrorCode":{ "type":"string", "max":256, @@ -1398,7 +1403,8 @@ "members":{ "Id":{"shape":"XmlStringMaxLen256"}, "Type":{"shape":"ExecutionEngineType"}, - "MasterInstanceSecurityGroupId":{"shape":"XmlStringMaxLen256"} + "MasterInstanceSecurityGroupId":{"shape":"XmlStringMaxLen256"}, + "ExecutionRoleArn":{"shape":"IAMRoleArn"} } }, "ExecutionEngineType":{ @@ -1511,6 +1517,12 @@ "Args":{"shape":"StringList"} } }, + "IAMRoleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:(aws[a-zA-Z0-9-]*):iam::(\\d{12})?:(role((\\u002F)|(\\u002F[\\u0021-\\u007F]+\\u002F))[\\w+=,.@-]+)$" + }, "IdentityType":{ "type":"string", "enum":[ @@ -2172,7 +2184,8 @@ "Status":{"shape":"NotebookExecutionStatus"}, "From":{"shape":"Date"}, "To":{"shape":"Date"}, - "Marker":{"shape":"Marker"} + "Marker":{"shape":"Marker"}, + "ExecutionEngineId":{"shape":"XmlString"} } }, "ListNotebookExecutionsOutput":{ @@ -2341,7 +2354,11 @@ "OutputNotebookURI":{"shape":"XmlString"}, "LastStateChangeReason":{"shape":"XmlString"}, "NotebookInstanceSecurityGroupId":{"shape":"XmlStringMaxLen256"}, - "Tags":{"shape":"TagList"} + "Tags":{"shape":"TagList"}, + "NotebookS3Location":{"shape":"NotebookS3LocationForOutput"}, + 
"OutputNotebookS3Location":{"shape":"OutputNotebookS3LocationForOutput"}, + "OutputNotebookFormat":{"shape":"OutputNotebookFormat"}, + "EnvironmentVariables":{"shape":"EnvironmentVariablesMap"} } }, "NotebookExecutionStatus":{ @@ -2367,13 +2384,29 @@ "NotebookExecutionName":{"shape":"XmlStringMaxLen256"}, "Status":{"shape":"NotebookExecutionStatus"}, "StartTime":{"shape":"Date"}, - "EndTime":{"shape":"Date"} + "EndTime":{"shape":"Date"}, + "NotebookS3Location":{"shape":"NotebookS3LocationForOutput"}, + "ExecutionEngineId":{"shape":"XmlString"} } }, "NotebookExecutionSummaryList":{ "type":"list", "member":{"shape":"NotebookExecutionSummary"} }, + "NotebookS3LocationForOutput":{ + "type":"structure", + "members":{ + "Bucket":{"shape":"XmlStringMaxLen256"}, + "Key":{"shape":"UriString"} + } + }, + "NotebookS3LocationFromInput":{ + "type":"structure", + "members":{ + "Bucket":{"shape":"XmlStringMaxLen256"}, + "Key":{"shape":"UriString"} + } + }, "OSRelease":{ "type":"structure", "members":{ @@ -2427,6 +2460,24 @@ "max":2048, "min":0 }, + "OutputNotebookFormat":{ + "type":"string", + "enum":["HTML"] + }, + "OutputNotebookS3LocationForOutput":{ + "type":"structure", + "members":{ + "Bucket":{"shape":"XmlStringMaxLen256"}, + "Key":{"shape":"UriString"} + } + }, + "OutputNotebookS3LocationFromInput":{ + "type":"structure", + "members":{ + "Bucket":{"shape":"XmlStringMaxLen256"}, + "Key":{"shape":"UriString"} + } + }, "PlacementGroupConfig":{ "type":"structure", "required":["InstanceRole"], @@ -2842,8 +2893,6 @@ "StartNotebookExecutionInput":{ "type":"structure", "required":[ - "EditorId", - "RelativePath", "ExecutionEngine", "ServiceRole" ], @@ -2855,7 +2904,11 @@ "ExecutionEngine":{"shape":"ExecutionEngineConfig"}, "ServiceRole":{"shape":"XmlString"}, "NotebookInstanceSecurityGroupId":{"shape":"XmlStringMaxLen256"}, - "Tags":{"shape":"TagList"} + "Tags":{"shape":"TagList"}, + "NotebookS3Location":{"shape":"NotebookS3LocationFromInput"}, + 
"OutputNotebookS3Location":{"shape":"OutputNotebookS3LocationFromInput"}, + "OutputNotebookFormat":{"shape":"OutputNotebookFormat"}, + "EnvironmentVariables":{"shape":"EnvironmentVariablesMap"} } }, "StartNotebookExecutionOutput":{ @@ -3162,6 +3215,12 @@ "SessionPolicyArn":{"shape":"XmlStringMaxLen256"} } }, + "UriString":{ + "type":"string", + "max":10280, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\r\\n\\t]*" + }, "UsernamePassword":{ "type":"structure", "members":{ diff --git a/models/apis/elasticmapreduce/2009-03-31/docs-2.json b/models/apis/elasticmapreduce/2009-03-31/docs-2.json index ef699090773..cbdb1db175f 100644 --- a/models/apis/elasticmapreduce/2009-03-31/docs-2.json +++ b/models/apis/elasticmapreduce/2009-03-31/docs-2.json @@ -2,11 +2,11 @@ "version": "2.0", "service": "

Amazon EMR is a web service that makes it easier to process large amounts of data efficiently. Amazon EMR uses Hadoop processing combined with several Amazon Web Services services to do tasks such as web indexing, data mining, log file analysis, machine learning, scientific simulation, and data warehouse management.

", "operations": { - "AddInstanceFleet": "

Adds an instance fleet to a running cluster.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x.

", + "AddInstanceFleet": "

Adds an instance fleet to a running cluster.

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x.

", "AddInstanceGroups": "

Adds one or more instance groups to a running cluster.

", "AddJobFlowSteps": "

AddJobFlowSteps adds new steps to a running cluster. A maximum of 256 steps are allowed in each job flow.

If your cluster is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using SSH to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and Hadoop.

A step specifies the location of a JAR file stored either on the master node of the cluster or in Amazon S3. Each step is performed by the main function of the main class of the JAR file. The main class can be specified either in the manifest of the JAR or by using the MainFunction parameter of the step.

Amazon EMR executes each step in the order listed. For a step to be considered complete, the main function must exit with a zero exit code and all Hadoop jobs started while the step was running must have completed and run successfully.

You can only add steps to a cluster that is in one of the following states: STARTING, BOOTSTRAPPING, RUNNING, or WAITING.

The string values passed into HadoopJarStep object cannot exceed a total of 10240 characters.

", "AddTags": "

Adds tags to an Amazon EMR resource, such as a cluster or an Amazon EMR Studio. Tags make it easier to associate resources in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. For more information, see Tag Clusters.

", - "CancelSteps": "

Cancels a pending step or steps in a running cluster. Available only in Amazon EMR versions 4.8.0 and later, excluding version 5.0.0. A maximum of 256 steps are allowed in each CancelSteps request. CancelSteps is idempotent but asynchronous; it does not guarantee that a step will be canceled, even if the request is successfully submitted. When you use Amazon EMR versions 5.28.0 and later, you can cancel steps that are in a PENDING or RUNNING state. In earlier versions of Amazon EMR, you can only cancel steps that are in a PENDING state.

", + "CancelSteps": "

Cancels a pending step or steps in a running cluster. Available only in Amazon EMR versions 4.8.0 and later, excluding version 5.0.0. A maximum of 256 steps are allowed in each CancelSteps request. CancelSteps is idempotent but asynchronous; it does not guarantee that a step will be canceled, even if the request is successfully submitted. When you use Amazon EMR releases 5.28.0 and later, you can cancel steps that are in a PENDING or RUNNING state. In earlier versions of Amazon EMR, you can only cancel steps that are in a PENDING state.

", "CreateSecurityConfiguration": "

Creates a security configuration, which is stored in the service and can be specified when a cluster is created.

", "CreateStudio": "

Creates a new Amazon EMR Studio.

", "CreateStudioSessionMapping": "

Maps a user or group to the Amazon EMR Studio specified by StudioId, and applies a session policy to refine Studio permissions for that user or group. Use CreateStudioSessionMapping to assign users to a Studio when you use IAM Identity Center authentication. For instructions on how to assign users to a Studio when you use IAM authentication, see Assign a user or group to your EMR Studio.

", @@ -16,7 +16,7 @@ "DescribeCluster": "

Provides cluster-level details including status, hardware and software configuration, VPC settings, and so on.

", "DescribeJobFlows": "

This API is no longer supported and will eventually be removed. We recommend you use ListClusters, DescribeCluster, ListSteps, ListInstanceGroups and ListBootstrapActions instead.

DescribeJobFlows returns a list of job flows that match all of the supplied parameters. The parameters can include a list of job flow IDs, job flow states, and restrictions on job flow creation date and time.

Regardless of supplied parameters, only job flows created within the last two months are returned.

If no parameters are supplied, then job flows matching either of the following criteria are returned:

Amazon EMR can return a maximum of 512 job flow descriptions.

", "DescribeNotebookExecution": "

Provides details of a notebook execution.

", - "DescribeReleaseLabel": "

Provides EMR release label details, such as releases available the region where the API request is run, and the available applications for a specific EMR release label. Can also list EMR release versions that support a specified version of Spark.

", + "DescribeReleaseLabel": "

Provides Amazon EMR release label details, such as the releases available in the Region where the API request is run, and the available applications for a specific Amazon EMR release label. Can also list Amazon EMR releases that support a specified version of Spark.

", "DescribeSecurityConfiguration": "

Provides the details of a security configuration by returning the configuration JSON.

", "DescribeStep": "

Provides more detail about the cluster step.

", "DescribeStudio": "

Returns details for the specified Amazon EMR Studio including ID, Name, VPC, Studio access URL, and so on.

", @@ -27,32 +27,32 @@ "GetStudioSessionMapping": "

Fetches mapping details for the specified Amazon EMR Studio and identity (user or group).

", "ListBootstrapActions": "

Provides information about the bootstrap actions associated with a cluster.

", "ListClusters": "

Provides the status of all clusters visible to this Amazon Web Services account. Allows you to filter the list of clusters based on certain criteria; for example, filtering by cluster creation date and time or by status. This call returns a maximum of 50 clusters in unsorted order per call, but returns a marker to track the paging of the cluster list across multiple ListClusters calls.

", - "ListInstanceFleets": "

Lists all available details about the instance fleets in a cluster.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", + "ListInstanceFleets": "

Lists all available details about the instance fleets in a cluster.

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions.

", "ListInstanceGroups": "

Provides all available details about the instance groups in a cluster.

", - "ListInstances": "

Provides information for all active EC2 instances and EC2 instances terminated in the last 30 days, up to a maximum of 2,000. EC2 instances in any of the following states are considered active: AWAITING_FULFILLMENT, PROVISIONING, BOOTSTRAPPING, RUNNING.

", - "ListNotebookExecutions": "

Provides summaries of all notebook executions. You can filter the list based on multiple criteria such as status, time range, and editor id. Returns a maximum of 50 notebook executions and a marker to track the paging of a longer notebook execution list across multiple ListNotebookExecution calls.

", - "ListReleaseLabels": "

Retrieves release labels of EMR services in the region where the API is called.

", + "ListInstances": "

Provides information for all active Amazon EC2 instances and Amazon EC2 instances terminated in the last 30 days, up to a maximum of 2,000. Amazon EC2 instances in any of the following states are considered active: AWAITING_FULFILLMENT, PROVISIONING, BOOTSTRAPPING, RUNNING.

", + "ListNotebookExecutions": "

Provides summaries of all notebook executions. You can filter the list based on multiple criteria such as status, time range, and editor id. Returns a maximum of 50 notebook executions and a marker to track the paging of a longer notebook execution list across multiple ListNotebookExecutions calls.

", + "ListReleaseLabels": "

Retrieves release labels of Amazon EMR services in the Region where the API is called.

", "ListSecurityConfigurations": "

Lists all the security configurations visible to this account, providing their creation dates and times, and their names. This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListSecurityConfigurations calls.

", "ListSteps": "

Provides a list of steps for the cluster in reverse order unless you specify stepIds with the request or filter by StepStates. You can specify a maximum of 10 stepIDs. The CLI automatically paginates results to return a list greater than 50 steps. To return more than 50 steps using the CLI, specify a Marker, which is a pagination token that indicates the next set of steps to retrieve.

", "ListStudioSessionMappings": "

Returns a list of all user or group session mappings for the Amazon EMR Studio specified by StudioId.

", "ListStudios": "

Returns a list of all Amazon EMR Studios associated with the Amazon Web Services account. The list includes details such as ID, Studio Access URL, and creation time for each Studio.

", "ModifyCluster": "

Modifies the number of steps that can be executed concurrently for the cluster specified using ClusterID.

", - "ModifyInstanceFleet": "

Modifies the target On-Demand and target Spot capacities for the instance fleet with the specified InstanceFleetID within the cluster specified using ClusterID. The call either succeeds or fails atomically.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", + "ModifyInstanceFleet": "

Modifies the target On-Demand and target Spot capacities for the instance fleet with the specified InstanceFleetID within the cluster specified using ClusterID. The call either succeeds or fails atomically.

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions.

", "ModifyInstanceGroups": "

ModifyInstanceGroups modifies the number of nodes and configuration settings of an instance group. The input parameters include the new target instance count for the group and the instance group ID. The call will either succeed or fail atomically.

", - "PutAutoScalingPolicy": "

Creates or updates an automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric.

", - "PutAutoTerminationPolicy": "

Auto-termination is supported in Amazon EMR versions 5.30.0 and 6.1.0 and later. For more information, see Using an auto-termination policy.

Creates or updates an auto-termination policy for an Amazon EMR cluster. An auto-termination policy defines the amount of idle time in seconds after which a cluster automatically terminates. For alternative cluster termination options, see Control cluster termination.

", + "PutAutoScalingPolicy": "

Creates or updates an automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates Amazon EC2 instances in response to the value of a CloudWatch metric.

", + "PutAutoTerminationPolicy": "

Auto-termination is supported in Amazon EMR releases 5.30.0 and 6.1.0 and later. For more information, see Using an auto-termination policy.

Creates or updates an auto-termination policy for an Amazon EMR cluster. An auto-termination policy defines the amount of idle time in seconds after which a cluster automatically terminates. For alternative cluster termination options, see Control cluster termination.

", "PutBlockPublicAccessConfiguration": "

Creates or updates an Amazon EMR block public access configuration for your Amazon Web Services account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.

", - "PutManagedScalingPolicy": "

Creates or updates a managed scaling policy for an Amazon EMR cluster. The managed scaling policy defines the limits for resources, such as EC2 instances that can be added or terminated from a cluster. The policy only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

", - "RemoveAutoScalingPolicy": "

Removes an automatic scaling policy from a specified instance group within an EMR cluster.

", + "PutManagedScalingPolicy": "

Creates or updates a managed scaling policy for an Amazon EMR cluster. The managed scaling policy defines the limits for resources, such as Amazon EC2 instances that can be added or terminated from a cluster. The policy only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

", + "RemoveAutoScalingPolicy": "

Removes an automatic scaling policy from a specified instance group within an Amazon EMR cluster.

", "RemoveAutoTerminationPolicy": "

Removes an auto-termination policy from an Amazon EMR cluster.

", - "RemoveManagedScalingPolicy": "

Removes a managed scaling policy from a specified EMR cluster.

", + "RemoveManagedScalingPolicy": "

Removes a managed scaling policy from a specified Amazon EMR cluster.

", "RemoveTags": "

Removes tags from an Amazon EMR resource, such as a cluster or Amazon EMR Studio. Tags make it easier to associate resources in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. For more information, see Tag Clusters.

The following example removes the stack tag with value Prod from a cluster:

", - "RunJobFlow": "

RunJobFlow creates and starts running a new cluster (job flow). The cluster runs the steps specified. After the steps complete, the cluster stops and the HDFS partition is lost. To prevent loss of data, configure the last step of the job flow to store results in Amazon S3. If the JobFlowInstancesConfig KeepJobFlowAliveWhenNoSteps parameter is set to TRUE, the cluster transitions to the WAITING state rather than shutting down after the steps have completed.

For additional protection, you can set the JobFlowInstancesConfig TerminationProtected parameter to TRUE to lock the cluster and prevent it from being terminated by API call, user intervention, or in the event of a job flow error.

A maximum of 256 steps are allowed in each job flow.

If your cluster is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using the SSH shell to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and Hadoop.

For long-running clusters, we recommend that you periodically store your results.

The instance fleets configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. The RunJobFlow request can contain InstanceFleets parameters or InstanceGroups parameters, but not both.

", - "SetTerminationProtection": "

SetTerminationProtection locks a cluster (job flow) so the EC2 instances in the cluster cannot be terminated by user intervention, an API call, or in the event of a job-flow error. The cluster still terminates upon successful completion of the job flow. Calling SetTerminationProtection on a cluster is similar to calling the Amazon EC2 DisableAPITermination API on all EC2 instances in a cluster.

SetTerminationProtection is used to prevent accidental termination of a cluster and to ensure that in the event of an error, the instances persist so that you can recover any data stored in their ephemeral instance storage.

To terminate a cluster that has been locked by setting SetTerminationProtection to true, you must first unlock the job flow by a subsequent call to SetTerminationProtection in which you set the value to false.

For more information, see Managing Cluster Termination in the Amazon EMR Management Guide.

", - "SetVisibleToAllUsers": "

The SetVisibleToAllUsers parameter is no longer supported. Your cluster may be visible to all users in your account. To restrict cluster access using an IAM policy, see Identity and Access Management for EMR.

Sets the Cluster$VisibleToAllUsers value for an EMR cluster. When true, IAM principals in the Amazon Web Services account can perform EMR cluster actions that their IAM policies allow. When false, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions on the cluster, regardless of IAM permissions policies attached to other IAM principals.

This action works on running clusters. When you create a cluster, use the RunJobFlowInput$VisibleToAllUsers parameter.

For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.

", + "RunJobFlow": "

RunJobFlow creates and starts running a new cluster (job flow). The cluster runs the steps specified. After the steps complete, the cluster stops and the HDFS partition is lost. To prevent loss of data, configure the last step of the job flow to store results in Amazon S3. If the JobFlowInstancesConfig KeepJobFlowAliveWhenNoSteps parameter is set to TRUE, the cluster transitions to the WAITING state rather than shutting down after the steps have completed.

For additional protection, you can set the JobFlowInstancesConfig TerminationProtected parameter to TRUE to lock the cluster and prevent it from being terminated by API call, user intervention, or in the event of a job flow error.

A maximum of 256 steps are allowed in each job flow.

If your cluster is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using the SSH shell to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and Hadoop.

For long-running clusters, we recommend that you periodically store your results.

The instance fleets configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions. The RunJobFlow request can contain InstanceFleets parameters or InstanceGroups parameters, but not both.

", + "SetTerminationProtection": "

SetTerminationProtection locks a cluster (job flow) so the Amazon EC2 instances in the cluster cannot be terminated by user intervention, an API call, or in the event of a job-flow error. The cluster still terminates upon successful completion of the job flow. Calling SetTerminationProtection on a cluster is similar to calling the Amazon EC2 DisableAPITermination API on all Amazon EC2 instances in a cluster.

SetTerminationProtection is used to prevent accidental termination of a cluster and to ensure that in the event of an error, the instances persist so that you can recover any data stored in their ephemeral instance storage.

To terminate a cluster that has been locked by setting SetTerminationProtection to true, you must first unlock the job flow by a subsequent call to SetTerminationProtection in which you set the value to false.

For more information, see Managing Cluster Termination in the Amazon EMR Management Guide.

", + "SetVisibleToAllUsers": "

The SetVisibleToAllUsers parameter is no longer supported. Your cluster may be visible to all users in your account. To restrict cluster access using an IAM policy, see Identity and Access Management for Amazon EMR.

Sets the Cluster$VisibleToAllUsers value for an Amazon EMR cluster. When true, IAM principals in the Amazon Web Services account can perform Amazon EMR cluster actions that their IAM policies allow. When false, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform Amazon EMR actions on the cluster, regardless of IAM permissions policies attached to other IAM principals.

This action works on running clusters. When you create a cluster, use the RunJobFlowInput$VisibleToAllUsers parameter.

For more information, see Understanding the Amazon EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.

", "StartNotebookExecution": "

Starts a notebook execution.

", "StopNotebookExecution": "

Stops a notebook execution.

", - "TerminateJobFlows": "

TerminateJobFlows shuts a list of clusters (job flows) down. When a job flow is shut down, any step not yet completed is canceled and the EC2 instances on which the cluster is running are stopped. Any log files not already saved are uploaded to Amazon S3 if a LogUri was specified when the cluster was created.

The maximum number of clusters allowed is 10. The call to TerminateJobFlows is asynchronous. Depending on the configuration of the cluster, it may take up to 1-5 minutes for the cluster to completely terminate and release allocated resources, such as Amazon EC2 instances.

", + "TerminateJobFlows": "

TerminateJobFlows shuts a list of clusters (job flows) down. When a job flow is shut down, any step not yet completed is canceled and the Amazon EC2 instances on which the cluster is running are stopped. Any log files not already saved are uploaded to Amazon S3 if a LogUri was specified when the cluster was created.

The maximum number of clusters allowed is 10. The call to TerminateJobFlows is asynchronous. Depending on the configuration of the cluster, it may take up to 1-5 minutes for the cluster to completely terminate and release allocated resources, such as Amazon EC2 instances.

", "UpdateStudio": "

Updates an Amazon EMR Studio configuration, including attributes such as name, description, and subnets.

", "UpdateStudioSessionMapping": "

Updates the session policy attached to the user or group for the specified Amazon EMR Studio.

" }, @@ -108,7 +108,7 @@ "AdjustmentType": { "base": null, "refs": { - "SimpleScalingPolicyConfiguration$AdjustmentType": "

The way in which EC2 instances are added (if ScalingAdjustment is a positive number) or terminated (if ScalingAdjustment is a negative number) each time the scaling activity is triggered. CHANGE_IN_CAPACITY is the default. CHANGE_IN_CAPACITY indicates that the EC2 instance count increments or decrements by ScalingAdjustment, which should be expressed as an integer. PERCENT_CHANGE_IN_CAPACITY indicates the instance count increments or decrements by the percentage specified by ScalingAdjustment, which should be expressed as an integer. For example, 20 indicates an increase in 20% increments of cluster capacity. EXACT_CAPACITY indicates the scaling activity results in an instance group with the number of EC2 instances specified by ScalingAdjustment, which should be expressed as a positive integer.

" + "SimpleScalingPolicyConfiguration$AdjustmentType": "

The way in which Amazon EC2 instances are added (if ScalingAdjustment is a positive number) or terminated (if ScalingAdjustment is a negative number) each time the scaling activity is triggered. CHANGE_IN_CAPACITY is the default. CHANGE_IN_CAPACITY indicates that the Amazon EC2 instance count increments or decrements by ScalingAdjustment, which should be expressed as an integer. PERCENT_CHANGE_IN_CAPACITY indicates the instance count increments or decrements by the percentage specified by ScalingAdjustment, which should be expressed as an integer. For example, 20 indicates an increase in 20% increments of cluster capacity. EXACT_CAPACITY indicates the scaling activity results in an instance group with the number of Amazon EC2 instances specified by ScalingAdjustment, which should be expressed as a positive integer.

" } }, "Application": { @@ -129,7 +129,7 @@ "refs": { "AddInstanceFleetOutput$ClusterArn": "

The Amazon Resource Name of the cluster.

", "AddInstanceGroupsOutput$ClusterArn": "

The Amazon Resource Name of the cluster.

", - "AddJobFlowStepsInput$ExecutionRoleArn": "

The Amazon Resource Name (ARN) of the runtime role for a step on the cluster. The runtime role can be a cross-account IAM role. The runtime role ARN is a combination of account ID, role name, and role type using the following format: arn:partition:service:region:account:resource.

For example, arn:aws:iam::1234567890:role/ReadOnly is a correctly formatted runtime role ARN.

", + "AddJobFlowStepsInput$ExecutionRoleArn": "

The Amazon Resource Name (ARN) of the runtime role for a step on the cluster. The runtime role can be a cross-account IAM role. The runtime role ARN is a combination of account ID, role name, and role type using the following format: arn:partition:service:region:account:resource.

For example, arn:aws:iam::1234567890:role/ReadOnly is a correctly formatted runtime role ARN.

", "BlockPublicAccessConfigurationMetadata$CreatedByArn": "

The Amazon Resource Name that created or last modified the configuration.

", "Cluster$ClusterArn": "

The Amazon Resource Name of the cluster.

", "ClusterSummary$ClusterArn": "

The Amazon Resource Name of the cluster.

", @@ -142,21 +142,21 @@ "base": null, "refs": { "CreateStudioInput$AuthMode": "

Specifies whether the Studio authenticates users using IAM or IAM Identity Center.

", - "Studio$AuthMode": "

Specifies whether the Amazon EMR Studio authenticates users using IAM or IAM Identity Center.

", + "Studio$AuthMode": "

Specifies whether the Amazon EMR Studio authenticates users with IAM or IAM Identity Center.

", "StudioSummary$AuthMode": "

Specifies whether the Studio authenticates users using IAM or IAM Identity Center.

" } }, "AutoScalingPolicy": { - "base": "

An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. An automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See PutAutoScalingPolicy.

", + "base": "

An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. An automatic scaling policy defines how an instance group dynamically adds and terminates Amazon EC2 instances in response to the value of a CloudWatch metric. See PutAutoScalingPolicy.

", "refs": { - "InstanceGroupConfig$AutoScalingPolicy": "

An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See PutAutoScalingPolicy.

", + "InstanceGroupConfig$AutoScalingPolicy": "

An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates Amazon EC2 instances in response to the value of a CloudWatch metric. See PutAutoScalingPolicy.

", "PutAutoScalingPolicyInput$AutoScalingPolicy": "

Specifies the definition of the automatic scaling policy.

" } }, "AutoScalingPolicyDescription": { - "base": "

An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See PutAutoScalingPolicy.

", + "base": "

An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates Amazon EC2 instances in response to the value of a CloudWatch metric. See PutAutoScalingPolicy.

", "refs": { - "InstanceGroup$AutoScalingPolicy": "

An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See PutAutoScalingPolicy.

", + "InstanceGroup$AutoScalingPolicy": "

An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates Amazon EC2 instances in response to the value of a CloudWatch metric. See PutAutoScalingPolicy.

", "PutAutoScalingPolicyOutput$AutoScalingPolicy": "

The automatic scaling policy definition.

" } }, @@ -195,8 +195,8 @@ "BlockPublicAccessConfiguration": { "base": "

A configuration for Amazon EMR block public access. When BlockPublicSecurityGroupRules is set to true, Amazon EMR prevents cluster creation if one of the cluster's security groups has a rule that allows inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges.

", "refs": { - "GetBlockPublicAccessConfigurationOutput$BlockPublicAccessConfiguration": "

A configuration for Amazon EMR block public access. The configuration applies to all clusters created in your account for the current Region. The configuration specifies whether block public access is enabled. If block public access is enabled, security groups associated with the cluster cannot have rules that allow inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges in the BlockPublicAccessConfiguration. By default, Port 22 (SSH) is an exception, and public access is allowed on this port. You can change this by updating the block public access configuration to remove the exception.

For accounts that created clusters in a Region before November 25, 2019, block public access is disabled by default in that Region. To use this feature, you must manually enable and configure it. For accounts that did not create an EMR cluster in a Region before this date, block public access is enabled by default in that Region.

", - "PutBlockPublicAccessConfigurationInput$BlockPublicAccessConfiguration": "

A configuration for Amazon EMR block public access. The configuration applies to all clusters created in your account for the current Region. The configuration specifies whether block public access is enabled. If block public access is enabled, security groups associated with the cluster cannot have rules that allow inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges in the BlockPublicAccessConfiguration. By default, Port 22 (SSH) is an exception, and public access is allowed on this port. You can change this by updating BlockPublicSecurityGroupRules to remove the exception.

For accounts that created clusters in a Region before November 25, 2019, block public access is disabled by default in that Region. To use this feature, you must manually enable and configure it. For accounts that did not create an EMR cluster in a Region before this date, block public access is enabled by default in that Region.

" + "GetBlockPublicAccessConfigurationOutput$BlockPublicAccessConfiguration": "

A configuration for Amazon EMR block public access. The configuration applies to all clusters created in your account for the current Region. The configuration specifies whether block public access is enabled. If block public access is enabled, security groups associated with the cluster cannot have rules that allow inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges in the BlockPublicAccessConfiguration. By default, Port 22 (SSH) is an exception, and public access is allowed on this port. You can change this by updating the block public access configuration to remove the exception.

For accounts that created clusters in a Region before November 25, 2019, block public access is disabled by default in that Region. To use this feature, you must manually enable and configure it. For accounts that did not create an Amazon EMR cluster in a Region before this date, block public access is enabled by default in that Region.

", + "PutBlockPublicAccessConfigurationInput$BlockPublicAccessConfiguration": "

A configuration for Amazon EMR block public access. The configuration applies to all clusters created in your account for the current Region. The configuration specifies whether block public access is enabled. If block public access is enabled, security groups associated with the cluster cannot have rules that allow inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges in the BlockPublicAccessConfiguration. By default, Port 22 (SSH) is an exception, and public access is allowed on this port. You can change this by updating BlockPublicSecurityGroupRules to remove the exception.

For accounts that created clusters in a Region before November 25, 2019, block public access is disabled by default in that Region. To use this feature, you must manually enable and configure it. For accounts that did not create an Amazon EMR cluster in a Region before this date, block public access is enabled by default in that Region.

" } }, "BlockPublicAccessConfigurationMetadata": { @@ -208,18 +208,18 @@ "Boolean": { "base": null, "refs": { - "BlockPublicAccessConfiguration$BlockPublicSecurityGroupRules": "

Indicates whether Amazon EMR block public access is enabled (true) or disabled (false). By default, the value is false for accounts that have created EMR clusters before July 2019. For accounts created after this, the default is true.

", + "BlockPublicAccessConfiguration$BlockPublicSecurityGroupRules": "

Indicates whether Amazon EMR block public access is enabled (true) or disabled (false). By default, the value is false for accounts that have created Amazon EMR clusters before July 2019. For accounts created after this, the default is true.

", "Cluster$AutoTerminate": "

Specifies whether the cluster should terminate after completing all steps.

", - "Cluster$TerminationProtected": "

Indicates whether Amazon EMR will lock the cluster to prevent the EC2 instances from being terminated by an API call or user intervention, or in the event of a cluster error.

", - "Cluster$VisibleToAllUsers": "

Indicates whether the cluster is visible to IAM principals in the Amazon Web Services account associated with the cluster. When true, IAM principals in the Amazon Web Services account can perform EMR cluster actions on the cluster that their IAM policies allow. When false, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.

The default value is true if a value is not provided when creating a cluster using the EMR API RunJobFlow command, the CLI create-cluster command, or the Amazon Web Services Management Console.

", - "JobFlowDetail$VisibleToAllUsers": "

Indicates whether the cluster is visible to IAM principals in the Amazon Web Services account associated with the cluster. When true, IAM principals in the Amazon Web Services account can perform EMR cluster actions that their IAM policies allow. When false, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.

The default value is true if a value is not provided when creating a cluster using the EMR API RunJobFlow command, the CLI create-cluster command, or the Amazon Web Services Management Console.

", + "Cluster$TerminationProtected": "

Indicates whether Amazon EMR will lock the cluster to prevent the Amazon EC2 instances from being terminated by an API call or user intervention, or in the event of a cluster error.

", + "Cluster$VisibleToAllUsers": "

Indicates whether the cluster is visible to IAM principals in the Amazon Web Services account associated with the cluster. When true, IAM principals in the Amazon Web Services account can perform Amazon EMR cluster actions on the cluster that their IAM policies allow. When false, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform Amazon EMR actions, regardless of IAM permissions policies attached to other IAM principals.

The default value is true if a value is not provided when creating a cluster using the Amazon EMR API RunJobFlow command, the CLI create-cluster command, or the Amazon Web Services Management Console.

", + "JobFlowDetail$VisibleToAllUsers": "

Indicates whether the cluster is visible to IAM principals in the Amazon Web Services account associated with the cluster. When true, IAM principals in the Amazon Web Services account can perform Amazon EMR cluster actions that their IAM policies allow. When false, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform Amazon EMR actions, regardless of IAM permissions policies attached to other IAM principals.

The default value is true if a value is not provided when creating a cluster using the Amazon EMR API RunJobFlow command, the CLI create-cluster command, or the Amazon Web Services Management Console.

", "JobFlowInstancesConfig$KeepJobFlowAliveWhenNoSteps": "

Specifies whether the cluster should remain available after completing all steps. Defaults to true. For more information about configuring cluster termination, see Control Cluster Termination in the EMR Management Guide.

", "JobFlowInstancesConfig$TerminationProtected": "

Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error.

", "JobFlowInstancesDetail$KeepJobFlowAliveWhenNoSteps": "

Specifies whether the cluster should remain available after completing all steps.

", "JobFlowInstancesDetail$TerminationProtected": "

Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job-flow error.

", - "RunJobFlowInput$VisibleToAllUsers": "

The VisibleToAllUsers parameter is no longer supported. By default, the value is set to true. Setting it to false now has no effect.

Set this value to true so that IAM principals in the Amazon Web Services account associated with the cluster can perform EMR actions on the cluster that their IAM policies allow. This value defaults to true for clusters created using the EMR API or the CLI create-cluster command.

When set to false, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions for the cluster, regardless of the IAM permissions policies attached to other IAM principals. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.

", + "RunJobFlowInput$VisibleToAllUsers": "

The VisibleToAllUsers parameter is no longer supported. By default, the value is set to true. Setting it to false now has no effect.

Set this value to true so that IAM principals in the Amazon Web Services account associated with the cluster can perform Amazon EMR actions on the cluster that their IAM policies allow. This value defaults to true for clusters created using the Amazon EMR API or the CLI create-cluster command.

When set to false, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform Amazon EMR actions for the cluster, regardless of the IAM permissions policies attached to other IAM principals. For more information, see Understanding the Amazon EMR cluster VisibleToAllUsers setting in the Amazon EMR Management Guide.

", "SetTerminationProtectionInput$TerminationProtected": "

A Boolean that indicates whether to protect the cluster and prevent the Amazon EC2 instances in the cluster from shutting down due to API calls, user intervention, or job-flow error.

", - "SetVisibleToAllUsersInput$VisibleToAllUsers": "

A value of true indicates that an IAM principal in the Amazon Web Services account can perform EMR actions on the cluster that the IAM policies attached to the principal allow. A value of false indicates that only the IAM principal that created the cluster and the Amazon Web Services root user can perform EMR actions on the cluster.

" + "SetVisibleToAllUsersInput$VisibleToAllUsers": "

A value of true indicates that an IAM principal in the Amazon Web Services account can perform Amazon EMR actions on the cluster that the IAM policies attached to the principal allow. A value of false indicates that only the IAM principal that created the cluster and the Amazon Web Services root user can perform Amazon EMR actions on the cluster.

" } }, "BooleanObject": { @@ -314,7 +314,7 @@ "PutAutoScalingPolicyInput$ClusterId": "

Specifies the ID of a cluster. The instance group to which the automatic scaling policy is applied is within this cluster.

", "PutAutoScalingPolicyOutput$ClusterId": "

Specifies the ID of a cluster. The instance group to which the automatic scaling policy is applied is within this cluster.

", "PutAutoTerminationPolicyInput$ClusterId": "

Specifies the ID of the Amazon EMR cluster to which the auto-termination policy will be attached.

", - "PutManagedScalingPolicyInput$ClusterId": "

Specifies the ID of an EMR cluster where the managed scaling policy is attached.

", + "PutManagedScalingPolicyInput$ClusterId": "

Specifies the ID of an Amazon EMR cluster where the managed scaling policy is attached.

", "RemoveAutoScalingPolicyInput$ClusterId": "

Specifies the ID of a cluster. The instance group to which the automatic scaling policy is applied is within this cluster.

", "RemoveAutoTerminationPolicyInput$ClusterId": "

Specifies the ID of the Amazon EMR cluster from which the auto-termination policy will be removed.

", "RemoveManagedScalingPolicyInput$ClusterId": "

Specifies the ID of the cluster from which the managed scaling policy will be removed.

" @@ -389,9 +389,9 @@ } }, "ComputeLimits": { - "base": "

The EC2 unit limits for a managed scaling policy. The managed scaling activity of a cluster can not be above or below these limits. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

", + "base": "

The Amazon EC2 unit limits for a managed scaling policy. The managed scaling activity of a cluster can not be above or below these limits. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

", "refs": { - "ManagedScalingPolicy$ComputeLimits": "

The EC2 unit limits for a managed scaling policy. The managed scaling activity of a cluster is not allowed to go above or below these limits. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

" + "ManagedScalingPolicy$ComputeLimits": "

The Amazon EC2 unit limits for a managed scaling policy. The managed scaling activity of a cluster is not allowed to go above or below these limits. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

" } }, "ComputeLimitsUnitType": { @@ -409,15 +409,15 @@ "ConfigurationList": { "base": null, "refs": { - "Cluster$Configurations": "

Applies only to Amazon EMR releases 4.x and later. The list of Configurations supplied to the EMR cluster.

", + "Cluster$Configurations": "

Applies only to Amazon EMR releases 4.x and later. The list of configurations that are supplied to the Amazon EMR cluster.

", "Configuration$Configurations": "

A list of additional configurations to apply within a configuration object.

", "InstanceGroup$Configurations": "

Amazon EMR releases 4.x or later.

The list of configurations supplied for an Amazon EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).

", "InstanceGroup$LastSuccessfullyAppliedConfigurations": "

A list of configurations that were successfully applied for an instance group last time.

", - "InstanceGroupConfig$Configurations": "

Amazon EMR releases 4.x or later.

The list of configurations supplied for an EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).

", + "InstanceGroupConfig$Configurations": "

Amazon EMR releases 4.x or later.

The list of configurations supplied for an Amazon EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).

", "InstanceGroupModifyConfig$Configurations": "

A list of new or modified configurations to apply for an instance group.

", "InstanceTypeConfig$Configurations": "

A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software that run on the cluster.

", "InstanceTypeSpecification$Configurations": "

A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software bundled with Amazon EMR.

", - "RunJobFlowInput$Configurations": "

For Amazon EMR releases 4.0 and later. The list of configurations supplied for the EMR cluster you are creating.

" + "RunJobFlowInput$Configurations": "

For Amazon EMR releases 4.0 and later. The list of configurations supplied for the Amazon EMR cluster that you are creating.

" } }, "CreateSecurityConfigurationInput": { @@ -602,7 +602,7 @@ "EC2InstanceIdsToTerminateList": { "base": null, "refs": { - "InstanceGroupModifyConfig$EC2InstanceIdsToTerminate": "

The EC2 InstanceIds to terminate. After you terminate the instances, the instance group will not return to its original requested size.

" + "InstanceGroupModifyConfig$EC2InstanceIdsToTerminate": "

The Amazon EC2 InstanceIds to terminate. After you terminate the instances, the instance group will not return to its original requested size.

" } }, "EbsBlockDevice": { @@ -633,12 +633,12 @@ "EbsConfiguration": { "base": "

The Amazon EBS configuration of a cluster instance.

", "refs": { - "InstanceGroupConfig$EbsConfiguration": "

EBS configurations that will be attached to each EC2 instance in the instance group.

", + "InstanceGroupConfig$EbsConfiguration": "

EBS configurations that will be attached to each Amazon EC2 instance in the instance group.

", "InstanceTypeConfig$EbsConfiguration": "

The configuration of Amazon Elastic Block Store (Amazon EBS) attached to each instance as defined by InstanceType.

" } }, "EbsVolume": { - "base": "

EBS block device that's attached to an EC2 instance.

", + "base": "

EBS block device that's attached to an Amazon EC2 instance.

", "refs": { "EbsVolumeList$member": null } @@ -650,9 +650,16 @@ } }, "Ec2InstanceAttributes": { - "base": "

Provides information about the EC2 instances in a cluster grouped by category. For example, key name, subnet ID, IAM instance profile, and so on.

", + "base": "

Provides information about the Amazon EC2 instances in a cluster grouped by category. For example, key name, subnet ID, IAM instance profile, and so on.

", "refs": { - "Cluster$Ec2InstanceAttributes": "

Provides information about the EC2 instances in a cluster grouped by category. For example, key name, subnet ID, IAM instance profile, and so on.

" + "Cluster$Ec2InstanceAttributes": "

Provides information about the Amazon EC2 instances in a cluster grouped by category. For example, key name, subnet ID, IAM instance profile, and so on.

" + } + }, + "EnvironmentVariablesMap": { + "base": null, + "refs": { + "NotebookExecution$EnvironmentVariables": "

The environment variables associated with the notebook execution.

", + "StartNotebookExecutionInput$EnvironmentVariables": "

The environment variables associated with the notebook execution.

" } }, "ErrorCode": { @@ -664,7 +671,7 @@ "ErrorData": { "base": null, "refs": { - "ErrorDetail$ErrorData": "

A list of key value pairs that provide contextual information to explain why the error may have occured.

" + "ErrorDetail$ErrorData": "

A list of key value pairs that provides contextual information about why an error occurred.

" } }, "ErrorDetail": { @@ -676,7 +683,7 @@ "ErrorDetailList": { "base": null, "refs": { - "ClusterStatus$ErrorDetails": "

A list of tuples that provide information about the errors that caused a cluster termination. This structure may have up to 10 different ErrorDetail tuples.

" + "ClusterStatus$ErrorDetails": "

A list of tuples that provides information about the errors that caused a cluster to terminate. This structure can contain up to 10 different ErrorDetail tuples.

" } }, "ErrorMessage": { @@ -687,16 +694,16 @@ } }, "ExecutionEngineConfig": { - "base": "

Specifies the execution engine (cluster) to run the notebook and perform the notebook execution, for example, an EMR cluster.

", + "base": "

Specifies the execution engine (cluster) to run the notebook and perform the notebook execution, for example, an Amazon EMR cluster.

", "refs": { - "NotebookExecution$ExecutionEngine": "

The execution engine, such as an EMR cluster, used to run the EMR notebook and perform the notebook execution.

", + "NotebookExecution$ExecutionEngine": "

The execution engine, such as an Amazon EMR cluster, used to run the Amazon EMR notebook and perform the notebook execution.

", "StartNotebookExecutionInput$ExecutionEngine": "

Specifies the execution engine (cluster) that runs the notebook execution.

" } }, "ExecutionEngineType": { "base": null, "refs": { - "ExecutionEngineConfig$Type": "

The type of execution engine. A value of EMR specifies an EMR cluster.

" + "ExecutionEngineConfig$Type": "

The type of execution engine. A value of EMR specifies an Amazon EMR cluster.

" } }, "FailureDetails": { @@ -768,6 +775,12 @@ "StepSummary$Config": "

The Hadoop job configuration of the cluster step.

" } }, + "IAMRoleArn": { + "base": null, + "refs": { + "ExecutionEngineConfig$ExecutionRoleArn": "

The execution role ARN required for the notebook execution.

" + } + }, "IdentityType": { "base": null, "refs": { @@ -781,7 +794,7 @@ } }, "Instance": { - "base": "

Represents an EC2 instance provisioned as part of cluster.

", + "base": "

Represents an Amazon EC2 instance provisioned as part of cluster.

", "refs": { "InstanceList$member": null } @@ -789,17 +802,17 @@ "InstanceCollectionType": { "base": null, "refs": { - "Cluster$InstanceCollectionType": "

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

The instance group configuration of the cluster. A value of INSTANCE_GROUP indicates a uniform instance group configuration. A value of INSTANCE_FLEET indicates an instance fleets configuration.

" + "Cluster$InstanceCollectionType": "

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions.

The instance group configuration of the cluster. A value of INSTANCE_GROUP indicates a uniform instance group configuration. A value of INSTANCE_FLEET indicates an instance fleets configuration.

" } }, "InstanceFleet": { - "base": "

Describes an instance fleet, which is a group of EC2 instances that host a particular node type (master, core, or task) in an Amazon EMR cluster. Instance fleets can consist of a mix of instance types and On-Demand and Spot Instances, which are provisioned to meet a defined target capacity.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", + "base": "

Describes an instance fleet, which is a group of Amazon EC2 instances that host a particular node type (master, core, or task) in an Amazon EMR cluster. Instance fleets can consist of a mix of instance types and On-Demand and Spot Instances, which are provisioned to meet a defined target capacity.

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions.

", "refs": { "InstanceFleetList$member": null } }, "InstanceFleetConfig": { - "base": "

The configuration that defines an instance fleet.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", + "base": "

The configuration that defines an instance fleet.

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions.

", "refs": { "AddInstanceFleetInput$InstanceFleet": "

Specifies the configuration of the instance fleet.

", "InstanceFleetConfigList$member": null @@ -808,14 +821,14 @@ "InstanceFleetConfigList": { "base": null, "refs": { - "JobFlowInstancesConfig$InstanceFleets": "

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

Describes the EC2 instances and instance configurations for clusters that use the instance fleet configuration.

" + "JobFlowInstancesConfig$InstanceFleets": "

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions.

Describes the Amazon EC2 instances and instance configurations for clusters that use the instance fleet configuration.

" } }, "InstanceFleetId": { "base": null, "refs": { "AddInstanceFleetOutput$InstanceFleetId": "

The unique identifier of the instance fleet.

", - "Instance$InstanceFleetId": "

The unique identifier of the instance fleet to which an EC2 instance belongs.

", + "Instance$InstanceFleetId": "

The unique identifier of the instance fleet to which an Amazon EC2 instance belongs.

", "InstanceFleet$Id": "

The unique identifier of the instance fleet.

", "InstanceFleetModifyConfig$InstanceFleetId": "

A unique identifier for the instance fleet.

", "ListInstancesInput$InstanceFleetId": "

The unique identifier of the instance fleet.

" @@ -828,13 +841,13 @@ } }, "InstanceFleetModifyConfig": { - "base": "

Configuration parameters for an instance fleet modification request.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", + "base": "

Configuration parameters for an instance fleet modification request.

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions.

", "refs": { "ModifyInstanceFleetInput$InstanceFleet": "

The configuration parameters of the instance fleet.

" } }, "InstanceFleetProvisioningSpecifications": { - "base": "

The launch specification for Spot Instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand and Spot instance allocation strategies are available in Amazon EMR version 5.12.1 and later.

", + "base": "

The launch specification for Spot Instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions. On-Demand and Spot instance allocation strategies are available in Amazon EMR releases 5.12.1 and later.

", "refs": { "InstanceFleet$LaunchSpecifications": "

Describes the launch specification for an instance fleet.

", "InstanceFleetConfig$LaunchSpecifications": "

The launch specification for the instance fleet.

" @@ -851,11 +864,11 @@ "InstanceFleetState": { "base": null, "refs": { - "InstanceFleetStatus$State": "

A code representing the instance fleet status.

" + "InstanceFleetStatus$State": "

A code representing the instance fleet status.

" } }, "InstanceFleetStateChangeReason": { - "base": "

Provides status change reason details for the instance fleet.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", + "base": "

Provides status change reason details for the instance fleet.

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions.

", "refs": { "InstanceFleetStatus$StateChangeReason": "

Provides status change reason details for the instance fleet.

" } @@ -867,13 +880,13 @@ } }, "InstanceFleetStatus": { - "base": "

The status of the instance fleet.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", + "base": "

The status of the instance fleet.

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions.

", "refs": { "InstanceFleet$Status": "

The current status of the instance fleet.

" } }, "InstanceFleetTimeline": { - "base": "

Provides historical timestamps for the instance fleet, including the time of creation, the time it became ready to run jobs, and the time of termination.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", + "base": "

Provides historical timestamps for the instance fleet, including the time of creation, the time it became ready to run jobs, and the time of termination.

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions.

", "refs": { "InstanceFleetStatus$Timeline": "

Provides historical timestamps for the instance fleet, including the time of creation, the time it became ready to run jobs, and the time of termination.

" } @@ -1021,7 +1034,7 @@ "refs": { "InstanceGroupConfig$InstanceRole": "

The role of the instance group in the cluster.

", "InstanceGroupDetail$InstanceRole": "

Instance group role in the cluster

", - "PlacementGroupConfig$InstanceRole": "

Role of the instance in the cluster.

Starting with Amazon EMR version 5.23.0, the only supported instance role is MASTER.

" + "PlacementGroupConfig$InstanceRole": "

Role of the instance in the cluster.

Starting with Amazon EMR release 5.23.0, the only supported instance role is MASTER.

" } }, "InstanceState": { @@ -1064,20 +1077,20 @@ "InstanceType": { "base": null, "refs": { - "Instance$InstanceType": "

The EC2 instance type, for example m3.xlarge.

", - "InstanceGroup$InstanceType": "

The EC2 instance type for all instances in the instance group.

", - "InstanceGroupConfig$InstanceType": "

The EC2 instance type for all instances in the instance group.

", - "InstanceGroupDetail$InstanceType": "

EC2 instance type.

", - "InstanceTypeConfig$InstanceType": "

An EC2 instance type, such as m3.xlarge.

", - "InstanceTypeSpecification$InstanceType": "

The EC2 instance type, for example m3.xlarge.

", - "JobFlowInstancesConfig$MasterInstanceType": "

The EC2 instance type of the master node.

", - "JobFlowInstancesConfig$SlaveInstanceType": "

The EC2 instance type of the core and task nodes.

", + "Instance$InstanceType": "

The Amazon EC2 instance type, for example m3.xlarge.

", + "InstanceGroup$InstanceType": "

The Amazon EC2 instance type for all instances in the instance group.

", + "InstanceGroupConfig$InstanceType": "

The Amazon EC2 instance type for all instances in the instance group.

", + "InstanceGroupDetail$InstanceType": "

Amazon EC2 instance type.

", + "InstanceTypeConfig$InstanceType": "

An Amazon EC2 instance type, such as m3.xlarge.

", + "InstanceTypeSpecification$InstanceType": "

The Amazon EC2 instance type, for example m3.xlarge.

", + "JobFlowInstancesConfig$MasterInstanceType": "

The Amazon EC2 instance type of the master node.

", + "JobFlowInstancesConfig$SlaveInstanceType": "

The Amazon EC2 instance type of the core and task nodes.

", "JobFlowInstancesDetail$MasterInstanceType": "

The Amazon EC2 master node instance type.

", "JobFlowInstancesDetail$SlaveInstanceType": "

The Amazon EC2 core and task node instance type.

" } }, "InstanceTypeConfig": { - "base": "

An instance type configuration for each instance type in an instance fleet, which determines the EC2 instances Amazon EMR attempts to provision to fulfill On-Demand and Spot target capacities. When you use an allocation strategy, you can include a maximum of 30 instance type configurations for a fleet. For more information about how to use an allocation strategy, see Configure Instance Fleets. Without an allocation strategy, you may specify a maximum of five instance type configurations for a fleet.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", + "base": "

An instance type configuration for each instance type in an instance fleet, which determines the Amazon EC2 instances Amazon EMR attempts to provision to fulfill On-Demand and Spot target capacities. When you use an allocation strategy, you can include a maximum of 30 instance type configurations for a fleet. For more information about how to use an allocation strategy, see Configure Instance Fleets. Without an allocation strategy, you may specify a maximum of five instance type configurations for a fleet.

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions.

", "refs": { "InstanceTypeConfigList$member": null } @@ -1085,11 +1098,11 @@ "InstanceTypeConfigList": { "base": null, "refs": { - "InstanceFleetConfig$InstanceTypeConfigs": "

The instance type configurations that define the EC2 instances in the instance fleet.

" + "InstanceFleetConfig$InstanceTypeConfigs": "

The instance type configurations that define the Amazon EC2 instances in the instance fleet.

" } }, "InstanceTypeSpecification": { - "base": "

The configuration specification for each instance type in an instance fleet.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", + "base": "

The configuration specification for each instance type in an instance fleet.

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions.

", "refs": { "InstanceTypeSpecificationList$member": null } @@ -1104,15 +1117,15 @@ "base": null, "refs": { "CloudWatchAlarmDefinition$EvaluationPeriods": "

The number of periods, in five-minute increments, during which the alarm condition must exist before the alarm triggers automatic scaling activity. The default value is 1.

", - "CloudWatchAlarmDefinition$Period": "

The period, in seconds, over which the statistic is applied. EMR CloudWatch metrics are emitted every five minutes (300 seconds), so if an EMR CloudWatch metric is specified, specify 300.

", - "Cluster$NormalizedInstanceHours": "

An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.

", - "Cluster$EbsRootVolumeSize": "

The size, in GiB, of the Amazon EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.

", + "CloudWatchAlarmDefinition$Period": "

The period, in seconds, over which the statistic is applied. CloudWatch metrics for Amazon EMR are emitted every five minutes (300 seconds), so if you specify a CloudWatch metric, specify 300.

", + "Cluster$NormalizedInstanceHours": "

An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an Amazon EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.

", + "Cluster$EbsRootVolumeSize": "

The size, in GiB, of the Amazon EBS root device volume of the Linux AMI that is used for each Amazon EC2 instance. Available in Amazon EMR releases 4.x and later.

", "Cluster$StepConcurrencyLevel": "

Specifies the number of steps that can be executed concurrently.

", - "ClusterSummary$NormalizedInstanceHours": "

An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.

", - "ComputeLimits$MinimumCapacityUnits": "

The lower boundary of EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

", - "ComputeLimits$MaximumCapacityUnits": "

The upper boundary of EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

", - "ComputeLimits$MaximumOnDemandCapacityUnits": "

The upper boundary of On-Demand EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. The On-Demand units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between On-Demand and Spot Instances.

", - "ComputeLimits$MaximumCoreCapacityUnits": "

The upper boundary of EC2 units for core node type in a cluster. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. The core units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between core and task nodes.

", + "ClusterSummary$NormalizedInstanceHours": "

An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an Amazon EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.

", + "ComputeLimits$MinimumCapacityUnits": "

The lower boundary of Amazon EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

", + "ComputeLimits$MaximumCapacityUnits": "

The upper boundary of Amazon EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

", + "ComputeLimits$MaximumOnDemandCapacityUnits": "

The upper boundary of On-Demand Amazon EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. The On-Demand units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between On-Demand and Spot Instances.

", + "ComputeLimits$MaximumCoreCapacityUnits": "

The upper boundary of Amazon EC2 units for core node type in a cluster. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. The core units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between core and task nodes.

", "EbsBlockDeviceConfig$VolumesPerInstance": "

Number of EBS volumes with a specific volume configuration that are associated with every instance in the instance group

", "InstanceGroup$RequestedInstanceCount": "

The target number of instances for the instance group.

", "InstanceGroup$RunningInstanceCount": "

The number of instances currently running in this instance group.

", @@ -1121,17 +1134,17 @@ "InstanceGroupDetail$InstanceRunningCount": "

Actual count of running instances.

", "InstanceGroupModifyConfig$InstanceCount": "

Target size for the instance group.

", "InstanceResizePolicy$InstanceTerminationTimeout": "

Decommissioning timeout override for the specific list of instances to be terminated.

", - "JobFlowInstancesConfig$InstanceCount": "

The number of EC2 instances in the cluster.

", + "JobFlowInstancesConfig$InstanceCount": "

The number of Amazon EC2 instances in the cluster.

", "JobFlowInstancesDetail$InstanceCount": "

The number of Amazon EC2 instances in the cluster. If the value is 1, the same instance serves as both the master and core and task node. If the value is greater than 1, one instance is the master node and all others are core and task nodes.

", "JobFlowInstancesDetail$NormalizedInstanceHours": "

An approximation of the cost of the cluster, represented in m1.small/hours. This value is increased one time for every hour that an m1.small instance runs. Larger instances are weighted more heavily, so an Amazon EC2 instance that is roughly four times more expensive would result in the normalized instance hours being increased incrementally four times. This result is only an approximation and does not reflect the actual billing rate.

", "ModifyClusterInput$StepConcurrencyLevel": "

The number of steps that can be executed concurrently. You can specify a minimum of 1 step and a maximum of 256 steps. We recommend that you do not change this parameter while steps are running or the ActionOnFailure setting may not behave as expected. For more information see Step$ActionOnFailure.

", "ModifyClusterOutput$StepConcurrencyLevel": "

The number of steps that can be executed concurrently.

", - "RunJobFlowInput$EbsRootVolumeSize": "

The size, in GiB, of the Amazon EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.

", + "RunJobFlowInput$EbsRootVolumeSize": "

The size, in GiB, of the Amazon EBS root device volume of the Linux AMI that is used for each Amazon EC2 instance. Available in Amazon EMR releases 4.x and later.

", "RunJobFlowInput$StepConcurrencyLevel": "

Specifies the number of steps that can be executed concurrently. The default value is 1. The maximum value is 256.

", - "ScalingConstraints$MinCapacity": "

The lower boundary of EC2 instances in an instance group below which scaling activities are not allowed to shrink. Scale-in activities will not terminate instances below this boundary.

", - "ScalingConstraints$MaxCapacity": "

The upper boundary of EC2 instances in an instance group beyond which scaling activities are not allowed to grow. Scale-out activities will not add instances beyond this boundary.

", + "ScalingConstraints$MinCapacity": "

The lower boundary of Amazon EC2 instances in an instance group below which scaling activities are not allowed to shrink. Scale-in activities will not terminate instances below this boundary.

", + "ScalingConstraints$MaxCapacity": "

The upper boundary of Amazon EC2 instances in an instance group beyond which scaling activities are not allowed to grow. Scale-out activities will not add instances beyond this boundary.

", "ShrinkPolicy$DecommissionTimeout": "

The desired timeout for decommissioning an instance. Overrides the default YARN decommissioning timeout.

", - "SimpleScalingPolicyConfiguration$ScalingAdjustment": "

The amount by which to scale in or scale out, based on the specified AdjustmentType. A positive value adds to the instance group's EC2 instance count while a negative number removes instances. If AdjustmentType is set to EXACT_CAPACITY, the number should only be a positive integer. If AdjustmentType is set to PERCENT_CHANGE_IN_CAPACITY, the value should express the percentage as an integer. For example, -20 indicates a decrease in 20% increments of cluster capacity.

", + "SimpleScalingPolicyConfiguration$ScalingAdjustment": "

The amount by which to scale in or scale out, based on the specified AdjustmentType. A positive value adds to the instance group's Amazon EC2 instance count while a negative number removes instances. If AdjustmentType is set to EXACT_CAPACITY, the number should only be a positive integer. If AdjustmentType is set to PERCENT_CHANGE_IN_CAPACITY, the value should express the percentage as an integer. For example, -20 indicates a decrease in 20% increments of cluster capacity.

", "SimpleScalingPolicyConfiguration$CoolDown": "

The amount of time, in seconds, after a scaling activity completes before any further trigger-related scaling activities can start. The default value is 0.

", "VolumeSpecification$Iops": "

The number of I/O operations per second (IOPS) that the volume supports.

", "VolumeSpecification$SizeInGB": "

The volume size, in gibibytes (GiB). This can be a number from 1 - 1024. If the volume type is EBS-optimized, the minimum value is 10.

" @@ -1370,8 +1383,8 @@ "refs": { "Instance$Market": "

The instance purchasing option. Valid values are ON_DEMAND or SPOT.

", "InstanceGroup$Market": "

The marketplace to provision instances for this group. Valid values are ON_DEMAND or SPOT.

", - "InstanceGroupConfig$Market": "

Market type of the EC2 instances used to create a cluster node.

", - "InstanceGroupDetail$Market": "

Market type of the EC2 instances used to create a cluster node.

", + "InstanceGroupConfig$Market": "

Market type of the Amazon EC2 instances used to create a cluster node.

", + "InstanceGroupDetail$Market": "

Market type of the Amazon EC2 instances used to create a cluster node.

", "ScalingAction$Market": "

Not available for instance groups. Instance groups use the market type specified for the group.

" } }, @@ -1417,19 +1430,19 @@ "NewSupportedProductsList": { "base": null, "refs": { - "RunJobFlowInput$NewSupportedProducts": "

For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and later, use Applications.

A list of strings that indicates third-party software to use with the job flow that accepts a user argument list. EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action arguments. For more information, see \"Launch a Job Flow on the MapR Distribution for Hadoop\" in the Amazon EMR Developer Guide. Supported values are:

" + "RunJobFlowInput$NewSupportedProducts": "

For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and later, use Applications.

A list of strings that indicates third-party software to use with the job flow that accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action arguments. For more information, see \"Launch a Job Flow on the MapR Distribution for Hadoop\" in the Amazon EMR Developer Guide. Supported values are:

" } }, "NonNegativeDouble": { "base": null, "refs": { "CloudWatchAlarmDefinition$Threshold": "

The value against which the specified statistic is compared.

", - "InstanceTypeConfig$BidPriceAsPercentageOfOnDemandPrice": "

The bid price, as a percentage of On-Demand price, for each EC2 Spot Instance as defined by InstanceType. Expressed as a number (for example, 20 specifies 20%). If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

", - "InstanceTypeSpecification$BidPriceAsPercentageOfOnDemandPrice": "

The bid price, as a percentage of On-Demand price, for each EC2 Spot Instance as defined by InstanceType. Expressed as a number (for example, 20 specifies 20%).

" + "InstanceTypeConfig$BidPriceAsPercentageOfOnDemandPrice": "

The bid price, as a percentage of On-Demand price, for each Amazon EC2 Spot Instance as defined by InstanceType. Expressed as a number (for example, 20 specifies 20%). If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

", + "InstanceTypeSpecification$BidPriceAsPercentageOfOnDemandPrice": "

The bid price, as a percentage of On-Demand price, for each Amazon EC2 Spot Instance as defined by InstanceType. Expressed as a number (for example, 20 specifies 20%).

" } }, "NotebookExecution": { - "base": "

A notebook execution. An execution is a specific instance that an EMR Notebook is run using the StartNotebookExecution action.

", + "base": "

A notebook execution. An execution is a specific instance in which an Amazon EMR Notebook is run using the StartNotebookExecution action.

", "refs": { "DescribeNotebookExecutionOutput$NotebookExecution": "

Properties of the notebook execution.

" } @@ -1454,6 +1467,19 @@ "ListNotebookExecutionsOutput$NotebookExecutions": "

A list of notebook executions.

" } }, + "NotebookS3LocationForOutput": { + "base": "

The Amazon S3 location that stores the notebook execution input.

", + "refs": { + "NotebookExecution$NotebookS3Location": "

The Amazon S3 location that stores the notebook execution input.

", + "NotebookExecutionSummary$NotebookS3Location": "

The Amazon S3 location that stores the notebook execution input.

" + } + }, + "NotebookS3LocationFromInput": { + "base": "

The Amazon S3 location that stores the notebook execution input.

", + "refs": { + "StartNotebookExecutionInput$NotebookS3Location": "

The Amazon S3 location for the notebook execution input.

" + } + }, "OSRelease": { "base": "

The Amazon Linux release specified for a cluster in the RunJobFlow request.

", "refs": { @@ -1491,9 +1517,9 @@ } }, "OnDemandProvisioningSpecification": { - "base": "

The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR version 5.12.1 and later.

", + "base": "

The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy.

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR releases 5.12.1 and later.

", "refs": { - "InstanceFleetProvisioningSpecifications$OnDemandSpecification": "

The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR version 5.12.1 and later.

" + "InstanceFleetProvisioningSpecifications$OnDemandSpecification": "

The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy.

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR releases 5.12.1 and later.

" } }, "OnDemandResizingSpecification": { @@ -1507,11 +1533,30 @@ "refs": { "Cluster$OutpostArn": "

The Amazon Resource Name (ARN) of the Outpost where the cluster is launched.

", "ClusterSummary$OutpostArn": "

The Amazon Resource Name (ARN) of the Outpost where the cluster is launched.

", - "Step$ExecutionRoleArn": "

The Amazon Resource Name (ARN) of the runtime role for a step on the cluster. The runtime role can be a cross-account IAM role. The runtime role ARN is a combination of account ID, role name, and role type using the following format: arn:partition:service:region:account:resource.

For example, arn:aws:iam::1234567890:role/ReadOnly is a correctly formatted runtime role ARN.

" + "Step$ExecutionRoleArn": "

The Amazon Resource Name (ARN) of the runtime role for a step on the cluster. The runtime role can be a cross-account IAM role. The runtime role ARN is a combination of account ID, role name, and role type using the following format: arn:partition:service:region:account:resource.

For example, arn:aws:iam::1234567890:role/ReadOnly is a correctly formatted runtime role ARN.

" + } + }, + "OutputNotebookFormat": { + "base": null, + "refs": { + "NotebookExecution$OutputNotebookFormat": "

The output format for the notebook execution.

", + "StartNotebookExecutionInput$OutputNotebookFormat": "

The output format for the notebook execution.

" + } + }, + "OutputNotebookS3LocationForOutput": { + "base": "

The Amazon S3 location that stores the notebook execution output.

", + "refs": { + "NotebookExecution$OutputNotebookS3Location": "

The Amazon S3 location for the notebook execution output.

" + } + }, + "OutputNotebookS3LocationFromInput": { + "base": "

The Amazon S3 location that stores the notebook execution output.

", + "refs": { + "StartNotebookExecutionInput$OutputNotebookS3Location": "

The Amazon S3 location for the notebook execution output.

" } }, "PlacementGroupConfig": { - "base": "

Placement group configuration for an Amazon EMR cluster. The configuration specifies the placement strategy that can be applied to instance roles during cluster creation.

To use this configuration, consider attaching managed policy AmazonElasticMapReducePlacementGroupPolicy to the EMR role.

", + "base": "

Placement group configuration for an Amazon EMR cluster. The configuration specifies the placement strategy that can be applied to instance roles during cluster creation.

To use this configuration, consider attaching managed policy AmazonElasticMapReducePlacementGroupPolicy to the Amazon EMR role.

", "refs": { "PlacementGroupConfigList$member": null } @@ -1526,7 +1571,7 @@ "PlacementGroupStrategy": { "base": null, "refs": { - "PlacementGroupConfig$PlacementStrategy": "

EC2 Placement Group strategy associated with instance role.

Starting with Amazon EMR version 5.23.0, the only supported placement strategy is SPREAD for the MASTER instance role.

" + "PlacementGroupConfig$PlacementStrategy": "

Amazon EC2 Placement Group strategy associated with instance role.

Starting with Amazon EMR release 5.23.0, the only supported placement strategy is SPREAD for the MASTER instance role.

" } }, "PlacementType": { @@ -1552,7 +1597,7 @@ "PortRanges": { "base": null, "refs": { - "BlockPublicAccessConfiguration$PermittedPublicSecurityGroupRuleRanges": "

Specifies ports and port ranges that are permitted to have security group rules that allow inbound traffic from all public sources. For example, if Port 23 (Telnet) is specified for PermittedPublicSecurityGroupRuleRanges, Amazon EMR allows cluster creation if a security group associated with the cluster has a rule that allows inbound traffic on Port 23 from IPv4 0.0.0.0/0 or IPv6 port ::/0 as the source.

By default, Port 22, which is used for SSH access to the cluster EC2 instances, is in the list of PermittedPublicSecurityGroupRuleRanges.

" + "BlockPublicAccessConfiguration$PermittedPublicSecurityGroupRuleRanges": "

Specifies ports and port ranges that are permitted to have security group rules that allow inbound traffic from all public sources. For example, if Port 23 (Telnet) is specified for PermittedPublicSecurityGroupRuleRanges, Amazon EMR allows cluster creation if a security group associated with the cluster has a rule that allows inbound traffic on Port 23 from IPv4 0.0.0.0/0 or IPv6 port ::/0 as the source.

By default, Port 22, which is used for SSH access to the cluster Amazon EC2 instances, is in the list of PermittedPublicSecurityGroupRuleRanges.

" } }, "PutAutoScalingPolicyInput": { @@ -1650,7 +1695,7 @@ "RepoUpgradeOnBoot": { "base": null, "refs": { - "Cluster$RepoUpgradeOnBoot": "

Applies only when CustomAmiID is used. Specifies the type of updates that are applied from the Amazon Linux AMI package repositories when an instance boots using the AMI.

", + "Cluster$RepoUpgradeOnBoot": "

Applies only when CustomAmiID is used. Specifies the type of updates that the Amazon Linux AMI package repositories apply when an instance boots using the AMI.

", "RunJobFlowInput$RepoUpgradeOnBoot": "

Applies only when CustomAmiID is used. Specifies which updates from the Amazon Linux AMI package repositories to apply automatically when the instance boots using the AMI. If omitted, the default is SECURITY, which indicates that only security updates are applied. If NONE is specified, no updates are applied, and all updates must be applied manually.

" } }, @@ -1674,9 +1719,9 @@ "ScaleDownBehavior": { "base": null, "refs": { - "Cluster$ScaleDownBehavior": "

The way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. TERMINATE_AT_TASK_COMPLETION indicates that Amazon EMR adds nodes to a deny list and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION is available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.

", - "JobFlowDetail$ScaleDownBehavior": "

The way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. TERMINATE_AT_TASK_COMPLETION indicates that Amazon EMR adds nodes to a deny list and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.

", - "RunJobFlowInput$ScaleDownBehavior": "

Specifies the way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. TERMINATE_AT_TASK_COMPLETION indicates that Amazon EMR adds nodes to a deny list and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.

" + "Cluster$ScaleDownBehavior": "

The way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. TERMINATE_AT_TASK_COMPLETION indicates that Amazon EMR adds nodes to a deny list and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION is available only in Amazon EMR releases 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.

", + "JobFlowDetail$ScaleDownBehavior": "

The way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. TERMINATE_AT_TASK_COMPLETION indicates that Amazon EMR adds nodes to a deny list and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION is available only in Amazon EMR releases 4.1.0 and later, and is the default for releases of Amazon EMR earlier than 5.1.0.

", + "RunJobFlowInput$ScaleDownBehavior": "

Specifies the way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. TERMINATE_AT_TASK_COMPLETION indicates that Amazon EMR adds nodes to a deny list and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION is available only in Amazon EMR releases 4.1.0 and later, and is the default for releases of Amazon EMR earlier than 5.1.0.

" } }, "ScalingAction": { @@ -1686,14 +1731,14 @@ } }, "ScalingConstraints": { - "base": "

The upper and lower EC2 instance limits for an automatic scaling policy. Automatic scaling activities triggered by automatic scaling rules will not cause an instance group to grow above or below these limits.

", + "base": "

The upper and lower Amazon EC2 instance limits for an automatic scaling policy. Automatic scaling activities triggered by automatic scaling rules will not cause an instance group to grow above or below these limits.

", "refs": { - "AutoScalingPolicy$Constraints": "

The upper and lower EC2 instance limits for an automatic scaling policy. Automatic scaling activity will not cause an instance group to grow above or below these limits.

", - "AutoScalingPolicyDescription$Constraints": "

The upper and lower EC2 instance limits for an automatic scaling policy. Automatic scaling activity will not cause an instance group to grow above or below these limits.

" + "AutoScalingPolicy$Constraints": "

The upper and lower Amazon EC2 instance limits for an automatic scaling policy. Automatic scaling activity will not cause an instance group to grow above or below these limits.

", + "AutoScalingPolicyDescription$Constraints": "

The upper and lower Amazon EC2 instance limits for an automatic scaling policy. Automatic scaling activity will not cause an instance group to grow above or below these limits.

" } }, "ScalingRule": { - "base": "

A scale-in or scale-out rule that defines scaling activity, including the CloudWatch metric alarm that triggers activity, how EC2 instances are added or removed, and the periodicity of adjustments. The automatic scaling policy for an instance group can comprise one or more automatic scaling rules.

", + "base": "

A scale-in or scale-out rule that defines scaling activity, including the CloudWatch metric alarm that triggers activity, how Amazon EC2 instances are added or removed, and the periodicity of adjustments. The automatic scaling policy for an instance group can comprise one or more automatic scaling rules.

", "refs": { "ScalingRuleList$member": null } @@ -1772,7 +1817,7 @@ } }, "SimpleScalingPolicyConfiguration": { - "base": "

An automatic scaling configuration, which describes how the policy adds or removes instances, the cooldown period, and the number of EC2 instances that will be added each time the CloudWatch metric alarm condition is satisfied.

", + "base": "

An automatic scaling configuration, which describes how the policy adds or removes instances, the cooldown period, and the number of Amazon EC2 instances that will be added each time the CloudWatch metric alarm condition is satisfied.

", "refs": { "ScalingAction$SimpleScalingPolicyConfiguration": "

The type of adjustment the automatic scaling activity makes when triggered, and the periodicity of the adjustment.

" } @@ -1796,7 +1841,7 @@ } }, "SpotProvisioningSpecification": { - "base": "

The launch specification for Spot Instances in the instance fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. Spot Instance allocation strategy is available in Amazon EMR version 5.12.1 and later.

Spot Instances with a defined duration (also known as Spot blocks) are no longer available to new customers from July 1, 2021. For customers who have previously used the feature, we will continue to support Spot Instances with a defined duration until December 31, 2022.

", + "base": "

The launch specification for Spot Instances in the instance fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions. Spot Instance allocation strategy is available in Amazon EMR releases 5.12.1 and later.

Spot Instances with a defined duration (also known as Spot blocks) are no longer available to new customers from July 1, 2021. For customers who have previously used the feature, we will continue to support Spot Instances with a defined duration until December 31, 2022.

", "refs": { "InstanceFleetProvisioningSpecifications$SpotSpecification": "

The launch specification for Spot instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.

" } @@ -1961,7 +2006,7 @@ "CloudWatchAlarmDefinition$Namespace": "

The namespace for the CloudWatch metric. The default is AWS/ElasticMapReduce.

", "Cluster$Name": "

The name of the cluster.

", "Cluster$LogUri": "

The path to the Amazon S3 location where logs for this cluster are stored.

", - "Cluster$LogEncryptionKmsKeyId": "

The KMS key used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.

", + "Cluster$LogEncryptionKmsKeyId": "

The KMS key used for encrypting log files. This attribute is only available with Amazon EMR 5.30.0 and later, excluding Amazon EMR 6.0.0.

", "Cluster$RequestedAmiVersion": "

The AMI version requested for this cluster.

", "Cluster$RunningAmiVersion": "

The AMI version running on this cluster.

", "Cluster$ReleaseLabel": "

The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form emr-x.x.x, where x.x.x is an Amazon EMR release version such as emr-5.14.0. For more information about Amazon EMR release versions and included application versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/. The release label applies only to Amazon EMR releases version 4.0 and later. Earlier versions use AmiVersion.

", @@ -1985,12 +2030,12 @@ "Ec2InstanceAttributes$Ec2KeyName": "

The name of the Amazon EC2 key pair to use when connecting with SSH into the master node as a user named \"hadoop\".

", "Ec2InstanceAttributes$Ec2SubnetId": "

Set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value, and your account supports EC2-Classic, the cluster launches in EC2-Classic.

", "Ec2InstanceAttributes$Ec2AvailabilityZone": "

The Availability Zone in which the cluster will run.

", - "Ec2InstanceAttributes$IamInstanceProfile": "

The IAM role that was specified when the cluster was launched. The EC2 instances of the cluster assume this role.

", + "Ec2InstanceAttributes$IamInstanceProfile": "

The IAM role that was specified when the cluster was launched. The Amazon EC2 instances of the cluster assume this role.

", "Ec2InstanceAttributes$EmrManagedMasterSecurityGroup": "

The identifier of the Amazon EC2 security group for the master node.

", "Ec2InstanceAttributes$EmrManagedSlaveSecurityGroup": "

The identifier of the Amazon EC2 security group for the core and task nodes.

", "Ec2InstanceAttributes$ServiceAccessSecurityGroup": "

The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.

", - "ErrorDetail$ErrorCode": "

The name or code that's associated with the error.

", - "ErrorDetail$ErrorMessage": "

A message describing the error that occured.

", + "ErrorDetail$ErrorCode": "

The name or code associated with the error.

", + "ErrorDetail$ErrorMessage": "

A message that describes the error.

", "FailureDetails$Reason": "

The reason for the step failure. In the case where the service cannot successfully determine the root cause of the failure, it returns \"Unknown Error\" as a reason.

", "FailureDetails$Message": "

The descriptive message including the error the Amazon EMR service has identified as the cause of step failure. This is text from an error log that describes the root cause of the failure.

", "FailureDetails$LogFile": "

The path to the log file where the step failure root cause was originally recorded.

", @@ -2078,7 +2123,7 @@ } }, "SupportedProductConfig": { - "base": "

The list of supported product configurations that allow user-supplied arguments. EMR accepts these arguments and forwards them to the corresponding installation script as bootstrap action arguments.

", + "base": "

The list of supported product configurations that allow user-supplied arguments. Amazon EMR accepts these arguments and forwards them to the corresponding installation script as bootstrap action arguments.

", "refs": { "NewSupportedProductsList$member": null } @@ -2135,6 +2180,15 @@ "refs": { } }, + "UriString": { + "base": null, + "refs": { + "NotebookS3LocationForOutput$Key": "

The key to the Amazon S3 location that stores the notebook execution input.

", + "NotebookS3LocationFromInput$Key": "

The key to the Amazon S3 location that stores the notebook execution input.

", + "OutputNotebookS3LocationForOutput$Key": "

The key to the Amazon S3 location that stores the notebook execution output.

", + "OutputNotebookS3LocationFromInput$Key": "

The key to the Amazon S3 location that stores the notebook execution output.

" + } + }, "UsernamePassword": { "base": "

The username and password that you use to connect to cluster endpoints.

", "refs": { @@ -2142,10 +2196,10 @@ } }, "VolumeSpecification": { - "base": "

EBS volume specifications such as volume type, IOPS, size (GiB) and throughput (MiB/s) that are requested for the EBS volume attached to an EC2 instance in the cluster.

", + "base": "

EBS volume specifications such as volume type, IOPS, size (GiB) and throughput (MiB/s) that are requested for the EBS volume attached to an Amazon EC2 instance in the cluster.

", "refs": { - "EbsBlockDevice$VolumeSpecification": "

EBS volume specifications such as volume type, IOPS, size (GiB) and throughput (MiB/s) that are requested for the EBS volume attached to an EC2 instance in the cluster.

", - "EbsBlockDeviceConfig$VolumeSpecification": "

EBS volume specifications such as volume type, IOPS, size (GiB) and throughput (MiB/s) that are requested for the EBS volume attached to an EC2 instance in the cluster.

" + "EbsBlockDevice$VolumeSpecification": "

EBS volume specifications such as volume type, IOPS, size (GiB) and throughput (MiB/s) that are requested for the EBS volume attached to an Amazon EC2 instance in the cluster.

", + "EbsBlockDeviceConfig$VolumeSpecification": "

EBS volume specifications such as volume type, IOPS, size (GiB) and throughput (MiB/s) that are requested for the EBS volume attached to an Amazon EC2 instance in the cluster.

" } }, "WholeNumber": { @@ -2171,7 +2225,7 @@ "base": null, "refs": { "Cluster$SecurityConfiguration": "

The name of the security configuration applied to the cluster.

", - "Cluster$AutoScalingRole": "

An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole. The IAM role provides permissions that the automatic scaling feature requires to launch and terminate EC2 instances in an instance group.

", + "Cluster$AutoScalingRole": "

An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole. The IAM role provides permissions that the automatic scaling feature requires to launch and terminate Amazon EC2 instances in an instance group.

", "CreateSecurityConfigurationInput$Name": "

The name of the security configuration.

", "CreateSecurityConfigurationOutput$Name": "

The name of the security configuration.

", "CreateStudioInput$ServiceRole": "

The IAM role that the Amazon EMR Studio assumes. The service role provides a way for Amazon EMR Studio to interoperate with other Amazon Web Services services.

", @@ -2182,35 +2236,38 @@ "DeleteSecurityConfigurationInput$Name": "

The name of the security configuration.

", "DescribeSecurityConfigurationInput$Name": "

The name of the security configuration.

", "DescribeSecurityConfigurationOutput$Name": "

The name of the security configuration.

", + "EnvironmentVariablesMap$value": null, "HadoopJarStepConfig$Jar": "

A path to a JAR file run during the step.

", "HadoopJarStepConfig$MainClass": "

The name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file.

", "InstanceGroupDetail$LastStateChangeReason": "

Details regarding the state of the instance group.

", "JobFlowDetail$LogUri": "

The location in Amazon S3 where log files for the job are stored.

", - "JobFlowDetail$LogEncryptionKmsKeyId": "

The KMS key used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.

", - "JobFlowDetail$JobFlowRole": "

The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.

", + "JobFlowDetail$LogEncryptionKmsKeyId": "

The KMS key used for encrypting log files. This attribute is only available with Amazon EMR 5.30.0 and later, excluding 6.0.0.

", + "JobFlowDetail$JobFlowRole": "

The IAM role that was specified when the job flow was launched. The Amazon EC2 instances of the job flow assume this role.

", "JobFlowDetail$ServiceRole": "

The IAM role that is assumed by the Amazon EMR service to access Amazon Web Services resources on your behalf.

", - "JobFlowDetail$AutoScalingRole": "

An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole. The IAM role provides a way for the automatic scaling feature to get the required permissions it needs to launch and terminate EC2 instances in an instance group.

", + "JobFlowDetail$AutoScalingRole": "

An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole. The IAM role provides a way for the automatic scaling feature to get the required permissions it needs to launch and terminate Amazon EC2 instances in an instance group.

", "JobFlowExecutionStatusDetail$LastStateChangeReason": "

Description of the job flow last changed state.

", "JobFlowInstancesDetail$MasterPublicDnsName": "

The DNS name of the master node. If the cluster is on a private subnet, this is the private DNS name. On a public subnet, this is the public DNS name.

", "JobFlowInstancesDetail$MasterInstanceId": "

The Amazon EC2 instance identifier of the master node.

", "KeyValue$Key": "

The unique identifier of a key-value pair.

", "KeyValue$Value": "

The value part of the identified key.

", - "NotebookExecution$NotebookParams": "

Input parameters in JSON format passed to the EMR Notebook at runtime for execution.

", + "ListNotebookExecutionsInput$ExecutionEngineId": "

The unique ID of the execution engine.

", + "NotebookExecution$NotebookParams": "

Input parameters in JSON format passed to the Amazon EMR Notebook at runtime for execution.

", "NotebookExecution$OutputNotebookURI": "

The location of the notebook execution's output file in Amazon S3.

", "NotebookExecution$LastStateChangeReason": "

The reason for the latest status change of the notebook execution.

", + "NotebookExecutionSummary$ExecutionEngineId": "

The unique ID of the execution engine for the notebook execution.

", "PlacementType$AvailabilityZone": "

The Amazon EC2 Availability Zone for the cluster. AvailabilityZone is used for uniform instance groups, while AvailabilityZones (plural) is used for instance fleets.

", "RunJobFlowInput$LogUri": "

The location in Amazon S3 to write the log files of the job flow. If a value is not provided, logs are not created.

", - "RunJobFlowInput$LogEncryptionKmsKeyId": "

The KMS key used for encrypting log files. If a value is not provided, the logs remain encrypted by AES-256. This attribute is only available with Amazon EMR version 5.30.0 and later, excluding Amazon EMR 6.0.0.

", + "RunJobFlowInput$LogEncryptionKmsKeyId": "

The KMS key used for encrypting log files. If a value is not provided, the logs remain encrypted by AES-256. This attribute is only available with Amazon EMR releases 5.30.0 and later, excluding Amazon EMR 6.0.0.

", "RunJobFlowInput$AdditionalInfo": "

A JSON string for selecting additional features.

", - "RunJobFlowInput$JobFlowRole": "

Also called instance profile and EC2 role. An IAM role for an EMR cluster. The EC2 instances of the cluster assume this role. The default role is EMR_EC2_DefaultRole. In order to use the default role, you must have already created it using the CLI or console.

", + "RunJobFlowInput$JobFlowRole": "

Also called instance profile and Amazon EC2 role. An IAM role for an Amazon EMR cluster. The Amazon EC2 instances of the cluster assume this role. The default role is EMR_EC2_DefaultRole. In order to use the default role, you must have already created it using the CLI or console.

", "RunJobFlowInput$ServiceRole": "

The IAM role that Amazon EMR assumes in order to access Amazon Web Services resources on your behalf. If you've created a custom service role path, you must specify it for the service role when you launch your cluster.

", "RunJobFlowInput$SecurityConfiguration": "

The name of a security configuration to apply to the cluster.

", - "RunJobFlowInput$AutoScalingRole": "

An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole. The IAM role provides permissions that the automatic scaling feature requires to launch and terminate EC2 instances in an instance group.

", + "RunJobFlowInput$AutoScalingRole": "

An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole. The IAM role provides permissions that the automatic scaling feature requires to launch and terminate Amazon EC2 instances in an instance group.

", "ScriptBootstrapActionConfig$Path": "

Location in Amazon S3 of the script to run during a bootstrap action.

", "SecurityConfigurationSummary$Name": "

The name of the security configuration.

", - "StartNotebookExecutionInput$RelativePath": "

The path and file name of the notebook file for this execution, relative to the path specified for the EMR Notebook. For example, if you specify a path of s3://MyBucket/MyNotebooks when you create an EMR Notebook for a notebook with an ID of e-ABCDEFGHIJK1234567890ABCD (the EditorID of this request), and you specify a RelativePath of my_notebook_executions/notebook_execution.ipynb, the location of the file for the notebook execution is s3://MyBucket/MyNotebooks/e-ABCDEFGHIJK1234567890ABCD/my_notebook_executions/notebook_execution.ipynb.

", - "StartNotebookExecutionInput$NotebookParams": "

Input parameters in JSON format passed to the EMR Notebook at runtime for execution.

", - "StartNotebookExecutionInput$ServiceRole": "

The name or ARN of the IAM role that is used as the service role for Amazon EMR (the EMR role) for the notebook execution.

", + "StartNotebookExecutionInput$RelativePath": "

The path and file name of the notebook file for this execution, relative to the path specified for the Amazon EMR Notebook. For example, if you specify a path of s3://MyBucket/MyNotebooks when you create an Amazon EMR Notebook for a notebook with an ID of e-ABCDEFGHIJK1234567890ABCD (the EditorID of this request), and you specify a RelativePath of my_notebook_executions/notebook_execution.ipynb, the location of the file for the notebook execution is s3://MyBucket/MyNotebooks/e-ABCDEFGHIJK1234567890ABCD/my_notebook_executions/notebook_execution.ipynb.

", + "StartNotebookExecutionInput$NotebookParams": "

Input parameters in JSON format passed to the Amazon EMR Notebook at runtime for execution.

", + "StartNotebookExecutionInput$ServiceRole": "

The name or ARN of the IAM role that is used as the service role for Amazon EMR (the Amazon EMR role) for the notebook execution.

", "StepExecutionStatusDetail$LastStateChangeReason": "

A description of the step's current state.

", "Studio$ServiceRole": "

The name of the IAM role assumed by the Amazon EMR Studio.

", "Studio$UserRole": "

The name of the IAM role assumed by users logged in to the Amazon EMR Studio. A Studio only requires a UserRole when you use IAM authentication.

", @@ -2244,7 +2301,7 @@ "AddJobFlowStepsInput$JobFlowId": "

A string that uniquely identifies the job flow. This identifier is returned by RunJobFlow and can also be obtained from ListClusters.

", "BootstrapActionConfig$Name": "

The name of the bootstrap action.

", "CancelStepsInput$ClusterId": "

The ClusterID for the specified steps that will be canceled. Use RunJobFlow and ListClusters to get ClusterIDs.

", - "Cluster$CustomAmiId": "

Available only in Amazon EMR version 5.7.0 and later. The ID of a custom Amazon EBS-backed Linux AMI if the cluster uses a custom AMI.

", + "Cluster$CustomAmiId": "

Available only in Amazon EMR releases 5.7.0 and later. The ID of a custom Amazon EBS-backed Linux AMI if the cluster uses a custom AMI.

", "CreateStudioInput$Name": "

A descriptive name for the Amazon EMR Studio.

", "CreateStudioInput$Description": "

A detailed description of the Amazon EMR Studio.

", "CreateStudioInput$VpcId": "

The ID of the Amazon Virtual Private Cloud (Amazon VPC) to associate with the Studio.

", @@ -2255,15 +2312,16 @@ "CreateStudioSessionMappingInput$StudioId": "

The ID of the Amazon EMR Studio to which the user or group will be mapped.

", "CreateStudioSessionMappingInput$IdentityId": "

The globally unique identifier (GUID) of the user or group from the IAM Identity Center Identity Store. For more information, see UserId and GroupId in the IAM Identity Center Identity Store API Reference. Either IdentityName or IdentityId must be specified, but not both.

", "CreateStudioSessionMappingInput$IdentityName": "

The name of the user or group. For more information, see UserName and DisplayName in the IAM Identity Center Identity Store API Reference. Either IdentityName or IdentityId must be specified, but not both.

", - "CreateStudioSessionMappingInput$SessionPolicyArn": "

The Amazon Resource Name (ARN) for the session policy that will be applied to the user or group. You should specify the ARN for the session policy that you want to apply, not the ARN of your user role. For more information, see Create an EMR Studio User Role with Session Policies.

", + "CreateStudioSessionMappingInput$SessionPolicyArn": "

The Amazon Resource Name (ARN) for the session policy that will be applied to the user or group. You should specify the ARN for the session policy that you want to apply, not the ARN of your user role. For more information, see Create an Amazon EMR Studio User Role with Session Policies.

", "DeleteStudioInput$StudioId": "

The ID of the Amazon EMR Studio.

", "DeleteStudioSessionMappingInput$StudioId": "

The ID of the Amazon EMR Studio.

", "DeleteStudioSessionMappingInput$IdentityId": "

The globally unique identifier (GUID) of the user or group to remove from the Amazon EMR Studio. For more information, see UserId and GroupId in the IAM Identity Center Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", "DeleteStudioSessionMappingInput$IdentityName": "

The name of the user or group to remove from the Amazon EMR Studio. For more information, see UserName and DisplayName in the IAM Identity Center Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", "DescribeNotebookExecutionInput$NotebookExecutionId": "

The unique identifier of the notebook execution.

", "DescribeStudioInput$StudioId": "

The Amazon EMR Studio ID.

", - "ExecutionEngineConfig$Id": "

The unique identifier of the execution engine. For an EMR cluster, this is the cluster ID.

", - "ExecutionEngineConfig$MasterInstanceSecurityGroupId": "

An optional unique ID of an EC2 security group to associate with the master instance of the EMR cluster for this notebook execution. For more information see Specifying EC2 Security Groups for EMR Notebooks in the EMR Management Guide.

", + "EnvironmentVariablesMap$key": null, + "ExecutionEngineConfig$Id": "

The unique identifier of the execution engine. For an Amazon EMR cluster, this is the cluster ID.

", + "ExecutionEngineConfig$MasterInstanceSecurityGroupId": "

An optional unique ID of an Amazon EC2 security group to associate with the master instance of the Amazon EMR cluster for this notebook execution. For more information, see Specifying Amazon EC2 Security Groups for Amazon EMR Notebooks in the Amazon EMR Management Guide.

", "GetClusterSessionCredentialsInput$ClusterId": "

The unique identifier of the cluster.

", "GetStudioSessionMappingInput$StudioId": "

The ID of the Amazon EMR Studio.

", "GetStudioSessionMappingInput$IdentityId": "

The globally unique identifier (GUID) of the user or group. For more information, see UserId and GroupId in the IAM Identity Center Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", @@ -2280,14 +2338,14 @@ "InstanceGroupDetail$CustomAmiId": "

The custom AMI ID to use for the provisioned instance group.

", "InstanceGroupIdsList$member": null, "InstanceGroupModifyConfig$InstanceGroupId": "

Unique ID of the instance group to modify.

", - "InstanceTypeConfig$BidPrice": "

The bid price for each EC2 Spot Instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

", + "InstanceTypeConfig$BidPrice": "

The bid price for each Amazon EC2 Spot Instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

", "InstanceTypeConfig$CustomAmiId": "

The custom AMI ID to use for the instance type.

", - "InstanceTypeSpecification$BidPrice": "

The bid price for each EC2 Spot Instance type as defined by InstanceType. Expressed in USD.

", + "InstanceTypeSpecification$BidPrice": "

The bid price for each Amazon EC2 Spot Instance type as defined by InstanceType. Expressed in USD.

", "InstanceTypeSpecification$CustomAmiId": "

The custom AMI ID to use for the instance type.

", "JobFlowDetail$JobFlowId": "

The job flow identifier.

", "JobFlowDetail$Name": "

The name of the job flow.

", "JobFlowDetail$AmiVersion": "

Applies only to Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases 4.0 and later, ReleaseLabel is used. To specify a custom AMI, use CustomAmiID.

", - "JobFlowInstancesConfig$Ec2KeyName": "

The name of the EC2 key pair that can be used to connect to the master node using SSH as the user called \"hadoop.\"

", + "JobFlowInstancesConfig$Ec2KeyName": "

The name of the Amazon EC2 key pair that can be used to connect to the master node using SSH as the user called \"hadoop.\"

", "JobFlowInstancesConfig$HadoopVersion": "

Applies only to Amazon EMR release versions earlier than 4.0. The Hadoop version for the cluster. Valid inputs are \"0.18\" (no longer maintained), \"0.20\" (no longer maintained), \"0.20.205\" (no longer maintained), \"1.0.3\", \"2.2.0\", or \"2.4.0\". If you do not set this value, the default of 0.18 is used, unless the AmiVersion parameter is set in the RunJobFlow call, in which case the default version of Hadoop for that AMI version is used.

", "JobFlowInstancesConfig$Ec2SubnetId": "

Applies to clusters that use the uniform instance group configuration. To launch the cluster in Amazon Virtual Private Cloud (Amazon VPC), set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value and your account supports EC2-Classic, the cluster launches in EC2-Classic.

", "JobFlowInstancesConfig$EmrManagedMasterSecurityGroup": "

The identifier of the Amazon EC2 security group for the master node. If you specify EmrManagedMasterSecurityGroup, you must also specify EmrManagedSlaveSecurityGroup.

", @@ -2304,18 +2362,22 @@ "ListNotebookExecutionsInput$EditorId": "

The unique ID of the editor associated with the notebook execution.

", "ListStudioSessionMappingsInput$StudioId": "

The ID of the Amazon EMR Studio.

", "NotebookExecution$NotebookExecutionId": "

The unique identifier of a notebook execution.

", - "NotebookExecution$EditorId": "

The unique identifier of the EMR Notebook that is used for the notebook execution.

", + "NotebookExecution$EditorId": "

The unique identifier of the Amazon EMR Notebook that is used for the notebook execution.

", "NotebookExecution$NotebookExecutionName": "

A name for the notebook execution.

", "NotebookExecution$Arn": "

The Amazon Resource Name (ARN) of the notebook execution.

", - "NotebookExecution$NotebookInstanceSecurityGroupId": "

The unique identifier of the EC2 security group associated with the EMR Notebook instance. For more information see Specifying EC2 Security Groups for EMR Notebooks in the EMR Management Guide.

", + "NotebookExecution$NotebookInstanceSecurityGroupId": "

The unique identifier of the Amazon EC2 security group associated with the Amazon EMR Notebook instance. For more information, see Specifying Amazon EC2 Security Groups for Amazon EMR Notebooks in the Amazon EMR Management Guide.

", "NotebookExecutionSummary$NotebookExecutionId": "

The unique identifier of the notebook execution.

", "NotebookExecutionSummary$EditorId": "

The unique identifier of the editor associated with the notebook execution.

", "NotebookExecutionSummary$NotebookExecutionName": "

The name of the notebook execution.

", + "NotebookS3LocationForOutput$Bucket": "

The Amazon S3 bucket that stores the notebook execution input.

", + "NotebookS3LocationFromInput$Bucket": "

The Amazon S3 bucket that stores the notebook execution input.

", "OnDemandCapacityReservationOptions$CapacityReservationResourceGroupArn": "

The ARN of the Capacity Reservation resource group in which to run the instance.

", + "OutputNotebookS3LocationForOutput$Bucket": "

The Amazon S3 bucket that stores the notebook execution output.

", + "OutputNotebookS3LocationFromInput$Bucket": "

The Amazon S3 bucket that stores the notebook execution output.

", "RunJobFlowInput$Name": "

The name of the job flow.

", "RunJobFlowInput$AmiVersion": "

Applies only to Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases 4.0 and later, ReleaseLabel is used. To specify a custom AMI, use CustomAmiID.

", "RunJobFlowInput$ReleaseLabel": "

The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form emr-x.x.x, where x.x.x is an Amazon EMR release version such as emr-5.14.0. For more information about Amazon EMR release versions and included application versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/. The release label applies only to Amazon EMR releases version 4.0 and later. Earlier versions use AmiVersion.

", - "RunJobFlowInput$CustomAmiId": "

Available only in Amazon EMR version 5.7.0 and later. The ID of a custom Amazon EBS-backed Linux AMI. If specified, Amazon EMR uses this AMI when it launches cluster EC2 instances. For more information about custom AMIs in Amazon EMR, see Using a Custom AMI in the Amazon EMR Management Guide. If omitted, the cluster uses the base Linux AMI for the ReleaseLabel specified. For Amazon EMR versions 2.x and 3.x, use AmiVersion instead.

For information about creating a custom AMI, see Creating an Amazon EBS-Backed Linux AMI in the Amazon Elastic Compute Cloud User Guide for Linux Instances. For information about finding an AMI ID, see Finding a Linux AMI.

", + "RunJobFlowInput$CustomAmiId": "

Available only in Amazon EMR releases 5.7.0 and later. The ID of a custom Amazon EBS-backed Linux AMI. If specified, Amazon EMR uses this AMI when it launches cluster Amazon EC2 instances. For more information about custom AMIs in Amazon EMR, see Using a Custom AMI in the Amazon EMR Management Guide. If omitted, the cluster uses the base Linux AMI for the ReleaseLabel specified. For Amazon EMR releases 2.x and 3.x, use AmiVersion instead.

For information about creating a custom AMI, see Creating an Amazon EBS-Backed Linux AMI in the Amazon Elastic Compute Cloud User Guide for Linux Instances. For information about finding an AMI ID, see Finding a Linux AMI.

", "RunJobFlowInput$OSReleaseLabel": "

Specifies a particular Amazon Linux release for all nodes in a cluster launch RunJobFlow request. If a release is not specified, Amazon EMR uses the latest validated Amazon Linux release for cluster launch.

", "RunJobFlowOutput$JobFlowId": "

A unique identifier for the job flow.

", "SecurityGroupsList$member": null, @@ -2327,9 +2389,9 @@ "SessionMappingSummary$IdentityId": "

The globally unique identifier (GUID) of the user or group from the IAM Identity Center Identity Store.

", "SessionMappingSummary$IdentityName": "

The name of the user or group. For more information, see UserName and DisplayName in the IAM Identity Center Identity Store API Reference.

", "SessionMappingSummary$SessionPolicyArn": "

The Amazon Resource Name (ARN) of the session policy associated with the user or group.

", - "StartNotebookExecutionInput$EditorId": "

The unique identifier of the EMR Notebook to use for notebook execution.

", + "StartNotebookExecutionInput$EditorId": "

The unique identifier of the Amazon EMR Notebook to use for notebook execution.

", "StartNotebookExecutionInput$NotebookExecutionName": "

An optional name for the notebook execution.

", - "StartNotebookExecutionInput$NotebookInstanceSecurityGroupId": "

The unique identifier of the Amazon EC2 security group to associate with the EMR Notebook for this notebook execution.

", + "StartNotebookExecutionInput$NotebookInstanceSecurityGroupId": "

The unique identifier of the Amazon EC2 security group to associate with the Amazon EMR Notebook for this notebook execution.

", "StartNotebookExecutionOutput$NotebookExecutionId": "

The unique identifier of the notebook execution.

", "StepConfig$Name": "

The name of the step.

", "StepIdsList$member": null, @@ -2364,10 +2426,10 @@ "XmlStringMaxLen256List": { "base": null, "refs": { - "Ec2InstanceAttributes$RequestedEc2SubnetIds": "

Applies to clusters configured with the instance fleets option. Specifies the unique identifier of one or more Amazon EC2 subnets in which to launch EC2 cluster instances. Subnets must exist within the same VPC. Amazon EMR chooses the EC2 subnet with the best fit from among the list of RequestedEc2SubnetIds, and then launches all cluster instances within that Subnet. If this value is not specified, and the account and Region support EC2-Classic networks, the cluster launches instances in the EC2-Classic network and uses RequestedEc2AvailabilityZones instead of this setting. If EC2-Classic is not supported, and no Subnet is specified, Amazon EMR chooses the subnet for you. RequestedEc2SubnetIDs and RequestedEc2AvailabilityZones cannot be specified together.

", - "Ec2InstanceAttributes$RequestedEc2AvailabilityZones": "

Applies to clusters configured with the instance fleets option. Specifies one or more Availability Zones in which to launch EC2 cluster instances when the EC2-Classic network configuration is supported. Amazon EMR chooses the Availability Zone with the best fit from among the list of RequestedEc2AvailabilityZones, and then launches all cluster instances within that Availability Zone. If you do not specify this value, Amazon EMR chooses the Availability Zone for you. RequestedEc2SubnetIDs and RequestedEc2AvailabilityZones cannot be specified together.

", - "JobFlowInstancesConfig$Ec2SubnetIds": "

Applies to clusters that use the instance fleet configuration. When multiple EC2 subnet IDs are specified, Amazon EMR evaluates them and launches instances in the optimal subnet.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", - "PlacementType$AvailabilityZones": "

When multiple Availability Zones are specified, Amazon EMR evaluates them and launches instances in the optimal Availability Zone. AvailabilityZones is used for instance fleets, while AvailabilityZone (singular) is used for uniform instance groups.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

" + "Ec2InstanceAttributes$RequestedEc2SubnetIds": "

Applies to clusters configured with the instance fleets option. Specifies the unique identifier of one or more Amazon EC2 subnets in which to launch Amazon EC2 cluster instances. Subnets must exist within the same VPC. Amazon EMR chooses the Amazon EC2 subnet with the best fit from among the list of RequestedEc2SubnetIds, and then launches all cluster instances within that Subnet. If this value is not specified, and the account and Region support EC2-Classic networks, the cluster launches instances in the EC2-Classic network and uses RequestedEc2AvailabilityZones instead of this setting. If EC2-Classic is not supported, and no Subnet is specified, Amazon EMR chooses the subnet for you. RequestedEc2SubnetIDs and RequestedEc2AvailabilityZones cannot be specified together.

", + "Ec2InstanceAttributes$RequestedEc2AvailabilityZones": "

Applies to clusters configured with the instance fleets option. Specifies one or more Availability Zones in which to launch Amazon EC2 cluster instances when the EC2-Classic network configuration is supported. Amazon EMR chooses the Availability Zone with the best fit from among the list of RequestedEc2AvailabilityZones, and then launches all cluster instances within that Availability Zone. If you do not specify this value, Amazon EMR chooses the Availability Zone for you. RequestedEc2SubnetIDs and RequestedEc2AvailabilityZones cannot be specified together.

", + "JobFlowInstancesConfig$Ec2SubnetIds": "

Applies to clusters that use the instance fleet configuration. When multiple Amazon EC2 subnet IDs are specified, Amazon EMR evaluates them and launches instances in the optimal subnet.

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions.

", + "PlacementType$AvailabilityZones": "

When multiple Availability Zones are specified, Amazon EMR evaluates them and launches instances in the optimal Availability Zone. AvailabilityZones is used for instance fleets, while AvailabilityZone (singular) is used for uniform instance groups.

The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions.

" } } } diff --git a/models/apis/rds/2014-10-31/api-2.json b/models/apis/rds/2014-10-31/api-2.json index 03c9763c3f1..7396dd60fb2 100644 --- a/models/apis/rds/2014-10-31/api-2.json +++ b/models/apis/rds/2014-10-31/api-2.json @@ -1598,7 +1598,8 @@ {"shape":"InvalidDBInstanceStateFault"}, {"shape":"DBClusterAlreadyExistsFault"}, {"shape":"DBInstanceAlreadyExistsFault"}, - {"shape":"DomainNotFoundFault"} + {"shape":"DomainNotFoundFault"}, + {"shape":"StorageTypeNotAvailableFault"} ] }, "ModifyDBClusterEndpoint":{ @@ -2097,7 +2098,8 @@ {"shape":"KMSKeyNotAccessibleFault"}, {"shape":"DBClusterNotFoundFault"}, {"shape":"DomainNotFoundFault"}, - {"shape":"InsufficientStorageClusterCapacityFault"} + {"shape":"InsufficientStorageClusterCapacityFault"}, + {"shape":"StorageTypeNotSupportedFault"} ] }, "RestoreDBClusterFromSnapshot":{ @@ -2938,7 +2940,8 @@ "EngineVersion":{"shape":"String"}, "BackupRetentionPeriod":{"shape":"IntegerOptional"}, "AllocatedStorage":{"shape":"IntegerOptional"}, - "Iops":{"shape":"IntegerOptional"} + "Iops":{"shape":"IntegerOptional"}, + "StorageType":{"shape":"String"} } }, "ConnectionPoolConfiguration":{ @@ -3693,7 +3696,8 @@ "ServerlessV2ScalingConfiguration":{"shape":"ServerlessV2ScalingConfigurationInfo"}, "NetworkType":{"shape":"String"}, "DBSystemId":{"shape":"String"}, - "MasterUserSecret":{"shape":"MasterUserSecret"} + "MasterUserSecret":{"shape":"MasterUserSecret"}, + "IOOptimizedNextAllowedModificationTime":{"shape":"TStamp"} }, "wrapper":true }, @@ -4010,7 +4014,8 @@ "SourceDBClusterSnapshotArn":{"shape":"String"}, "IAMDatabaseAuthenticationEnabled":{"shape":"Boolean"}, "TagList":{"shape":"TagList"}, - "DBSystemId":{"shape":"String"} + "DBSystemId":{"shape":"String"}, + "StorageType":{"shape":"String"} }, "wrapper":true }, @@ -7805,7 +7810,8 @@ "ServerlessV2ScalingConfiguration":{"shape":"ServerlessV2ScalingConfiguration"}, "NetworkType":{"shape":"String"}, "ManageMasterUserPassword":{"shape":"BooleanOptional"}, - 
"MasterUserSecretKmsKeyId":{"shape":"String"} + "MasterUserSecretKmsKeyId":{"shape":"String"}, + "StorageType":{"shape":"String"} } }, "RestoreDBClusterFromS3Result":{ @@ -8412,6 +8418,17 @@ }, "exception":true }, + "StorageTypeNotAvailableFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"StorageTypeNotAvailableFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "StorageTypeNotSupportedFault":{ "type":"structure", "members":{ diff --git a/models/apis/rds/2014-10-31/docs-2.json b/models/apis/rds/2014-10-31/docs-2.json index 55eb7145c47..1c97e66d5f2 100644 --- a/models/apis/rds/2014-10-31/docs-2.json +++ b/models/apis/rds/2014-10-31/docs-2.json @@ -442,7 +442,7 @@ "DBInstance$PubliclyAccessible": "

Specifies the accessibility options for the DB instance.

When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

For more information, see CreateDBInstance.

", "DBInstance$StorageEncrypted": "

Specifies whether the DB instance is encrypted.

", "DBInstance$CopyTagsToSnapshot": "

Specifies whether tags are copied from the DB instance to snapshots of the DB instance.

Amazon Aurora

Not applicable. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting. For more information, see DBCluster.

", - "DBInstance$IAMDatabaseAuthenticationEnabled": "

True if mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false.

IAM database authentication can be enabled for the following database engines:

", + "DBInstance$IAMDatabaseAuthenticationEnabled": "

True if mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false.

For a list of engine versions that support IAM database authentication, see IAM database authentication in the Amazon RDS User Guide and IAM database authentication in Aurora in the Amazon Aurora User Guide.

", "DBInstance$DeletionProtection": "

Indicates if the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. For more information, see Deleting a DB Instance.

", "DBInstanceAutomatedBackup$Encrypted": "

Specifies whether the automated backup is encrypted.

", "DBInstanceAutomatedBackup$IAMDatabaseAuthenticationEnabled": "

True if mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false.

", @@ -595,7 +595,7 @@ "ModifyDBInstanceMessage$DeletionProtection": "

A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance.

", "ModifyDBInstanceMessage$CertificateRotationRestart": "

A value that indicates whether the DB instance is restarted when you rotate your SSL/TLS certificate.

By default, the DB instance is restarted when you rotate your SSL/TLS certificate. The certificate is not updated until the DB instance is restarted.

Set this parameter only if you are not using SSL/TLS to connect to the DB instance.

If you are using SSL/TLS to connect to the DB instance, follow the appropriate instructions for your DB engine to rotate your SSL/TLS certificate:

This setting doesn't apply to RDS Custom.

", "ModifyDBInstanceMessage$EnableCustomerOwnedIp": "

A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance.

A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the DB instance from outside of its virtual private cloud (VPC) on your local network.

For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.

For more information about CoIPs, see Customer-owned IP addresses in the Amazon Web Services Outposts User Guide.

", - "ModifyDBInstanceMessage$ManageMasterUserPassword": "

A value that indicates whether to manage the master user password with Amazon Web Services Secrets Manager.

If the DB cluster doesn't manage the master user password with Amazon Web Services Secrets Manager, you can turn on this management. In this case, you can't specify MasterUserPassword.

If the DB cluster already manages the master user password with Amazon Web Services Secrets Manager, and you specify that the master user password is not managed with Amazon Web Services Secrets Manager, then you must specify MasterUserPassword. In this case, RDS deletes the secret and uses the new password for the master user specified by MasterUserPassword.

For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide.

Constraints:

", + "ModifyDBInstanceMessage$ManageMasterUserPassword": "

A value that indicates whether to manage the master user password with Amazon Web Services Secrets Manager.

If the DB instance doesn't manage the master user password with Amazon Web Services Secrets Manager, you can turn on this management. In this case, you can't specify MasterUserPassword.

If the DB instance already manages the master user password with Amazon Web Services Secrets Manager, and you specify that the master user password is not managed with Amazon Web Services Secrets Manager, then you must specify MasterUserPassword. In this case, RDS deletes the secret and uses the new password for the master user specified by MasterUserPassword.

For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide.

Constraints:

", "ModifyDBInstanceMessage$RotateMasterUserPassword": "

A value that indicates whether to rotate the secret managed by Amazon Web Services Secrets Manager for the master user password.

This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB cluster. The secret value contains the updated password.

For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide.

Constraints:

", "ModifyDBProxyRequest$RequireTLS": "

Whether Transport Layer Security (TLS) encryption is required for connections to the proxy. By enabling this setting, you can enforce encrypted TLS connections to the proxy, even if the associated database doesn't use TLS.

", "ModifyDBProxyRequest$DebugLogging": "

Whether the proxy includes detailed information about SQL statements in its logs. This information helps you to debug issues involving SQL behavior or the performance and scalability of the proxy connections. The debug information includes the text of SQL statements that you submit through the proxy. Thus, only enable this setting when needed for debugging, and only when you have security measures in place to safeguard any sensitive information that appears in the logs.

", @@ -3922,8 +3922,13 @@ "refs": { } }, + "StorageTypeNotAvailableFault": { + "base": "

The aurora-iopt1 storage type isn't available, because you modified the DB cluster to use this storage type less than one month ago.

", + "refs": { + } + }, "StorageTypeNotSupportedFault": { - "base": "

Storage of the StorageType specified can't be associated with the DB instance.

", + "base": "

The specified StorageType can't be associated with the DB instance.

", "refs": { } }, @@ -3969,6 +3974,7 @@ "ClusterPendingModifiedValues$DBClusterIdentifier": "

The DBClusterIdentifier value for the DB cluster.

", "ClusterPendingModifiedValues$MasterUserPassword": "

The master credentials for the DB cluster.

", "ClusterPendingModifiedValues$EngineVersion": "

The database engine version.

", + "ClusterPendingModifiedValues$StorageType": "

The storage type for the DB cluster.

", "ConnectionPoolConfiguration$InitQuery": "

One or more SQL statements for the proxy to run when opening each new database connection. Typically used with SET statements to make sure that each connection has identical settings such as time zone and character set. For multiple statements, use semicolons as the separator. You can also include multiple variables in a single SET statement, such as SET x=1, y=2.

Default: no initialization query

", "ConnectionPoolConfigurationInfo$InitQuery": "

One or more SQL statements for the proxy to run when opening each new database connection. Typically used with SET statements to make sure that each connection has identical settings such as time zone and character set. This setting is empty by default. For multiple statements, use semicolons as the separator. You can also include multiple variables in a single SET statement, such as SET x=1, y=2.

", "CopyDBClusterParameterGroupMessage$SourceDBClusterParameterGroupIdentifier": "

The identifier or Amazon Resource Name (ARN) for the source DB cluster parameter group. For information about creating an ARN, see Constructing an ARN for Amazon RDS in the Amazon Aurora User Guide.

Constraints:

", @@ -4008,12 +4014,12 @@ "CreateDBClusterMessage$ReplicationSourceIdentifier": "

The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a read replica.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "CreateDBClusterMessage$KmsKeyId": "

The Amazon Web Services KMS key identifier for an encrypted DB cluster.

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

When a KMS key isn't specified in KmsKeyId:

There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

If you create a read replica of an encrypted DB cluster in another Amazon Web Services Region, you must set KmsKeyId to a KMS key identifier that is valid in the destination Amazon Web Services Region. This KMS key is used to encrypt the read replica in that Amazon Web Services Region.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "CreateDBClusterMessage$PreSignedUrl": "

When you are replicating a DB cluster from one Amazon Web Services GovCloud (US) Region to another, an URL that contains a Signature Version 4 signed request for the CreateDBCluster operation to be called in the source Amazon Web Services Region where the DB cluster is replicated from. Specify PreSignedUrl only when you are performing cross-Region replication from an encrypted DB cluster.

The presigned URL must be a valid request for the CreateDBCluster API operation that can run in the source Amazon Web Services Region that contains the encrypted DB cluster to copy.

The presigned URL request must contain the following parameter values:

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.

If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region.

Valid for: Aurora DB clusters only

", - "CreateDBClusterMessage$EngineMode": "

The DB engine mode of the DB cluster, either provisioned or serverless.

The serverless engine mode only applies for Aurora Serverless v1 DB clusters.

Limitations and requirements apply to some DB engine modes. For more information, see the following sections in the Amazon Aurora User Guide:

Valid for: Aurora DB clusters only

", + "CreateDBClusterMessage$EngineMode": "

The DB engine mode of the DB cluster, either provisioned or serverless.

The serverless engine mode only applies for Aurora Serverless v1 DB clusters.

For information about limitations and requirements for Serverless DB clusters, see the following sections in the Amazon Aurora User Guide:

Valid for: Aurora DB clusters only

", "CreateDBClusterMessage$GlobalClusterIdentifier": "

The global cluster ID of an Aurora cluster that becomes the primary cluster in the new global database cluster.

Valid for: Aurora DB clusters only

", "CreateDBClusterMessage$Domain": "

The Active Directory directory ID to create the DB cluster in.

For Amazon Aurora DB clusters, Amazon RDS can use Kerberos authentication to authenticate users that connect to the DB cluster.

For more information, see Kerberos authentication in the Amazon Aurora User Guide.

Valid for: Aurora DB clusters only

", "CreateDBClusterMessage$DomainIAMRoleName": "

Specify the name of the IAM role to be used when making API calls to the Directory Service.

Valid for: Aurora DB clusters only

", "CreateDBClusterMessage$DBClusterInstanceClass": "

The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.

For the full list of DB instance classes and availability for your engine, see DB instance class in the Amazon RDS User Guide.

This setting is required to create a Multi-AZ DB cluster.

Valid for: Multi-AZ DB clusters only

", - "CreateDBClusterMessage$StorageType": "

Specifies the storage type to be associated with the DB cluster.

This setting is required to create a Multi-AZ DB cluster.

Valid values: io1

When specified, a value for the Iops parameter is required.

Default: io1

Valid for: Multi-AZ DB clusters only

", + "CreateDBClusterMessage$StorageType": "

Specifies the storage type to be associated with the DB cluster.

This setting is required to create a Multi-AZ DB cluster.

When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required.

Valid values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters)

Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters)

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "CreateDBClusterMessage$MonitoringRoleArn": "

The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting up and enabling Enhanced Monitoring in the Amazon RDS User Guide.

If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value.

Valid for: Multi-AZ DB clusters only

", "CreateDBClusterMessage$PerformanceInsightsKMSKeyId": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

Valid for: Multi-AZ DB clusters only

", "CreateDBClusterMessage$NetworkType": "

The network type of the DB cluster.

Valid values:

The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide.

Valid for: Aurora DB clusters only

", @@ -4117,11 +4123,11 @@ "DBCluster$DbClusterResourceId": "

The Amazon Web Services Region-unique, immutable identifier for the DB cluster. This identifier is found in Amazon Web Services CloudTrail log entries whenever the KMS key for the DB cluster is accessed.

", "DBCluster$DBClusterArn": "

The Amazon Resource Name (ARN) for the DB cluster.

", "DBCluster$CloneGroupId": "

Identifies the clone group to which the DB cluster is associated.

", - "DBCluster$EngineMode": "

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

For more information, see CreateDBCluster.

", + "DBCluster$EngineMode": "

The DB engine mode of the DB cluster, either provisioned or serverless.

For more information, see CreateDBCluster.

", "DBCluster$ActivityStreamKmsKeyId": "

The Amazon Web Services KMS key identifier used for encrypting messages in the database activity stream.

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

", "DBCluster$ActivityStreamKinesisStreamName": "

The name of the Amazon Kinesis data stream used for the database activity stream.

", "DBCluster$DBClusterInstanceClass": "

The name of the compute and memory capacity class of the DB instance.

This setting is only for non-Aurora Multi-AZ DB clusters.

", - "DBCluster$StorageType": "

The storage type associated with the DB cluster.

This setting is only for non-Aurora Multi-AZ DB clusters.

", + "DBCluster$StorageType": "

The storage type associated with the DB cluster.

", "DBCluster$MonitoringRoleArn": "

The ARN for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs.

This setting is only for non-Aurora Multi-AZ DB clusters.

", "DBCluster$PerformanceInsightsKMSKeyId": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

This setting is only for non-Aurora Multi-AZ DB clusters.

", "DBCluster$NetworkType": "

The network type of the DB instance.

Valid values:

The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide.

This setting is only for Aurora DB clusters.

", @@ -4170,6 +4176,7 @@ "DBClusterSnapshot$DBClusterSnapshotArn": "

The Amazon Resource Name (ARN) for the DB cluster snapshot.

", "DBClusterSnapshot$SourceDBClusterSnapshotArn": "

If the DB cluster snapshot was copied from a source DB cluster snapshot, the Amazon Resource Name (ARN) for the source DB cluster snapshot, otherwise, a null value.

", "DBClusterSnapshot$DBSystemId": "

Reserved for future use.

", + "DBClusterSnapshot$StorageType": "

The storage type associated with the DB cluster snapshot.

This setting is only for Aurora DB clusters.

", "DBClusterSnapshotAttribute$AttributeName": "

The name of the manual DB cluster snapshot attribute.

The attribute named restore refers to the list of Amazon Web Services accounts that have permission to copy or restore the manual DB cluster snapshot. For more information, see the ModifyDBClusterSnapshotAttribute API action.

", "DBClusterSnapshotAttributesResult$DBClusterSnapshotIdentifier": "

The identifier of the manual DB cluster snapshot that the attributes apply to.

", "DBClusterSnapshotMessage$Marker": "

An optional pagination token provided by a previous DescribeDBClusterSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", @@ -4337,7 +4344,7 @@ "DescribeCertificatesMessage$CertificateIdentifier": "

The user-supplied certificate identifier. If this parameter is specified, information for only the identified certificate is returned. This parameter isn't case-sensitive.

Constraints:

", "DescribeCertificatesMessage$Marker": "

An optional pagination token provided by a previous DescribeCertificates request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeDBClusterBacktracksMessage$DBClusterIdentifier": "

The DB cluster identifier of the DB cluster to be described. This parameter is stored as a lowercase string.

Constraints:

Example: my-cluster1

", - "DescribeDBClusterBacktracksMessage$BacktrackIdentifier": "

If specified, this value is the backtrack identifier of the backtrack to be described.

Constraints:

Example: 123e4567-e89b-12d3-a456-426655440000

", + "DescribeDBClusterBacktracksMessage$BacktrackIdentifier": "

If specified, this value is the backtrack identifier of the backtrack to be described.

Constraints:

Example: 123e4567-e89b-12d3-a456-426655440000

", "DescribeDBClusterBacktracksMessage$Marker": "

An optional pagination token provided by a previous DescribeDBClusterBacktracks request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeDBClusterEndpointsMessage$DBClusterIdentifier": "

The DB cluster identifier of the DB cluster associated with the endpoint. This parameter is stored as a lowercase string.

", "DescribeDBClusterEndpointsMessage$DBClusterEndpointIdentifier": "

The identifier of the endpoint to describe. This parameter is stored as a lowercase string.

", @@ -4531,7 +4538,7 @@ "ModifyDBClusterMessage$Domain": "

The Active Directory directory ID to move the DB cluster to. Specify none to remove the cluster from its current domain. The domain must be created prior to this operation.

For more information, see Kerberos Authentication in the Amazon Aurora User Guide.

Valid for: Aurora DB clusters only

", "ModifyDBClusterMessage$DomainIAMRoleName": "

Specify the name of the IAM role to be used when making API calls to the Directory Service.

Valid for: Aurora DB clusters only

", "ModifyDBClusterMessage$DBClusterInstanceClass": "

The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.

For the full list of DB instance classes and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

Valid for: Multi-AZ DB clusters only

", - "ModifyDBClusterMessage$StorageType": "

Specifies the storage type to be associated with the DB cluster.

Valid values: io1

When specified, a value for the Iops parameter is required.

Default: io1

Valid for: Multi-AZ DB clusters only

", + "ModifyDBClusterMessage$StorageType": "

Specifies the storage type to be associated with the DB cluster.

When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required.

Valid values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters)

Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters)

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "ModifyDBClusterMessage$MonitoringRoleArn": "

The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see To create an IAM role for Amazon RDS Enhanced Monitoring in the Amazon RDS User Guide.

If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value.

Valid for: Multi-AZ DB clusters only

", "ModifyDBClusterMessage$PerformanceInsightsKMSKeyId": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

Valid for: Multi-AZ DB clusters only

", "ModifyDBClusterMessage$NetworkType": "

The network type of the DB cluster.

Valid values:

The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide.

Valid for: Aurora DB clusters only

", @@ -4716,6 +4723,7 @@ "RestoreDBClusterFromS3Message$DomainIAMRoleName": "

Specify the name of the IAM role to be used when making API calls to the Directory Service.

", "RestoreDBClusterFromS3Message$NetworkType": "

The network type of the DB cluster.

Valid values:

The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide.

", "RestoreDBClusterFromS3Message$MasterUserSecretKmsKeyId": "

The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager.

This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB cluster.

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key.

There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

", + "RestoreDBClusterFromS3Message$StorageType": "

Specifies the storage type to be associated with the DB cluster.

Valid values: aurora, aurora-iopt1

Default: aurora

Valid for: Aurora DB clusters only

", "RestoreDBClusterFromSnapshotMessage$DBClusterIdentifier": "

The name of the DB cluster to create from the DB snapshot or DB cluster snapshot. This parameter isn't case-sensitive.

Constraints:

Example: my-snapshot-id

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "RestoreDBClusterFromSnapshotMessage$SnapshotIdentifier": "

The identifier for the DB snapshot or DB cluster snapshot to restore from.

You can use either the name or the Amazon Resource Name (ARN) to specify a DB cluster snapshot. However, you can use only the ARN to specify a DB snapshot.

Constraints:

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "RestoreDBClusterFromSnapshotMessage$Engine": "

The database engine to use for the new DB cluster.

Default: The same as source

Constraint: Must be compatible with the engine of the source

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", @@ -4729,7 +4737,7 @@ "RestoreDBClusterFromSnapshotMessage$Domain": "

Specify the Active Directory directory ID to restore the DB cluster in. The domain must be created prior to this operation. Currently, only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.

For more information, see Kerberos Authentication in the Amazon RDS User Guide.

Valid for: Aurora DB clusters only

", "RestoreDBClusterFromSnapshotMessage$DomainIAMRoleName": "

Specify the name of the IAM role to be used when making API calls to the Directory Service.

Valid for: Aurora DB clusters only

", "RestoreDBClusterFromSnapshotMessage$DBClusterInstanceClass": "

The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.

For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

Valid for: Multi-AZ DB clusters only

", - "RestoreDBClusterFromSnapshotMessage$StorageType": "

Specifies the storage type to be associated with the each DB instance in the Multi-AZ DB cluster.

Valid values: io1

When specified, a value for the Iops parameter is required.

Default: io1

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", + "RestoreDBClusterFromSnapshotMessage$StorageType": "

Specifies the storage type to be associated with the DB cluster.

When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required.

Valid values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters)

Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters)

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "RestoreDBClusterFromSnapshotMessage$NetworkType": "

The network type of the DB cluster.

Valid values:

The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide.

Valid for: Aurora DB clusters only

", "RestoreDBClusterToPointInTimeMessage$DBClusterIdentifier": "

The name of the new DB cluster to be created.

Constraints:

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "RestoreDBClusterToPointInTimeMessage$RestoreType": "

The type of restore to be performed. You can specify one of the following values:

If you don't specify a RestoreType value, then the new DB cluster is restored as a full copy of the source DB cluster.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", @@ -4742,7 +4750,7 @@ "RestoreDBClusterToPointInTimeMessage$DomainIAMRoleName": "

Specify the name of the IAM role to be used when making API calls to the Directory Service.

Valid for: Aurora DB clusters only

", "RestoreDBClusterToPointInTimeMessage$EngineMode": "

The engine mode of the new cluster. Specify provisioned or serverless, depending on the type of the cluster you are creating. You can create an Aurora Serverless v1 clone from a provisioned cluster, or a provisioned clone from an Aurora Serverless v1 cluster. To create a clone that is an Aurora Serverless v1 cluster, the original cluster must be an Aurora Serverless v1 cluster or an encrypted provisioned cluster.

Valid for: Aurora DB clusters only

", "RestoreDBClusterToPointInTimeMessage$DBClusterInstanceClass": "

The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.

For the full list of DB instance classes, and availability for your engine, see DB instance class in the Amazon RDS User Guide.

Valid for: Multi-AZ DB clusters only

", - "RestoreDBClusterToPointInTimeMessage$StorageType": "

Specifies the storage type to be associated with the each DB instance in the Multi-AZ DB cluster.

Valid values: io1

When specified, a value for the Iops parameter is required.

Default: io1

Valid for: Multi-AZ DB clusters only

", + "RestoreDBClusterToPointInTimeMessage$StorageType": "

Specifies the storage type to be associated with the DB cluster.

When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required.

Valid values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters)

Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters)

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "RestoreDBClusterToPointInTimeMessage$NetworkType": "

The network type of the DB cluster.

Valid values:

The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide.

Valid for: Aurora DB clusters only

", "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceIdentifier": "

Name of the DB instance to create from the DB snapshot. This parameter isn't case-sensitive.

Constraints:

Example: my-snapshot-id

", "RestoreDBInstanceFromDBSnapshotMessage$DBSnapshotIdentifier": "

The identifier for the DB snapshot to restore from.

Constraints:

", @@ -5016,6 +5024,7 @@ "DBCluster$LatestRestorableTime": "

Specifies the latest time to which a database can be restored with point-in-time restore.

", "DBCluster$ClusterCreateTime": "

Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC).

", "DBCluster$EarliestBacktrackTime": "

The earliest time to which a DB cluster can be backtracked.

", + "DBCluster$IOOptimizedNextAllowedModificationTime": "

The next time you can modify the DB cluster to use the aurora-iopt1 storage type.

This setting is only for Aurora DB clusters.

", "DBClusterBacktrack$BacktrackTo": "

The timestamp of the time to which the DB cluster was backtracked.

", "DBClusterBacktrack$BacktrackedFrom": "

The timestamp of the time from which the DB cluster was backtracked.

", "DBClusterBacktrack$BacktrackRequestCreationTime": "

The timestamp of the time at which the backtrack was requested.

", diff --git a/models/apis/swf/2012-01-25/api-2.json b/models/apis/swf/2012-01-25/api-2.json index 2233e52449c..fec93b09f59 100644 --- a/models/apis/swf/2012-01-25/api-2.json +++ b/models/apis/swf/2012-01-25/api-2.json @@ -1730,7 +1730,8 @@ "identity":{"shape":"Identity"}, "nextPageToken":{"shape":"PageToken"}, "maximumPageSize":{"shape":"PageSize"}, - "reverseOrder":{"shape":"ReverseOrder"} + "reverseOrder":{"shape":"ReverseOrder"}, + "startAtPreviousStartedEvent":{"shape":"StartAtPreviousStartedEvent"} } }, "RecordActivityTaskHeartbeatInput":{ @@ -2141,6 +2142,7 @@ "input":{"shape":"Data"} } }, + "StartAtPreviousStartedEvent":{"type":"boolean"}, "StartChildWorkflowExecutionDecisionAttributes":{ "type":"structure", "required":[ diff --git a/models/apis/swf/2012-01-25/docs-2.json b/models/apis/swf/2012-01-25/docs-2.json index 5031fae6ec0..0b11e425a54 100644 --- a/models/apis/swf/2012-01-25/docs-2.json +++ b/models/apis/swf/2012-01-25/docs-2.json @@ -49,7 +49,7 @@ "ActivityTaskScheduledEventAttributes$activityId": "

The unique ID of the activity task.

", "RequestCancelActivityTaskDecisionAttributes$activityId": "

The activityId of the activity task to be canceled.

", "RequestCancelActivityTaskFailedEventAttributes$activityId": "

The activityId provided in the RequestCancelActivityTask decision that failed.

", - "ScheduleActivityTaskDecisionAttributes$activityId": "

The activityId of the activity task.

The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not contain the literal string arn.

", + "ScheduleActivityTaskDecisionAttributes$activityId": "

The activityId of the activity task.

The specified string must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", "ScheduleActivityTaskFailedEventAttributes$activityId": "

The activityId provided in the ScheduleActivityTask decision that failed.

" } }, @@ -577,11 +577,11 @@ "PollForActivityTaskInput$domain": "

The name of the domain that contains the task lists being polled.

", "PollForDecisionTaskInput$domain": "

The name of the domain containing the task lists to poll.

", "RegisterActivityTypeInput$domain": "

The name of the domain in which this activity is to be registered.

", - "RegisterDomainInput$name": "

Name of the domain to register. The name must be unique in the region that the domain is registered in.

The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", + "RegisterDomainInput$name": "

Name of the domain to register. The name must be unique in the region that the domain is registered in.

The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", "RegisterWorkflowTypeInput$domain": "

The name of the domain in which to register the workflow type.

", "RequestCancelWorkflowExecutionInput$domain": "

The name of the domain containing the workflow execution to cancel.

", "SignalWorkflowExecutionInput$domain": "

The name of the domain containing the workflow execution to signal.

", - "StartWorkflowExecutionInput$domain": "

The name of the domain in which the workflow execution is created.

", + "StartWorkflowExecutionInput$domain": "

The name of the domain in which the workflow execution is created.

The specified string must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", "TerminateWorkflowExecutionInput$domain": "

The domain of the workflow execution to terminate.

", "UndeprecateActivityTypeInput$domain": "

The name of the domain of the deprecated activity type.

", "UndeprecateDomainInput$name": "

The name of the deprecated domain.

", @@ -629,7 +629,7 @@ "ScheduleActivityTaskDecisionAttributes$scheduleToStartTimeout": "

If set, specifies the maximum duration the activity task can wait to be assigned to a worker. This overrides the default schedule-to-start timeout specified when registering the activity type using RegisterActivityType.

The duration is specified in seconds, an integer greater than or equal to 0. You can use NONE to specify unlimited duration.

A schedule-to-start timeout for this activity task must be specified either as a default for the activity type or through this field. If neither this field is set nor a default schedule-to-start timeout was specified at registration time then a fault is returned.

", "ScheduleActivityTaskDecisionAttributes$startToCloseTimeout": "

If set, specifies the maximum duration a worker may take to process this activity task. This overrides the default start-to-close timeout specified when registering the activity type using RegisterActivityType.

The duration is specified in seconds, an integer greater than or equal to 0. You can use NONE to specify unlimited duration.

A start-to-close timeout for this activity task must be specified either as a default for the activity type or through this field. If neither this field is set nor a default start-to-close timeout was specified at registration time then a fault is returned.

", "ScheduleActivityTaskDecisionAttributes$heartbeatTimeout": "

If set, specifies the maximum time before which a worker processing a task of this type must report progress by calling RecordActivityTaskHeartbeat. If the timeout is exceeded, the activity task is automatically timed out. If the worker subsequently attempts to record a heartbeat or returns a result, it is ignored. This overrides the default heartbeat timeout specified when registering the activity type using RegisterActivityType.

The duration is specified in seconds, an integer greater than or equal to 0. You can use NONE to specify unlimited duration.

", - "ScheduleLambdaFunctionDecisionAttributes$startToCloseTimeout": "

The timeout value, in seconds, after which the Lambda function is considered to be failed once it has started. This can be any integer from 1-300 (1s-5m). If no value is supplied, than a default value of 300s is assumed.

", + "ScheduleLambdaFunctionDecisionAttributes$startToCloseTimeout": "

The timeout value, in seconds, after which the Lambda function is considered to be failed once it has started. This can be any integer from 1-900 (1s-15m).

If no value is supplied, then a default value of 900s is assumed.

", "StartChildWorkflowExecutionDecisionAttributes$executionStartToCloseTimeout": "

The total duration for this workflow execution. This overrides the defaultExecutionStartToCloseTimeout specified when registering the workflow type.

The duration is specified in seconds, an integer greater than or equal to 0. You can use NONE to specify unlimited duration.

An execution start-to-close timeout for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default execution start-to-close timeout was specified at registration time then a fault is returned.

", "StartChildWorkflowExecutionDecisionAttributes$taskStartToCloseTimeout": "

Specifies the maximum duration of decision tasks for this workflow execution. This parameter overrides the defaultTaskStartToCloseTimout specified when registering the workflow type using RegisterWorkflowType.

The duration is specified in seconds, an integer greater than or equal to 0. You can use NONE to specify unlimited duration.

A task start-to-close timeout for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task start-to-close timeout was specified at registration time then a fault is returned.

", "StartChildWorkflowExecutionInitiatedEventAttributes$executionStartToCloseTimeout": "

The maximum duration for the child workflow execution. If the workflow execution isn't closed within this duration, it is timed out and force-terminated.

The duration is specified in seconds, an integer greater than or equal to 0. You can use NONE to specify unlimited duration.

", @@ -960,8 +960,8 @@ "ActivityType$name": "

The name of this activity.

The combination of activity type name and version must be unique within a domain.

", "ListActivityTypesInput$name": "

If specified, only lists the activity types that have this name.

", "ListWorkflowTypesInput$name": "

If specified, lists the workflow type with this name.

", - "RegisterActivityTypeInput$name": "

The name of the activity type within the domain.

The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", - "RegisterWorkflowTypeInput$name": "

The name of the workflow type.

The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", + "RegisterActivityTypeInput$name": "

The name of the activity type within the domain.

The specified string must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", + "RegisterWorkflowTypeInput$name": "

The name of the workflow type.

The specified string must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", "TaskList$name": "

The name of the task list.

", "WorkflowType$name": "

The name of the workflow type.

The combination of workflow type name and version must be unique with in a domain.

", "WorkflowTypeFilter$name": "

Name of the workflow type.

" @@ -996,14 +996,14 @@ "ActivityTypeInfos$nextPageToken": "

If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

The configured maximumPageSize determines how many results can be returned in a single call.

", "DecisionTask$nextPageToken": "

If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

The configured maximumPageSize determines how many results can be returned in a single call.

", "DomainInfos$nextPageToken": "

If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

The configured maximumPageSize determines how many results can be returned in a single call.

", - "GetWorkflowExecutionHistoryInput$nextPageToken": "

If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a 400 error: \"Specified token has exceeded its maximum lifetime\".

The configured maximumPageSize determines how many results can be returned in a single call.

", + "GetWorkflowExecutionHistoryInput$nextPageToken": "

If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return a 400 error: \"Specified token has exceeded its maximum lifetime\".

The configured maximumPageSize determines how many results can be returned in a single call.

", "History$nextPageToken": "

If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

The configured maximumPageSize determines how many results can be returned in a single call.

", - "ListActivityTypesInput$nextPageToken": "

If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a 400 error: \"Specified token has exceeded its maximum lifetime\".

The configured maximumPageSize determines how many results can be returned in a single call.

", - "ListClosedWorkflowExecutionsInput$nextPageToken": "

If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a 400 error: \"Specified token has exceeded its maximum lifetime\".

The configured maximumPageSize determines how many results can be returned in a single call.

", - "ListDomainsInput$nextPageToken": "

If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a 400 error: \"Specified token has exceeded its maximum lifetime\".

The configured maximumPageSize determines how many results can be returned in a single call.

", - "ListOpenWorkflowExecutionsInput$nextPageToken": "

If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a 400 error: \"Specified token has exceeded its maximum lifetime\".

The configured maximumPageSize determines how many results can be returned in a single call.

", - "ListWorkflowTypesInput$nextPageToken": "

If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a 400 error: \"Specified token has exceeded its maximum lifetime\".

The configured maximumPageSize determines how many results can be returned in a single call.

", - "PollForDecisionTaskInput$nextPageToken": "

If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a 400 error: \"Specified token has exceeded its maximum lifetime\".

The configured maximumPageSize determines how many results can be returned in a single call.

The nextPageToken returned by this action cannot be used with GetWorkflowExecutionHistory to get the next page. You must call PollForDecisionTask again (with the nextPageToken) to retrieve the next page of history records. Calling PollForDecisionTask with a nextPageToken doesn't return a new decision task.

", + "ListActivityTypesInput$nextPageToken": "

If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return a 400 error: \"Specified token has exceeded its maximum lifetime\".

The configured maximumPageSize determines how many results can be returned in a single call.

", + "ListClosedWorkflowExecutionsInput$nextPageToken": "

If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return a 400 error: \"Specified token has exceeded its maximum lifetime\".

The configured maximumPageSize determines how many results can be returned in a single call.

", + "ListDomainsInput$nextPageToken": "

If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return a 400 error: \"Specified token has exceeded its maximum lifetime\".

The configured maximumPageSize determines how many results can be returned in a single call.

", + "ListOpenWorkflowExecutionsInput$nextPageToken": "

If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return a 400 error: \"Specified token has exceeded its maximum lifetime\".

The configured maximumPageSize determines how many results can be returned in a single call.

", + "ListWorkflowTypesInput$nextPageToken": "

If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return a 400 error: \"Specified token has exceeded its maximum lifetime\".

The configured maximumPageSize determines how many results can be returned in a single call.

", + "PollForDecisionTaskInput$nextPageToken": "

If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return a 400 error: \"Specified token has exceeded its maximum lifetime\".

The configured maximumPageSize determines how many results can be returned in a single call.

The nextPageToken returned by this action cannot be used with GetWorkflowExecutionHistory to get the next page. You must call PollForDecisionTask again (with the nextPageToken) to retrieve the next page of history records. Calling PollForDecisionTask with a nextPageToken doesn't return a new decision task.

", "WorkflowExecutionInfos$nextPageToken": "

If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

The configured maximumPageSize determines how many results can be returned in a single call.

", "WorkflowTypeInfos$nextPageToken": "

If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

The configured maximumPageSize determines how many results can be returned in a single call.

" } @@ -1263,6 +1263,12 @@ "refs": { } }, + "StartAtPreviousStartedEvent": { + "base": null, + "refs": { + "PollForDecisionTaskInput$startAtPreviousStartedEvent": "

When set to true, returns the events with eventTimestamp greater than or equal to eventTimestamp of the most recent DecisionTaskStarted event. By default, this parameter is set to false.

" + } + }, "StartChildWorkflowExecutionDecisionAttributes": { "base": "

Provides the details of the StartChildWorkflowExecution decision.

Access Control

You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide.

", "refs": { @@ -1364,14 +1370,14 @@ "CountPendingActivityTasksInput$taskList": "

The name of the task list.

", "CountPendingDecisionTasksInput$taskList": "

The name of the task list.

", "DecisionTaskScheduledEventAttributes$taskList": "

The name of the task list in which the decision task was scheduled.

", - "PollForActivityTaskInput$taskList": "

Specifies the task list to poll for activity tasks.

The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", - "PollForDecisionTaskInput$taskList": "

Specifies the task list to poll for decision tasks.

The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", + "PollForActivityTaskInput$taskList": "

Specifies the task list to poll for activity tasks.

The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", + "PollForDecisionTaskInput$taskList": "

Specifies the task list to poll for decision tasks.

The specified string must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", "RegisterActivityTypeInput$defaultTaskList": "

If set, specifies the default task list to use for scheduling tasks of this activity type. This default task list is used if a task list isn't provided when a task is scheduled through the ScheduleActivityTask Decision.

", "RegisterWorkflowTypeInput$defaultTaskList": "

If set, specifies the default task list to use for scheduling decision tasks for executions of this workflow type. This default is used only if a task list isn't provided when starting the execution through the StartWorkflowExecution Action or StartChildWorkflowExecution Decision.

", - "ScheduleActivityTaskDecisionAttributes$taskList": "

If set, specifies the name of the task list in which to schedule the activity task. If not specified, the defaultTaskList registered with the activity type is used.

A task list for this activity task must be specified either as a default for the activity type or through this field. If neither this field is set nor a default task list was specified at registration time then a fault is returned.

The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not contain the literal string arn.

", - "StartChildWorkflowExecutionDecisionAttributes$taskList": "

The name of the task list to be used for decision tasks of the child workflow execution.

A task list for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task list was specified at registration time then a fault is returned.

The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not contain the literal string arn.

", + "ScheduleActivityTaskDecisionAttributes$taskList": "

If set, specifies the name of the task list in which to schedule the activity task. If not specified, the defaultTaskList registered with the activity type is used.

A task list for this activity task must be specified either as a default for the activity type or through this field. If neither this field is set nor a default task list was specified at registration time then a fault is returned.

The specified string must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", + "StartChildWorkflowExecutionDecisionAttributes$taskList": "

The name of the task list to be used for decision tasks of the child workflow execution.

A task list for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task list was specified at registration time then a fault is returned.

The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", "StartChildWorkflowExecutionInitiatedEventAttributes$taskList": "

The name of the task list used for the decision tasks of the child workflow execution.

", - "StartWorkflowExecutionInput$taskList": "

The task list to use for the decision tasks generated for this workflow execution. This overrides the defaultTaskList specified when registering the workflow type.

A task list for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task list was specified at registration time then a fault is returned.

The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", + "StartWorkflowExecutionInput$taskList": "

The task list to use for the decision tasks generated for this workflow execution. This overrides the defaultTaskList specified when registering the workflow type.

A task list for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task list was specified at registration time then a fault is returned.

The specified string must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", "WorkflowExecutionConfiguration$taskList": "

The task list used for the decision tasks generated for this workflow execution.

", "WorkflowExecutionContinuedAsNewEventAttributes$taskList": "

The task list to use for the decisions of the new (continued) workflow execution.

", "WorkflowExecutionStartedEventAttributes$taskList": "

The name of the task list for scheduling the decision tasks for this workflow execution.

", @@ -1438,7 +1444,7 @@ "refs": { "CancelTimerDecisionAttributes$timerId": "

The unique ID of the timer to cancel.

", "CancelTimerFailedEventAttributes$timerId": "

The timerId provided in the CancelTimer decision that failed.

", - "StartTimerDecisionAttributes$timerId": "

The unique ID of the timer.

The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not contain the literal string arn.

", + "StartTimerDecisionAttributes$timerId": "

The unique ID of the timer.

The specified string must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", "StartTimerFailedEventAttributes$timerId": "

The timerId provided in the StartTimer decision that failed.

", "TimerCanceledEventAttributes$timerId": "

The unique ID of the timer that was canceled.

", "TimerFiredEventAttributes$timerId": "

The unique ID of the timer that fired.

", @@ -1518,8 +1524,8 @@ "refs": { "ActivityType$version": "

The version of this activity.

The combination of activity type name and version must be unique with in a domain.

", "ContinueAsNewWorkflowExecutionDecisionAttributes$workflowTypeVersion": "

The version of the workflow to start.

", - "RegisterActivityTypeInput$version": "

The version of the activity type.

The activity type consists of the name and version, the combination of which must be unique within the domain.

The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", - "RegisterWorkflowTypeInput$version": "

The version of the workflow type.

The workflow type consists of the name and version, the combination of which must be unique within the domain. To get a list of all currently registered workflow types, use the ListWorkflowTypes action.

The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", + "RegisterActivityTypeInput$version": "

The version of the activity type.

The activity type consists of the name and version, the combination of which must be unique within the domain.

The specified string must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", + "RegisterWorkflowTypeInput$version": "

The version of the workflow type.

The workflow type consists of the name and version, the combination of which must be unique within the domain. To get a list of all currently registered workflow types, use the ListWorkflowTypes action.

The specified string must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", "WorkflowType$version": "

The version of the workflow type.

The combination of workflow type name and version must be unique with in a domain.

" } }, @@ -1689,10 +1695,10 @@ "SignalExternalWorkflowExecutionFailedEventAttributes$workflowId": "

The workflowId of the external workflow execution that the signal was being delivered to.

", "SignalExternalWorkflowExecutionInitiatedEventAttributes$workflowId": "

The workflowId of the external workflow execution.

", "SignalWorkflowExecutionInput$workflowId": "

The workflowId of the workflow execution to signal.

", - "StartChildWorkflowExecutionDecisionAttributes$workflowId": "

The workflowId of the workflow execution.

The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not contain the literal string arn.

", + "StartChildWorkflowExecutionDecisionAttributes$workflowId": "

The workflowId of the workflow execution.

The specified string must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", "StartChildWorkflowExecutionFailedEventAttributes$workflowId": "

The workflowId of the child workflow execution.

", "StartChildWorkflowExecutionInitiatedEventAttributes$workflowId": "

The workflowId of the child workflow execution.

", - "StartWorkflowExecutionInput$workflowId": "

The user defined identifier associated with the workflow execution. You can use this to associate a custom identifier with the workflow execution. You may specify the same identifier if a workflow execution is logically a restart of a previous execution. You cannot have two open workflow executions with the same workflowId at the same time within the same domain.

The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", + "StartWorkflowExecutionInput$workflowId": "

The user defined identifier associated with the workflow execution. You can use this to associate a custom identifier with the workflow execution. You may specify the same identifier if a workflow execution is logically a restart of a previous execution. You cannot have two open workflow executions with the same workflowId at the same time within the same domain.

The specified string must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not be the literal string arn.

", "TerminateWorkflowExecutionInput$workflowId": "

The workflowId of the workflow execution to terminate.

", "WorkflowExecution$workflowId": "

The user defined identifier associated with the workflow execution.

", "WorkflowExecutionFilter$workflowId": "

The workflowId to pass of match the criteria of this filter.

" diff --git a/models/apis/swf/2012-01-25/endpoint-rule-set-1.json b/models/apis/swf/2012-01-25/endpoint-rule-set-1.json index dee6b2cbbb6..64949755fc8 100644 --- a/models/apis/swf/2012-01-25/endpoint-rule-set-1.json +++ b/models/apis/swf/2012-01-25/endpoint-rule-set-1.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": 
"booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,179 +111,240 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://swf-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://swf-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - 
"rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ - "aws-us-gov", + true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "name" + "supportsFIPS" ] } ] } ], - "endpoint": { - "url": "https://swf.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-us-gov", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://swf.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://swf-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] }, { "conditions": [], - "endpoint": { - "url": "https://swf-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - 
"supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://swf.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -311,7 +352,7 @@ { "conditions": [], "endpoint": { - "url": "https://swf.{Region}.{PartitionResult#dualStackDnsSuffix}", + "url": "https://swf.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -320,66 +361,13 @@ ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "us-gov-east-1" - ] - } - ], - "endpoint": { - "url": "https://swf.us-gov-east-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "us-gov-west-1" - ] - } - ], - "endpoint": { - "url": "https://swf.us-gov-west-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://swf.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/models/apis/swf/2012-01-25/endpoint-tests-1.json 
b/models/apis/swf/2012-01-25/endpoint-tests-1.json index df163542784..12163ee63d1 100644 --- a/models/apis/swf/2012-01-25/endpoint-tests-1.json +++ b/models/apis/swf/2012-01-25/endpoint-tests-1.json @@ -1,42 +1,55 @@ { "testCases": [ { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.us-iso-east-1.c2s.ic.gov" + "url": "https://swf.af-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "af-south-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.us-iso-west-1.c2s.ic.gov" + "url": "https://swf.ap-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-east-1", "UseFIPS": false, - "Region": "us-iso-west-1" + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://swf.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-iso-east-1" + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://swf.ap-northeast-2.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -47,87 +60,87 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-3", "UseFIPS": false, - "Region": 
"ap-northeast-3" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.us-east-1.amazonaws.com" + "url": "https://swf.ap-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-south-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf-fips.us-east-1.amazonaws.com" + "url": "https://swf.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-east-1" + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.eu-west-1.amazonaws.com" + "url": "https://swf.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.eu-west-2.amazonaws.com" + "url": "https://swf.ap-southeast-3.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-3", "UseFIPS": false, - "Region": "eu-west-2" + "UseDualStack": false } }, { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and 
DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.eu-west-3.amazonaws.com" + "url": "https://swf.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ca-central-1", "UseFIPS": false, - "Region": "eu-west-3" + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.me-south-1.amazonaws.com" + "url": "https://swf.eu-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "eu-central-1", "UseFIPS": false, - "Region": "me-south-1" + "UseDualStack": false } }, { @@ -138,139 +151,139 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-north-1", "UseFIPS": false, - "Region": "eu-north-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.us-east-2.amazonaws.com" + "url": "https://swf.eu-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "eu-south-1", "UseFIPS": false, - "Region": "us-east-2" + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf-fips.us-east-2.amazonaws.com" + "url": "https://swf.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-east-2" + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://swf.sa-east-1.amazonaws.com" + "url": "https://swf.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "eu-west-2", "UseFIPS": false, - "Region": "sa-east-1" + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.ap-east-1.amazonaws.com" + "url": "https://swf.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "eu-west-3", "UseFIPS": false, - "Region": "ap-east-1" + "UseDualStack": false } }, { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.eu-south-1.amazonaws.com" + "url": "https://swf.me-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "me-south-1", "UseFIPS": false, - "Region": "eu-south-1" + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.eu-central-1.amazonaws.com" + "url": "https://swf.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "sa-east-1", "UseFIPS": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.ap-southeast-1.amazonaws.com" + "url": "https://swf.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { - "documentation": "For 
region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.ap-southeast-2.amazonaws.com" + "url": "https://swf-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "ap-southeast-2" + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.ap-southeast-3.amazonaws.com" + "url": "https://swf.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-2", "UseFIPS": false, - "Region": "ap-southeast-3" + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.ca-central-1.amazonaws.com" + "url": "https://swf-fips.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "ca-central-1" + "Region": "us-east-2", + "UseFIPS": true, + "UseDualStack": false } }, { @@ -281,9 +294,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-1", "UseFIPS": false, - "Region": "us-west-1" + "UseDualStack": false } }, { @@ -294,9 +307,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-1", "UseFIPS": true, - "Region": "us-west-1" + "UseDualStack": false } }, { @@ -307,9 +320,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": false, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -320,139 +333,152 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": true, - "Region": "us-west-2" + "UseDualStack": false } }, { - 
"documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://swf.af-south-1.amazonaws.com" + "url": "https://swf-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "af-south-1" + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://swf.ap-south-1.amazonaws.com" + "url": "https://swf.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "ap-south-1" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.ap-northeast-1.amazonaws.com" + "url": "https://swf.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.ap-northeast-2.amazonaws.com" + "url": "https://swf.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-northwest-1", "UseFIPS": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": 
"https://swf-fips.us-east-1.api.aws" + "url": "https://swf-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.us-east-1.api.aws" + "url": "https://swf-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://swf.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.us-gov-west-1.amazonaws.com" + "url": "https://swf.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.us-gov-west-1.amazonaws.com" + "url": "https://swf.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 
with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.us-gov-east-1.amazonaws.com" + "url": "https://swf.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.us-gov-east-1.amazonaws.com" + "url": "https://swf.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -463,9 +489,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -476,113 +502,144 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.us-isob-east-1.sc2s.sgov.gov" + "url": "https://swf.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf-fips.us-isob-east-1.sc2s.sgov.gov" + "url": "https://swf.us-iso-west-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + 
"documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.cn-northwest-1.amazonaws.com.cn" + "url": "https://swf-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "cn-northwest-1" + "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf.cn-north-1.amazonaws.com.cn" + "url": "https://swf.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://swf-fips.cn-north-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "cn-north-1" + 
"UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://swf-fips.cn-north-1.amazonaws.com.cn" + "url": "https://swf-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://swf.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://example.com" } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false, + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -592,9 +649,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -604,11 +661,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are 
not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/models/apis/swf/2012-01-25/smoke.json b/models/apis/swf/2012-01-25/smoke.json new file mode 100644 index 00000000000..fd79cd97a29 --- /dev/null +++ b/models/apis/swf/2012-01-25/smoke.json @@ -0,0 +1,20 @@ +{ + "version": 1, + "defaultRegion": "us-west-2", + "testCases": [ + { + "operationName": "ListDomains", + "input": { + "registrationStatus": "REGISTERED" + }, + "errorExpectedFromService": false + }, + { + "operationName": "DescribeDomain", + "input": { + "name": "fake_domain" + }, + "errorExpectedFromService": true + } + ] +} \ No newline at end of file diff --git a/service/emr/api.go b/service/emr/api.go index d32fef7c3e8..8acc5107081 100644 --- a/service/emr/api.go +++ b/service/emr/api.go @@ -58,7 +58,7 @@ func (c *EMR) AddInstanceFleetRequest(input *AddInstanceFleetInput) (req *reques // // Adds an instance fleet to a running cluster. // -// The instance fleet configuration is available only in Amazon EMR versions +// The instance fleet configuration is available only in Amazon EMR releases // 4.8.0 and later, excluding 5.0.x. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -414,7 +414,7 @@ func (c *EMR) CancelStepsRequest(input *CancelStepsInput) (req *request.Request, // EMR versions 4.8.0 and later, excluding version 5.0.0. A maximum of 256 steps // are allowed in each CancelSteps request. CancelSteps is idempotent but asynchronous; // it does not guarantee that a step will be canceled, even if the request is -// successfully submitted. When you use Amazon EMR versions 5.28.0 and later, +// successfully submitted. 
When you use Amazon EMR releases 5.28.0 and later, // you can cancel steps that are in a PENDING or RUNNING state. In earlier versions // of Amazon EMR, you can only cancel steps that are in a PENDING state. // @@ -1281,10 +1281,10 @@ func (c *EMR) DescribeReleaseLabelRequest(input *DescribeReleaseLabelInput) (req // DescribeReleaseLabel API operation for Amazon EMR. // -// Provides EMR release label details, such as releases available the region -// where the API request is run, and the available applications for a specific -// EMR release label. Can also list EMR release versions that support a specified -// version of Spark. +// Provides Amazon EMR release label details, such as the releases available +// the Region where the API request is run, and the available applications for +// a specific Amazon EMR release label. Can also list Amazon EMR releases that +// support a specified version of Spark. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2313,7 +2313,7 @@ func (c *EMR) ListInstanceFleetsRequest(input *ListInstanceFleetsInput) (req *re // // Lists all available details about the instance fleets in a cluster. // -// The instance fleet configuration is available only in Amazon EMR versions +// The instance fleet configuration is available only in Amazon EMR releases // 4.8.0 and later, excluding 5.0.x versions. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2594,10 +2594,10 @@ func (c *EMR) ListInstancesRequest(input *ListInstancesInput) (req *request.Requ // ListInstances API operation for Amazon EMR. // -// Provides information for all active EC2 instances and EC2 instances terminated -// in the last 30 days, up to a maximum of 2,000. EC2 instances in any of the -// following states are considered active: AWAITING_FULFILLMENT, PROVISIONING, -// BOOTSTRAPPING, RUNNING. 
+// Provides information for all active Amazon EC2 instances and Amazon EC2 instances +// terminated in the last 30 days, up to a maximum of 2,000. Amazon EC2 instances +// in any of the following states are considered active: AWAITING_FULFILLMENT, +// PROVISIONING, BOOTSTRAPPING, RUNNING. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2740,7 +2740,7 @@ func (c *EMR) ListNotebookExecutionsRequest(input *ListNotebookExecutionsInput) // Provides summaries of all notebook executions. You can filter the list based // on multiple criteria such as status, time range, and editor id. Returns a // maximum of 50 notebook executions and a marker to track the paging of a longer -// notebook execution list across multiple ListNotebookExecution calls. +// notebook execution list across multiple ListNotebookExecutions calls. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2880,7 +2880,8 @@ func (c *EMR) ListReleaseLabelsRequest(input *ListReleaseLabelsInput) (req *requ // ListReleaseLabels API operation for Amazon EMR. // -// Retrieves release labels of EMR services in the region where the API is called. +// Retrieves release labels of Amazon EMR services in the Region where the API +// is called. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3673,7 +3674,7 @@ func (c *EMR) ModifyInstanceFleetRequest(input *ModifyInstanceFleetInput) (req * // fleet with the specified InstanceFleetID within the cluster specified using // ClusterID. The call either succeeds or fails atomically. 
// -// The instance fleet configuration is available only in Amazon EMR versions +// The instance fleet configuration is available only in Amazon EMR releases // 4.8.0 and later, excluding 5.0.x versions. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3842,8 +3843,8 @@ func (c *EMR) PutAutoScalingPolicyRequest(input *PutAutoScalingPolicyInput) (req // // Creates or updates an automatic scaling policy for a core instance group // or task instance group in an Amazon EMR cluster. The automatic scaling policy -// defines how an instance group dynamically adds and terminates EC2 instances -// in response to the value of a CloudWatch metric. +// defines how an instance group dynamically adds and terminates Amazon EC2 +// instances in response to the value of a CloudWatch metric. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3917,7 +3918,7 @@ func (c *EMR) PutAutoTerminationPolicyRequest(input *PutAutoTerminationPolicyInp // PutAutoTerminationPolicy API operation for Amazon EMR. // -// Auto-termination is supported in Amazon EMR versions 5.30.0 and 6.1.0 and +// Auto-termination is supported in Amazon EMR releases 5.30.0 and 6.1.0 and // later. For more information, see Using an auto-termination policy (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-auto-termination-policy.html). // // Creates or updates an auto-termination policy for an Amazon EMR cluster. @@ -4085,9 +4086,10 @@ func (c *EMR) PutManagedScalingPolicyRequest(input *PutManagedScalingPolicyInput // PutManagedScalingPolicy API operation for Amazon EMR. // // Creates or updates a managed scaling policy for an Amazon EMR cluster. The -// managed scaling policy defines the limits for resources, such as EC2 instances -// that can be added or terminated from a cluster. The policy only applies to -// the core and task nodes. 
The master node cannot be scaled after initial configuration. +// managed scaling policy defines the limits for resources, such as Amazon EC2 +// instances that can be added or terminated from a cluster. The policy only +// applies to the core and task nodes. The master node cannot be scaled after +// initial configuration. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4162,7 +4164,7 @@ func (c *EMR) RemoveAutoScalingPolicyRequest(input *RemoveAutoScalingPolicyInput // RemoveAutoScalingPolicy API operation for Amazon EMR. // // Removes an automatic scaling policy from a specified instance group within -// an EMR cluster. +// an Amazon EMR cluster. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4310,7 +4312,7 @@ func (c *EMR) RemoveManagedScalingPolicyRequest(input *RemoveManagedScalingPolic // RemoveManagedScalingPolicy API operation for Amazon EMR. // -// Removes a managed scaling policy from a specified EMR cluster. +// Removes a managed scaling policy from a specified Amazon EMR cluster. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4494,7 +4496,7 @@ func (c *EMR) RunJobFlowRequest(input *RunJobFlowInput) (req *request.Request, o // For long-running clusters, we recommend that you periodically store your // results. // -// The instance fleets configuration is available only in Amazon EMR versions +// The instance fleets configuration is available only in Amazon EMR releases // 4.8.0 and later, excluding 5.0.x versions. The RunJobFlow request can contain // InstanceFleets parameters or InstanceGroups parameters, but not both. 
// @@ -4576,12 +4578,12 @@ func (c *EMR) SetTerminationProtectionRequest(input *SetTerminationProtectionInp // SetTerminationProtection API operation for Amazon EMR. // -// SetTerminationProtection locks a cluster (job flow) so the EC2 instances +// SetTerminationProtection locks a cluster (job flow) so the Amazon EC2 instances // in the cluster cannot be terminated by user intervention, an API call, or // in the event of a job-flow error. The cluster still terminates upon successful // completion of the job flow. Calling SetTerminationProtection on a cluster -// is similar to calling the Amazon EC2 DisableAPITermination API on all EC2 -// instances in a cluster. +// is similar to calling the Amazon EC2 DisableAPITermination API on all Amazon +// EC2 instances in a cluster. // // SetTerminationProtection is used to prevent accidental termination of a cluster // and to ensure that in the event of an error, the instances persist so that @@ -4674,21 +4676,21 @@ func (c *EMR) SetVisibleToAllUsersRequest(input *SetVisibleToAllUsersInput) (req // // The SetVisibleToAllUsers parameter is no longer supported. Your cluster may // be visible to all users in your account. To restrict cluster access using -// an IAM policy, see Identity and Access Management for EMR (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-access-iam.html). +// an IAM policy, see Identity and Access Management for Amazon EMR (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-access-iam.html). // -// Sets the Cluster$VisibleToAllUsers value for an EMR cluster. When true, IAM -// principals in the Amazon Web Services account can perform EMR cluster actions -// that their IAM policies allow. When false, only the IAM principal that created -// the cluster and the Amazon Web Services account root user can perform EMR -// actions on the cluster, regardless of IAM permissions policies attached to -// other IAM principals. 
+// Sets the Cluster$VisibleToAllUsers value for an Amazon EMR cluster. When +// true, IAM principals in the Amazon Web Services account can perform Amazon +// EMR cluster actions that their IAM policies allow. When false, only the IAM +// principal that created the cluster and the Amazon Web Services account root +// user can perform Amazon EMR actions on the cluster, regardless of IAM permissions +// policies attached to other IAM principals. // // This action works on running clusters. When you create a cluster, use the // RunJobFlowInput$VisibleToAllUsers parameter. // -// For more information, see Understanding the EMR Cluster VisibleToAllUsers -// Setting (https://docs.aws.amazon.com/emr/latest/ManagementGuide/security_iam_emr-with-iam.html#security_set_visible_to_all_users) -// in the Amazon EMRManagement Guide. +// For more information, see Understanding the Amazon EMR Cluster VisibleToAllUsers +// Setting (https://docs.aws.amazon.com/emr/latest/ManagementGuide/security_iam_emr-with-iam.html#security_set_visible_to_all_users) +// in the Amazon EMR Management Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4936,7 +4938,7 @@ func (c *EMR) TerminateJobFlowsRequest(input *TerminateJobFlowsInput) (req *requ // TerminateJobFlows API operation for Amazon EMR. // // TerminateJobFlows shuts a list of clusters (job flows) down. When a job flow -// is shut down, any step not yet completed is canceled and the EC2 instances +// is shut down, any step not yet completed is canceled and the Amazon EC2 instances // on which the cluster is running are stopped. Any log files not already saved // are uploaded to Amazon S3 if a LogUri was specified when the cluster was // created. 
@@ -5394,7 +5396,7 @@ type AddJobFlowStepsInput struct { // a combination of account ID, role name, and role type using the following // format: arn:partition:service:region:account:resource. // - // For example, arn:aws:iam::1234567890:role/ReadOnly is a correctly formatted + // For example, arn:aws:IAM::1234567890:role/ReadOnly is a correctly formatted // runtime role ARN. ExecutionRoleArn *string `min:"20" type:"string"` @@ -5665,12 +5667,12 @@ func (s *Application) SetVersion(v string) *Application { // An automatic scaling policy for a core instance group or task instance group // in an Amazon EMR cluster. An automatic scaling policy defines how an instance -// group dynamically adds and terminates EC2 instances in response to the value -// of a CloudWatch metric. See PutAutoScalingPolicy. +// group dynamically adds and terminates Amazon EC2 instances in response to +// the value of a CloudWatch metric. See PutAutoScalingPolicy. type AutoScalingPolicy struct { _ struct{} `type:"structure"` - // The upper and lower EC2 instance limits for an automatic scaling policy. + // The upper and lower Amazon EC2 instance limits for an automatic scaling policy. // Automatic scaling activity will not cause an instance group to grow above // or below these limits. // @@ -5746,12 +5748,12 @@ func (s *AutoScalingPolicy) SetRules(v []*ScalingRule) *AutoScalingPolicy { // An automatic scaling policy for a core instance group or task instance group // in an Amazon EMR cluster. The automatic scaling policy defines how an instance -// group dynamically adds and terminates EC2 instances in response to the value -// of a CloudWatch metric. See PutAutoScalingPolicy. +// group dynamically adds and terminates Amazon EC2 instances in response to +// the value of a CloudWatch metric. See PutAutoScalingPolicy. type AutoScalingPolicyDescription struct { _ struct{} `type:"structure"` - // The upper and lower EC2 instance limits for an automatic scaling policy. 
+ // The upper and lower Amazon EC2 instance limits for an automatic scaling policy. // Automatic scaling activity will not cause an instance group to grow above // or below these limits. Constraints *ScalingConstraints `type:"structure"` @@ -5930,9 +5932,9 @@ type BlockPublicAccessConfiguration struct { _ struct{} `type:"structure"` // Indicates whether Amazon EMR block public access is enabled (true) or disabled - // (false). By default, the value is false for accounts that have created EMR - // clusters before July 2019. For accounts created after this, the default is - // true. + // (false). By default, the value is false for accounts that have created Amazon + // EMR clusters before July 2019. For accounts created after this, the default + // is true. // // BlockPublicSecurityGroupRules is a required field BlockPublicSecurityGroupRules *bool `type:"boolean" required:"true"` @@ -5944,8 +5946,8 @@ type BlockPublicAccessConfiguration struct { // cluster has a rule that allows inbound traffic on Port 23 from IPv4 0.0.0.0/0 // or IPv6 port ::/0 as the source. // - // By default, Port 22, which is used for SSH access to the cluster EC2 instances, - // is in the list of PermittedPublicSecurityGroupRuleRanges. + // By default, Port 22, which is used for SSH access to the cluster Amazon EC2 + // instances, is in the list of PermittedPublicSecurityGroupRuleRanges. PermittedPublicSecurityGroupRuleRanges []*PortRange `type:"list"` } @@ -6333,9 +6335,9 @@ type CloudWatchAlarmDefinition struct { // The namespace for the CloudWatch metric. The default is AWS/ElasticMapReduce. Namespace *string `type:"string"` - // The period, in seconds, over which the statistic is applied. EMR CloudWatch - // metrics are emitted every five minutes (300 seconds), so if an EMR CloudWatch - // metric is specified, specify 300. + // The period, in seconds, over which the statistic is applied. 
CloudWatch metrics + // for Amazon EMR are emitted every five minutes (300 seconds), so if you specify + // a CloudWatch metric, specify 300. // // Period is a required field Period *int64 `type:"integer" required:"true"` @@ -6458,7 +6460,7 @@ type Cluster struct { // An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole. // The IAM role provides permissions that the automatic scaling feature requires - // to launch and terminate EC2 instances in an instance group. + // to launch and terminate Amazon EC2 instances in an instance group. AutoScalingRole *string `type:"string"` // Specifies whether the cluster should terminate after completing all steps. @@ -6467,27 +6469,29 @@ type Cluster struct { // The Amazon Resource Name of the cluster. ClusterArn *string `min:"20" type:"string"` - // Applies only to Amazon EMR releases 4.x and later. The list of Configurations - // supplied to the EMR cluster. + // Applies only to Amazon EMR releases 4.x and later. The list of configurations + // that are supplied to the Amazon EMR cluster. Configurations []*Configuration `type:"list"` - // Available only in Amazon EMR version 5.7.0 and later. The ID of a custom + // Available only in Amazon EMR releases 5.7.0 and later. The ID of a custom // Amazon EBS-backed Linux AMI if the cluster uses a custom AMI. CustomAmiId *string `type:"string"` // The size, in GiB, of the Amazon EBS root device volume of the Linux AMI that - // is used for each EC2 instance. Available in Amazon EMR version 4.x and later. + // is used for each Amazon EC2 instance. Available in Amazon EMR releases 4.x + // and later. EbsRootVolumeSize *int64 `type:"integer"` - // Provides information about the EC2 instances in a cluster grouped by category. - // For example, key name, subnet ID, IAM instance profile, and so on. + // Provides information about the Amazon EC2 instances in a cluster grouped + // by category. 
For example, key name, subnet ID, IAM instance profile, and + // so on. Ec2InstanceAttributes *Ec2InstanceAttributes `type:"structure"` // The unique identifier for the cluster. Id *string `type:"string"` // - // The instance fleet configuration is available only in Amazon EMR versions + // The instance fleet configuration is available only in Amazon EMR releases // 4.8.0 and later, excluding 5.0.x versions. // // The instance group configuration of the cluster. A value of INSTANCE_GROUP @@ -6502,7 +6506,7 @@ type Cluster struct { KerberosAttributes *KerberosAttributes `type:"structure"` // The KMS key used for encrypting log files. This attribute is only available - // with EMR version 5.30.0 and later, excluding EMR 6.0.0. + // with Amazon EMR 5.30.0 and later, excluding Amazon EMR 6.0.0. LogEncryptionKmsKeyId *string `type:"string"` // The path to the Amazon S3 location where logs for this cluster are stored. @@ -6517,8 +6521,8 @@ type Cluster struct { // An approximation of the cost of the cluster, represented in m1.small/hours. // This value is incremented one time for every hour an m1.small instance runs. - // Larger instances are weighted more, so an EC2 instance that is roughly four - // times more expensive would result in the normalized instance hours being + // Larger instances are weighted more, so an Amazon EC2 instance that is roughly + // four times more expensive would result in the normalized instance hours being // incremented by four. This result is only an approximation and does not reflect // the actual billing rate. NormalizedInstanceHours *int64 `type:"integer"` @@ -6545,8 +6549,8 @@ type Cluster struct { ReleaseLabel *string `type:"string"` // Applies only when CustomAmiID is used. Specifies the type of updates that - // are applied from the Amazon Linux AMI package repositories when an instance - // boots using the AMI. + // the Amazon Linux AMI package repositories apply when an instance boots using + // the AMI. 
RepoUpgradeOnBoot *string `type:"string" enum:"RepoUpgradeOnBoot"` // The AMI version requested for this cluster. @@ -6565,7 +6569,7 @@ type Cluster struct { // terminating the Amazon EC2 instances, regardless of the instance-hour boundary. // With either behavior, Amazon EMR removes the least active nodes first and // blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION - // is available only in Amazon EMR version 4.1.0 and later, and is the default + // is available only in Amazon EMR releases 4.1.0 and later, and is the default // for versions of Amazon EMR earlier than 5.1.0. ScaleDownBehavior *string `type:"string" enum:"ScaleDownBehavior"` @@ -6585,21 +6589,21 @@ type Cluster struct { // A list of tags associated with a cluster. Tags []*Tag `type:"list"` - // Indicates whether Amazon EMR will lock the cluster to prevent the EC2 instances - // from being terminated by an API call or user intervention, or in the event - // of a cluster error. + // Indicates whether Amazon EMR will lock the cluster to prevent the Amazon + // EC2 instances from being terminated by an API call or user intervention, + // or in the event of a cluster error. TerminationProtected *bool `type:"boolean"` // Indicates whether the cluster is visible to IAM principals in the Amazon // Web Services account associated with the cluster. When true, IAM principals - // in the Amazon Web Services account can perform EMR cluster actions on the - // cluster that their IAM policies allow. When false, only the IAM principal + // in the Amazon Web Services account can perform Amazon EMR cluster actions + // on the cluster that their IAM policies allow. When false, only the IAM principal // that created the cluster and the Amazon Web Services account root user can - // perform EMR actions, regardless of IAM permissions policies attached to other - // IAM principals. 
+ // perform Amazon EMR actions, regardless of IAM permissions policies attached + // to other IAM principals. // // The default value is true if a value is not provided when creating a cluster - // using the EMR API RunJobFlow command, the CLI create-cluster (https://docs.aws.amazon.com/cli/latest/reference/emr/create-cluster.html) + // using the Amazon EMR API RunJobFlow command, the CLI create-cluster (https://docs.aws.amazon.com/cli/latest/reference/emr/create-cluster.html) // command, or the Amazon Web Services Management Console. VisibleToAllUsers *bool `type:"boolean"` } @@ -6853,8 +6857,8 @@ func (s *ClusterStateChangeReason) SetMessage(v string) *ClusterStateChangeReaso type ClusterStatus struct { _ struct{} `type:"structure"` - // A list of tuples that provide information about the errors that caused a - // cluster termination. This structure may have up to 10 different ErrorDetail + // A list of tuples that provides information about the errors that caused a + // cluster to terminate. This structure can contain up to 10 different ErrorDetail // tuples. ErrorDetails []*ErrorDetail `type:"list"` @@ -6926,8 +6930,8 @@ type ClusterSummary struct { // An approximation of the cost of the cluster, represented in m1.small/hours. // This value is incremented one time for every hour an m1.small instance runs. - // Larger instances are weighted more, so an EC2 instance that is roughly four - // times more expensive would result in the normalized instance hours being + // Larger instances are weighted more, so an Amazon EC2 instance that is roughly + // four times more expensive would result in the normalized instance hours being // incremented by four. This result is only an approximation and does not reflect // the actual billing rate. NormalizedInstanceHours *int64 `type:"integer"` @@ -7093,41 +7097,41 @@ func (s *Command) SetScriptPath(v string) *Command { return s } -// The EC2 unit limits for a managed scaling policy. 
The managed scaling activity -// of a cluster can not be above or below these limits. The limit only applies -// to the core and task nodes. The master node cannot be scaled after initial -// configuration. +// The Amazon EC2 unit limits for a managed scaling policy. The managed scaling +// activity of a cluster can not be above or below these limits. The limit only +// applies to the core and task nodes. The master node cannot be scaled after +// initial configuration. type ComputeLimits struct { _ struct{} `type:"structure"` - // The upper boundary of EC2 units. It is measured through vCPU cores or instances - // for instance groups and measured through units for instance fleets. Managed - // scaling activities are not allowed beyond this boundary. The limit only applies - // to the core and task nodes. The master node cannot be scaled after initial - // configuration. + // The upper boundary of Amazon EC2 units. It is measured through vCPU cores + // or instances for instance groups and measured through units for instance + // fleets. Managed scaling activities are not allowed beyond this boundary. + // The limit only applies to the core and task nodes. The master node cannot + // be scaled after initial configuration. // // MaximumCapacityUnits is a required field MaximumCapacityUnits *int64 `type:"integer" required:"true"` - // The upper boundary of EC2 units for core node type in a cluster. It is measured - // through vCPU cores or instances for instance groups and measured through - // units for instance fleets. The core units are not allowed to scale beyond - // this boundary. The parameter is used to split capacity allocation between - // core and task nodes. + // The upper boundary of Amazon EC2 units for core node type in a cluster. It + // is measured through vCPU cores or instances for instance groups and measured + // through units for instance fleets. The core units are not allowed to scale + // beyond this boundary. 
The parameter is used to split capacity allocation + // between core and task nodes. MaximumCoreCapacityUnits *int64 `type:"integer"` - // The upper boundary of On-Demand EC2 units. It is measured through vCPU cores - // or instances for instance groups and measured through units for instance - // fleets. The On-Demand units are not allowed to scale beyond this boundary. - // The parameter is used to split capacity allocation between On-Demand and - // Spot Instances. + // The upper boundary of On-Demand Amazon EC2 units. It is measured through + // vCPU cores or instances for instance groups and measured through units for + // instance fleets. The On-Demand units are not allowed to scale beyond this + // boundary. The parameter is used to split capacity allocation between On-Demand + // and Spot Instances. MaximumOnDemandCapacityUnits *int64 `type:"integer"` - // The lower boundary of EC2 units. It is measured through vCPU cores or instances - // for instance groups and measured through units for instance fleets. Managed - // scaling activities are not allowed beyond this boundary. The limit only applies - // to the core and task nodes. The master node cannot be scaled after initial - // configuration. + // The lower boundary of Amazon EC2 units. It is measured through vCPU cores + // or instances for instance groups and measured through units for instance + // fleets. Managed scaling activities are not allowed beyond this boundary. + // The limit only applies to the core and task nodes. The master node cannot + // be scaled after initial configuration. // // MinimumCapacityUnits is a required field MinimumCapacityUnits *int64 `type:"integer" required:"true"` @@ -7644,7 +7648,7 @@ type CreateStudioSessionMappingInput struct { // The Amazon Resource Name (ARN) for the session policy that will be applied // to the user or group. You should specify the ARN for the session policy that // you want to apply, not the ARN of your user role. 
For more information, see - // Create an EMR Studio User Role with Session Policies (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-studio-user-role.html). + // Create an Amazon EMR Studio User Role with Session Policies (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-studio-user-role.html). // // SessionPolicyArn is a required field SessionPolicyArn *string `type:"string" required:"true"` @@ -8671,7 +8675,7 @@ type EbsBlockDevice struct { Device *string `type:"string"` // EBS volume specifications such as volume type, IOPS, size (GiB) and throughput - // (MiB/s) that are requested for the EBS volume attached to an EC2 instance + // (MiB/s) that are requested for the EBS volume attached to an Amazon EC2 instance // in the cluster. VolumeSpecification *VolumeSpecification `type:"structure"` } @@ -8712,7 +8716,7 @@ type EbsBlockDeviceConfig struct { _ struct{} `type:"structure"` // EBS volume specifications such as volume type, IOPS, size (GiB) and throughput - // (MiB/s) that are requested for the EBS volume attached to an EC2 instance + // (MiB/s) that are requested for the EBS volume attached to an Amazon EC2 instance // in the cluster. // // VolumeSpecification is a required field @@ -8832,7 +8836,7 @@ func (s *EbsConfiguration) SetEbsOptimized(v bool) *EbsConfiguration { return s } -// EBS block device that's attached to an EC2 instance. +// EBS block device that's attached to an Amazon EC2 instance. type EbsVolume struct { _ struct{} `type:"structure"` @@ -8873,8 +8877,9 @@ func (s *EbsVolume) SetVolumeId(v string) *EbsVolume { return s } -// Provides information about the EC2 instances in a cluster grouped by category. -// For example, key name, subnet ID, IAM instance profile, and so on. +// Provides information about the Amazon EC2 instances in a cluster grouped +// by category. For example, key name, subnet ID, IAM instance profile, and +// so on. 
type Ec2InstanceAttributes struct { _ struct{} `type:"structure"` @@ -8903,14 +8908,14 @@ type Ec2InstanceAttributes struct { // The identifier of the Amazon EC2 security group for the core and task nodes. EmrManagedSlaveSecurityGroup *string `type:"string"` - // The IAM role that was specified when the cluster was launched. The EC2 instances - // of the cluster assume this role. + // The IAM role that was specified when the cluster was launched. The Amazon + // EC2 instances of the cluster assume this role. IamInstanceProfile *string `type:"string"` // Applies to clusters configured with the instance fleets option. Specifies - // one or more Availability Zones in which to launch EC2 cluster instances when - // the EC2-Classic network configuration is supported. Amazon EMR chooses the - // Availability Zone with the best fit from among the list of RequestedEc2AvailabilityZones, + // one or more Availability Zones in which to launch Amazon EC2 cluster instances + // when the EC2-Classic network configuration is supported. Amazon EMR chooses + // the Availability Zone with the best fit from among the list of RequestedEc2AvailabilityZones, // and then launches all cluster instances within that Availability Zone. If // you do not specify this value, Amazon EMR chooses the Availability Zone for // you. RequestedEc2SubnetIDs and RequestedEc2AvailabilityZones cannot be specified @@ -8919,14 +8924,15 @@ type Ec2InstanceAttributes struct { // Applies to clusters configured with the instance fleets option. Specifies // the unique identifier of one or more Amazon EC2 subnets in which to launch - // EC2 cluster instances. Subnets must exist within the same VPC. Amazon EMR - // chooses the EC2 subnet with the best fit from among the list of RequestedEc2SubnetIds, - // and then launches all cluster instances within that Subnet. 
If this value - // is not specified, and the account and Region support EC2-Classic networks, - // the cluster launches instances in the EC2-Classic network and uses RequestedEc2AvailabilityZones - // instead of this setting. If EC2-Classic is not supported, and no Subnet is - // specified, Amazon EMR chooses the subnet for you. RequestedEc2SubnetIDs and - // RequestedEc2AvailabilityZones cannot be specified together. + // Amazon EC2 cluster instances. Subnets must exist within the same VPC. Amazon + // EMR chooses the Amazon EC2 subnet with the best fit from among the list of + // RequestedEc2SubnetIds, and then launches all cluster instances within that + // Subnet. If this value is not specified, and the account and Region support + // EC2-Classic networks, the cluster launches instances in the EC2-Classic network + // and uses RequestedEc2AvailabilityZones instead of this setting. If EC2-Classic + // is not supported, and no Subnet is specified, Amazon EMR chooses the subnet + // for you. RequestedEc2SubnetIDs and RequestedEc2AvailabilityZones cannot be + // specified together. RequestedEc2SubnetIds []*string `type:"list"` // The identifier of the Amazon EC2 security group for the Amazon EMR service @@ -9023,14 +9029,14 @@ func (s *Ec2InstanceAttributes) SetServiceAccessSecurityGroup(v string) *Ec2Inst type ErrorDetail struct { _ struct{} `type:"structure"` - // The name or code that's associated with the error. + // The name or code associated with the error. ErrorCode *string `type:"string"` - // A list of key value pairs that provide contextual information to explain - // why the error may have occured. + // A list of key value pairs that provides contextual information about why + // an error occured. ErrorData []map[string]*string `type:"list"` - // A message describing the error that occured. + // A message that describes the error. 
ErrorMessage *string `type:"string"` } @@ -9071,23 +9077,27 @@ func (s *ErrorDetail) SetErrorMessage(v string) *ErrorDetail { } // Specifies the execution engine (cluster) to run the notebook and perform -// the notebook execution, for example, an EMR cluster. +// the notebook execution, for example, an Amazon EMR cluster. type ExecutionEngineConfig struct { _ struct{} `type:"structure"` - // The unique identifier of the execution engine. For an EMR cluster, this is - // the cluster ID. + // The execution role ARN required for the notebook execution. + ExecutionRoleArn *string `min:"20" type:"string"` + + // The unique identifier of the execution engine. For an Amazon EMR cluster, + // this is the cluster ID. // // Id is a required field Id *string `type:"string" required:"true"` - // An optional unique ID of an EC2 security group to associate with the master - // instance of the EMR cluster for this notebook execution. For more information - // see Specifying EC2 Security Groups for EMR Notebooks (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-managed-notebooks-security-groups.html) + // An optional unique ID of an Amazon EC2 security group to associate with the + // master instance of the Amazon EMR cluster for this notebook execution. For + // more information see Specifying Amazon EC2 Security Groups for Amazon EMR + // Notebooks (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-managed-notebooks-security-groups.html) // in the EMR Management Guide. MasterInstanceSecurityGroupId *string `type:"string"` - // The type of execution engine. A value of EMR specifies an EMR cluster. + // The type of execution engine. A value of EMR specifies an Amazon EMR cluster. Type *string `type:"string" enum:"ExecutionEngineType"` } @@ -9112,6 +9122,9 @@ func (s ExecutionEngineConfig) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *ExecutionEngineConfig) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ExecutionEngineConfig"} + if s.ExecutionRoleArn != nil && len(*s.ExecutionRoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ExecutionRoleArn", 20)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -9122,6 +9135,12 @@ func (s *ExecutionEngineConfig) Validate() error { return nil } +// SetExecutionRoleArn sets the ExecutionRoleArn field's value. +func (s *ExecutionEngineConfig) SetExecutionRoleArn(v string) *ExecutionEngineConfig { + s.ExecutionRoleArn = &v + return s +} + // SetId sets the Id field's value. func (s *ExecutionEngineConfig) SetId(v string) *ExecutionEngineConfig { s.Id = &v @@ -9312,8 +9331,8 @@ type GetBlockPublicAccessConfigurationOutput struct { // For accounts that created clusters in a Region before November 25, 2019, // block public access is disabled by default in that Region. To use this feature, // you must manually enable and configure it. For accounts that did not create - // an EMR cluster in a Region before this date, block public access is enabled - // by default in that Region. + // an Amazon EMR cluster in a Region before this date, block public access is + // enabled by default in that Region. // // BlockPublicAccessConfiguration is a required field BlockPublicAccessConfiguration *BlockPublicAccessConfiguration `type:"structure" required:"true"` @@ -9805,7 +9824,7 @@ func (s *HadoopStepConfig) SetProperties(v map[string]*string) *HadoopStepConfig return s } -// Represents an EC2 instance provisioned as part of cluster. +// Represents an Amazon EC2 instance provisioned as part of cluster. type Instance struct { _ struct{} `type:"structure"` @@ -9818,13 +9837,14 @@ type Instance struct { // The unique identifier for the instance in Amazon EMR. Id *string `type:"string"` - // The unique identifier of the instance fleet to which an EC2 instance belongs. 
+ // The unique identifier of the instance fleet to which an Amazon EC2 instance + // belongs. InstanceFleetId *string `type:"string"` // The identifier of the instance group to which this instance belongs. InstanceGroupId *string `type:"string"` - // The EC2 instance type, for example m3.xlarge. + // The Amazon EC2 instance type, for example m3.xlarge. InstanceType *string `min:"1" type:"string"` // The instance purchasing option. Valid values are ON_DEMAND or SPOT. @@ -9936,12 +9956,12 @@ func (s *Instance) SetStatus(v *InstanceStatus) *Instance { return s } -// Describes an instance fleet, which is a group of EC2 instances that host -// a particular node type (master, core, or task) in an Amazon EMR cluster. +// Describes an instance fleet, which is a group of Amazon EC2 instances that +// host a particular node type (master, core, or task) in an Amazon EMR cluster. // Instance fleets can consist of a mix of instance types and On-Demand and // Spot Instances, which are provisioned to meet a defined target capacity. // -// The instance fleet configuration is available only in Amazon EMR versions +// The instance fleet configuration is available only in Amazon EMR releases // 4.8.0 and later, excluding 5.0.x versions. type InstanceFleet struct { _ struct{} `type:"structure"` @@ -10105,7 +10125,7 @@ func (s *InstanceFleet) SetTargetSpotCapacity(v int64) *InstanceFleet { // The configuration that defines an instance fleet. // -// The instance fleet configuration is available only in Amazon EMR versions +// The instance fleet configuration is available only in Amazon EMR releases // 4.8.0 and later, excluding 5.0.x versions. type InstanceFleetConfig struct { _ struct{} `type:"structure"` @@ -10116,8 +10136,8 @@ type InstanceFleetConfig struct { // InstanceFleetType is a required field InstanceFleetType *string `type:"string" required:"true" enum:"InstanceFleetType"` - // The instance type configurations that define the EC2 instances in the instance - // fleet. 
+ // The instance type configurations that define the Amazon EC2 instances in + // the instance fleet. InstanceTypeConfigs []*InstanceTypeConfig `type:"list"` // The launch specification for the instance fleet. @@ -10260,7 +10280,7 @@ func (s *InstanceFleetConfig) SetTargetSpotCapacity(v int64) *InstanceFleetConfi // Configuration parameters for an instance fleet modification request. // -// The instance fleet configuration is available only in Amazon EMR versions +// The instance fleet configuration is available only in Amazon EMR releases // 4.8.0 and later, excluding 5.0.x versions. type InstanceFleetModifyConfig struct { _ struct{} `type:"structure"` @@ -10345,18 +10365,18 @@ func (s *InstanceFleetModifyConfig) SetTargetSpotCapacity(v int64) *InstanceFlee // The launch specification for Spot Instances in the fleet, which determines // the defined duration, provisioning timeout behavior, and allocation strategy. // -// The instance fleet configuration is available only in Amazon EMR versions +// The instance fleet configuration is available only in Amazon EMR releases // 4.8.0 and later, excluding 5.0.x versions. On-Demand and Spot instance allocation -// strategies are available in Amazon EMR version 5.12.1 and later. +// strategies are available in Amazon EMR releases 5.12.1 and later. type InstanceFleetProvisioningSpecifications struct { _ struct{} `type:"structure"` // The launch specification for On-Demand Instances in the instance fleet, which // determines the allocation strategy. // - // The instance fleet configuration is available only in Amazon EMR versions + // The instance fleet configuration is available only in Amazon EMR releases // 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation - // strategy is available in Amazon EMR version 5.12.1 and later. + // strategy is available in Amazon EMR releases 5.12.1 and later. 
OnDemandSpecification *OnDemandProvisioningSpecification `type:"structure"` // The launch specification for Spot instances in the fleet, which determines @@ -10479,7 +10499,7 @@ func (s *InstanceFleetResizingSpecifications) SetSpotResizeSpecification(v *Spot // Provides status change reason details for the instance fleet. // -// The instance fleet configuration is available only in Amazon EMR versions +// The instance fleet configuration is available only in Amazon EMR releases // 4.8.0 and later, excluding 5.0.x versions. type InstanceFleetStateChangeReason struct { _ struct{} `type:"structure"` @@ -10523,32 +10543,32 @@ func (s *InstanceFleetStateChangeReason) SetMessage(v string) *InstanceFleetStat // The status of the instance fleet. // -// The instance fleet configuration is available only in Amazon EMR versions +// The instance fleet configuration is available only in Amazon EMR releases // 4.8.0 and later, excluding 5.0.x versions. type InstanceFleetStatus struct { _ struct{} `type:"structure"` // A code representing the instance fleet status. // - // * PROVISIONING—The instance fleet is provisioning EC2 resources and - // is not yet ready to run jobs. + // * PROVISIONING—The instance fleet is provisioning Amazon EC2 resources + // and is not yet ready to run jobs. // - // * BOOTSTRAPPING—EC2 instances and other resources have been provisioned + // * BOOTSTRAPPING—Amazon EC2 instances and other resources have been provisioned // and the bootstrap actions specified for the instances are underway. // - // * RUNNING—EC2 instances and other resources are running. They are either - // executing jobs or waiting to execute jobs. + // * RUNNING—Amazon EC2 instances and other resources are running. They + // are either executing jobs or waiting to execute jobs. // - // * RESIZING—A resize operation is underway. EC2 instances are either - // being added or removed. + // * RESIZING—A resize operation is underway. 
Amazon EC2 instances are + // either being added or removed. // - // * SUSPENDED—A resize operation could not complete. Existing EC2 instances - // are running, but instances can't be added or removed. + // * SUSPENDED—A resize operation could not complete. Existing Amazon EC2 + // instances are running, but instances can't be added or removed. // - // * TERMINATING—The instance fleet is terminating EC2 instances. + // * TERMINATING—The instance fleet is terminating Amazon EC2 instances. // - // * TERMINATED—The instance fleet is no longer active, and all EC2 instances - // have been terminated. + // * TERMINATED—The instance fleet is no longer active, and all Amazon + // EC2 instances have been terminated. State *string `type:"string" enum:"InstanceFleetState"` // Provides status change reason details for the instance fleet. @@ -10598,7 +10618,7 @@ func (s *InstanceFleetStatus) SetTimeline(v *InstanceFleetTimeline) *InstanceFle // Provides historical timestamps for the instance fleet, including the time // of creation, the time it became ready to run jobs, and the time of termination. // -// The instance fleet configuration is available only in Amazon EMR versions +// The instance fleet configuration is available only in Amazon EMR releases // 4.8.0 and later, excluding 5.0.x versions. type InstanceFleetTimeline struct { _ struct{} `type:"structure"` @@ -10656,8 +10676,8 @@ type InstanceGroup struct { // An automatic scaling policy for a core instance group or task instance group // in an Amazon EMR cluster. The automatic scaling policy defines how an instance - // group dynamically adds and terminates EC2 instances in response to the value - // of a CloudWatch metric. See PutAutoScalingPolicy. + // group dynamically adds and terminates Amazon EC2 instances in response to + // the value of a CloudWatch metric. See PutAutoScalingPolicy. 
AutoScalingPolicy *AutoScalingPolicyDescription `type:"structure"` // If specified, indicates that the instance group uses Spot Instances. This @@ -10694,7 +10714,7 @@ type InstanceGroup struct { // The type of the instance group. Valid values are MASTER, CORE or TASK. InstanceGroupType *string `type:"string" enum:"InstanceGroupType"` - // The EC2 instance type for all instances in the instance group. + // The Amazon EC2 instance type for all instances in the instance group. InstanceType *string `min:"1" type:"string"` // A list of configurations that were successfully applied for an instance group @@ -10857,8 +10877,8 @@ type InstanceGroupConfig struct { // An automatic scaling policy for a core instance group or task instance group // in an Amazon EMR cluster. The automatic scaling policy defines how an instance - // group dynamically adds and terminates EC2 instances in response to the value - // of a CloudWatch metric. See PutAutoScalingPolicy. + // group dynamically adds and terminates Amazon EC2 instances in response to + // the value of a CloudWatch metric. See PutAutoScalingPolicy. AutoScalingPolicy *AutoScalingPolicy `type:"structure"` // If specified, indicates that the instance group uses Spot Instances. This @@ -10869,16 +10889,16 @@ type InstanceGroupConfig struct { // // Amazon EMR releases 4.x or later. // - // The list of configurations supplied for an EMR cluster instance group. You - // can specify a separate configuration for each instance group (master, core, - // and task). + // The list of configurations supplied for an Amazon EMR cluster instance group. + // You can specify a separate configuration for each instance group (master, + // core, and task). Configurations []*Configuration `type:"list"` // The custom AMI ID to use for the provisioned instance group. CustomAmiId *string `type:"string"` - // EBS configurations that will be attached to each EC2 instance in the instance - // group. 
+ // EBS configurations that will be attached to each Amazon EC2 instance in the + // instance group. EbsConfiguration *EbsConfiguration `type:"structure"` // Target number of instances for the instance group. @@ -10891,12 +10911,12 @@ type InstanceGroupConfig struct { // InstanceRole is a required field InstanceRole *string `type:"string" required:"true" enum:"InstanceRoleType"` - // The EC2 instance type for all instances in the instance group. + // The Amazon EC2 instance type for all instances in the instance group. // // InstanceType is a required field InstanceType *string `min:"1" type:"string" required:"true"` - // Market type of the EC2 instances used to create a cluster node. + // Market type of the Amazon EC2 instances used to create a cluster node. Market *string `type:"string" enum:"MarketType"` // Friendly name given to the instance group. @@ -11051,7 +11071,7 @@ type InstanceGroupDetail struct { // InstanceRunningCount is a required field InstanceRunningCount *int64 `type:"integer" required:"true"` - // EC2 instance type. + // Amazon EC2 instance type. // // InstanceType is a required field InstanceType *string `min:"1" type:"string" required:"true"` @@ -11059,7 +11079,7 @@ type InstanceGroupDetail struct { // Details regarding the state of the instance group. LastStateChangeReason *string `type:"string"` - // Market type of the EC2 instances used to create a cluster node. + // Market type of the Amazon EC2 instances used to create a cluster node. // // Market is a required field Market *string `type:"string" required:"true" enum:"MarketType"` @@ -11195,8 +11215,8 @@ type InstanceGroupModifyConfig struct { // A list of new or modified configurations to apply for an instance group. Configurations []*Configuration `type:"list"` - // The EC2 InstanceIds to terminate. After you terminate the instances, the - // instance group will not return to its original requested size. + // The Amazon EC2 InstanceIds to terminate. 
After you terminate the instances, + // the instance group will not return to its original requested size. EC2InstanceIdsToTerminate []*string `type:"list"` // Target size for the instance group. @@ -11616,28 +11636,28 @@ func (s *InstanceTimeline) SetReadyDateTime(v time.Time) *InstanceTimeline { } // An instance type configuration for each instance type in an instance fleet, -// which determines the EC2 instances Amazon EMR attempts to provision to fulfill -// On-Demand and Spot target capacities. When you use an allocation strategy, -// you can include a maximum of 30 instance type configurations for a fleet. -// For more information about how to use an allocation strategy, see Configure -// Instance Fleets (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-instance-fleet.html). +// which determines the Amazon EC2 instances Amazon EMR attempts to provision +// to fulfill On-Demand and Spot target capacities. When you use an allocation +// strategy, you can include a maximum of 30 instance type configurations for +// a fleet. For more information about how to use an allocation strategy, see +// Configure Instance Fleets (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-instance-fleet.html). // Without an allocation strategy, you may specify a maximum of five instance // type configurations for a fleet. // -// The instance fleet configuration is available only in Amazon EMR versions +// The instance fleet configuration is available only in Amazon EMR releases // 4.8.0 and later, excluding 5.0.x versions. type InstanceTypeConfig struct { _ struct{} `type:"structure"` - // The bid price for each EC2 Spot Instance type as defined by InstanceType. + // The bid price for each Amazon EC2 Spot Instance type as defined by InstanceType. // Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice // is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%. 
BidPrice *string `type:"string"` - // The bid price, as a percentage of On-Demand price, for each EC2 Spot Instance - // as defined by InstanceType. Expressed as a number (for example, 20 specifies - // 20%). If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, - // BidPriceAsPercentageOfOnDemandPrice defaults to 100%. + // The bid price, as a percentage of On-Demand price, for each Amazon EC2 Spot + // Instance as defined by InstanceType. Expressed as a number (for example, + // 20 specifies 20%). If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice + // is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%. BidPriceAsPercentageOfOnDemandPrice *float64 `type:"double"` // A configuration classification that applies when provisioning cluster instances, @@ -11652,7 +11672,7 @@ type InstanceTypeConfig struct { // each instance as defined by InstanceType. EbsConfiguration *EbsConfiguration `type:"structure"` - // An EC2 instance type, such as m3.xlarge. + // An Amazon EC2 instance type, such as m3.xlarge. // // InstanceType is a required field InstanceType *string `min:"1" type:"string" required:"true"` @@ -11747,18 +11767,18 @@ func (s *InstanceTypeConfig) SetWeightedCapacity(v int64) *InstanceTypeConfig { // The configuration specification for each instance type in an instance fleet. // -// The instance fleet configuration is available only in Amazon EMR versions +// The instance fleet configuration is available only in Amazon EMR releases // 4.8.0 and later, excluding 5.0.x versions. type InstanceTypeSpecification struct { _ struct{} `type:"structure"` - // The bid price for each EC2 Spot Instance type as defined by InstanceType. + // The bid price for each Amazon EC2 Spot Instance type as defined by InstanceType. // Expressed in USD. BidPrice *string `type:"string"` - // The bid price, as a percentage of On-Demand price, for each EC2 Spot Instance - // as defined by InstanceType. 
Expressed as a number (for example, 20 specifies - // 20%). + // The bid price, as a percentage of On-Demand price, for each Amazon EC2 Spot + // Instance as defined by InstanceType. Expressed as a number (for example, + // 20 specifies 20%). BidPriceAsPercentageOfOnDemandPrice *float64 `type:"double"` // A configuration classification that applies when provisioning cluster instances, @@ -11776,7 +11796,7 @@ type InstanceTypeSpecification struct { // Evaluates to TRUE when the specified InstanceType is EBS-optimized. EbsOptimized *bool `type:"boolean"` - // The EC2 instance type, for example m3.xlarge. + // The Amazon EC2 instance type, for example m3.xlarge. InstanceType *string `min:"1" type:"string"` // The number of units that a provisioned instance of this type provides toward @@ -12061,8 +12081,8 @@ type JobFlowDetail struct { // An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole. // The IAM role provides a way for the automatic scaling feature to get the - // required permissions it needs to launch and terminate EC2 instances in an - // instance group. + // required permissions it needs to launch and terminate Amazon EC2 instances + // in an instance group. AutoScalingRole *string `type:"string"` // A list of the bootstrap actions run by the job flow. @@ -12083,12 +12103,12 @@ type JobFlowDetail struct { // JobFlowId is a required field JobFlowId *string `type:"string" required:"true"` - // The IAM role that was specified when the job flow was launched. The EC2 instances - // of the job flow assume this role. + // The IAM role that was specified when the job flow was launched. The Amazon + // EC2 instances of the job flow assume this role. JobFlowRole *string `type:"string"` // The KMS key used for encrypting log files. This attribute is only available - // with EMR version 5.30.0 and later, excluding EMR 6.0.0. + // with Amazon EMR 5.30.0 and later, excluding 6.0.0. 
LogEncryptionKmsKeyId *string `type:"string"` // The location in Amazon S3 where log files for the job are stored. @@ -12109,8 +12129,8 @@ type JobFlowDetail struct { // terminating the Amazon EC2 instances, regardless of the instance-hour boundary. // With either behavior, Amazon EMR removes the least active nodes first and // blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION - // available only in Amazon EMR version 4.1.0 and later, and is the default - // for versions of Amazon EMR earlier than 5.1.0. + // available only in Amazon EMR releases 4.1.0 and later, and is the default + // for releases of Amazon EMR earlier than 5.1.0. ScaleDownBehavior *string `type:"string" enum:"ScaleDownBehavior"` // The IAM role that is assumed by the Amazon EMR service to access Amazon Web @@ -12127,13 +12147,14 @@ type JobFlowDetail struct { // Indicates whether the cluster is visible to IAM principals in the Amazon // Web Services account associated with the cluster. When true, IAM principals - // in the Amazon Web Services account can perform EMR cluster actions that their - // IAM policies allow. When false, only the IAM principal that created the cluster - // and the Amazon Web Services account root user can perform EMR actions, regardless - // of IAM permissions policies attached to other IAM principals. + // in the Amazon Web Services account can perform Amazon EMR cluster actions + // that their IAM policies allow. When false, only the IAM principal that created + // the cluster and the Amazon Web Services account root user can perform Amazon + // EMR actions, regardless of IAM permissions policies attached to other IAM + // principals. 
// // The default value is true if a value is not provided when creating a cluster - // using the EMR API RunJobFlow command, the CLI create-cluster (https://docs.aws.amazon.com/cli/latest/reference/emr/create-cluster.html) + // using the Amazon EMR API RunJobFlow command, the CLI create-cluster (https://docs.aws.amazon.com/cli/latest/reference/emr/create-cluster.html) // command, or the Amazon Web Services Management Console. VisibleToAllUsers *bool `type:"boolean"` } @@ -12343,8 +12364,8 @@ type JobFlowInstancesConfig struct { // nodes. AdditionalSlaveSecurityGroups []*string `type:"list"` - // The name of the EC2 key pair that can be used to connect to the master node - // using SSH as the user called "hadoop." + // The name of the Amazon EC2 key pair that can be used to connect to the master + // node using SSH as the user called "hadoop." Ec2KeyName *string `type:"string"` // Applies to clusters that use the uniform instance group configuration. To @@ -12355,10 +12376,10 @@ type JobFlowInstancesConfig struct { Ec2SubnetId *string `type:"string"` // Applies to clusters that use the instance fleet configuration. When multiple - // EC2 subnet IDs are specified, Amazon EMR evaluates them and launches instances - // in the optimal subnet. + // Amazon EC2 subnet IDs are specified, Amazon EMR evaluates them and launches + // instances in the optimal subnet. // - // The instance fleet configuration is available only in Amazon EMR versions + // The instance fleet configuration is available only in Amazon EMR releases // 4.8.0 and later, excluding 5.0.x versions. Ec2SubnetIds []*string `type:"list"` @@ -12378,15 +12399,15 @@ type JobFlowInstancesConfig struct { // case the default version of Hadoop for that AMI version is used. HadoopVersion *string `type:"string"` - // The number of EC2 instances in the cluster. + // The number of Amazon EC2 instances in the cluster. 
InstanceCount *int64 `type:"integer"` // - // The instance fleet configuration is available only in Amazon EMR versions + // The instance fleet configuration is available only in Amazon EMR releases // 4.8.0 and later, excluding 5.0.x versions. // - // Describes the EC2 instances and instance configurations for clusters that - // use the instance fleet configuration. + // Describes the Amazon EC2 instances and instance configurations for clusters + // that use the instance fleet configuration. InstanceFleets []*InstanceFleetConfig `type:"list"` // Configuration for the instance groups in a cluster. @@ -12398,7 +12419,7 @@ type JobFlowInstancesConfig struct { // in the EMR Management Guide. KeepJobFlowAliveWhenNoSteps *bool `type:"boolean"` - // The EC2 instance type of the master node. + // The Amazon EC2 instance type of the master node. MasterInstanceType *string `min:"1" type:"string"` // The Availability Zone in which the cluster runs. @@ -12408,7 +12429,7 @@ type JobFlowInstancesConfig struct { // to access clusters in VPC private subnets. ServiceAccessSecurityGroup *string `type:"string"` - // The EC2 instance type of the core and task nodes. + // The Amazon EC2 instance type of the core and task nodes. SlaveInstanceType *string `min:"1" type:"string"` // Specifies whether to lock the cluster to prevent the Amazon EC2 instances @@ -13414,6 +13435,9 @@ type ListNotebookExecutionsInput struct { // The unique ID of the editor associated with the notebook execution. EditorId *string `type:"string"` + // The unique ID of the execution engine. + ExecutionEngineId *string `type:"string"` + // The beginning of time range filter for listing notebook executions. The default // is the timestamp of 30 days ago. From *time.Time `type:"timestamp"` @@ -13479,6 +13503,12 @@ func (s *ListNotebookExecutionsInput) SetEditorId(v string) *ListNotebookExecuti return s } +// SetExecutionEngineId sets the ExecutionEngineId field's value. 
+func (s *ListNotebookExecutionsInput) SetExecutionEngineId(v string) *ListNotebookExecutionsInput { + s.ExecutionEngineId = &v + return s +} + // SetFrom sets the From field's value. func (s *ListNotebookExecutionsInput) SetFrom(v time.Time) *ListNotebookExecutionsInput { s.From = &v @@ -14023,10 +14053,10 @@ func (s *ListStudiosOutput) SetStudios(v []*StudioSummary) *ListStudiosOutput { type ManagedScalingPolicy struct { _ struct{} `type:"structure"` - // The EC2 unit limits for a managed scaling policy. The managed scaling activity - // of a cluster is not allowed to go above or below these limits. The limit - // only applies to the core and task nodes. The master node cannot be scaled - // after initial configuration. + // The Amazon EC2 unit limits for a managed scaling policy. The managed scaling + // activity of a cluster is not allowed to go above or below these limits. The + // limit only applies to the core and task nodes. The master node cannot be + // scaled after initial configuration. ComputeLimits *ComputeLimits `type:"structure"` } @@ -14373,22 +14403,26 @@ func (s ModifyInstanceGroupsOutput) GoString() string { return s.String() } -// A notebook execution. An execution is a specific instance that an EMR Notebook -// is run using the StartNotebookExecution action. +// A notebook execution. An execution is a specific instance that an Amazon +// EMR Notebook is run using the StartNotebookExecution action. type NotebookExecution struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the notebook execution. Arn *string `type:"string"` - // The unique identifier of the EMR Notebook that is used for the notebook execution. + // The unique identifier of the Amazon EMR Notebook that is used for the notebook + // execution. EditorId *string `type:"string"` // The timestamp when notebook execution ended. 
EndTime *time.Time `type:"timestamp"` - // The execution engine, such as an EMR cluster, used to run the EMR notebook - // and perform the notebook execution. + // The environment variables associated with the notebook execution. + EnvironmentVariables map[string]*string `type:"map"` + + // The execution engine, such as an Amazon EMR cluster, used to run the Amazon + // EMR notebook and perform the notebook execution. ExecutionEngine *ExecutionEngineConfig `type:"structure"` // The reason for the latest status change of the notebook execution. @@ -14400,16 +14434,25 @@ type NotebookExecution struct { // A name for the notebook execution. NotebookExecutionName *string `type:"string"` - // The unique identifier of the EC2 security group associated with the EMR Notebook - // instance. For more information see Specifying EC2 Security Groups for EMR - // Notebooks (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-managed-notebooks-security-groups.html) - // in the EMR Management Guide. + // The unique identifier of the Amazon EC2 security group associated with the + // Amazon EMR Notebook instance. For more information see Specifying Amazon + // EC2 Security Groups for Amazon EMR Notebooks (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-managed-notebooks-security-groups.html) + // in the Amazon EMR Management Guide. NotebookInstanceSecurityGroupId *string `type:"string"` - // Input parameters in JSON format passed to the EMR Notebook at runtime for - // execution. + // Input parameters in JSON format passed to the Amazon EMR Notebook at runtime + // for execution. NotebookParams *string `type:"string"` + // The Amazon S3 location that stores the notebook execution input. + NotebookS3Location *NotebookS3LocationForOutput_ `type:"structure"` + + // The output format for the notebook execution. + OutputNotebookFormat *string `type:"string" enum:"OutputNotebookFormat"` + + // The Amazon S3 location for the notebook execution output. 
+ OutputNotebookS3Location *OutputNotebookS3LocationForOutput_ `type:"structure"` + // The location of the notebook execution's output file in Amazon S3. OutputNotebookURI *string `type:"string"` @@ -14486,6 +14529,12 @@ func (s *NotebookExecution) SetEndTime(v time.Time) *NotebookExecution { return s } +// SetEnvironmentVariables sets the EnvironmentVariables field's value. +func (s *NotebookExecution) SetEnvironmentVariables(v map[string]*string) *NotebookExecution { + s.EnvironmentVariables = v + return s +} + // SetExecutionEngine sets the ExecutionEngine field's value. func (s *NotebookExecution) SetExecutionEngine(v *ExecutionEngineConfig) *NotebookExecution { s.ExecutionEngine = v @@ -14522,6 +14571,24 @@ func (s *NotebookExecution) SetNotebookParams(v string) *NotebookExecution { return s } +// SetNotebookS3Location sets the NotebookS3Location field's value. +func (s *NotebookExecution) SetNotebookS3Location(v *NotebookS3LocationForOutput_) *NotebookExecution { + s.NotebookS3Location = v + return s +} + +// SetOutputNotebookFormat sets the OutputNotebookFormat field's value. +func (s *NotebookExecution) SetOutputNotebookFormat(v string) *NotebookExecution { + s.OutputNotebookFormat = &v + return s +} + +// SetOutputNotebookS3Location sets the OutputNotebookS3Location field's value. +func (s *NotebookExecution) SetOutputNotebookS3Location(v *OutputNotebookS3LocationForOutput_) *NotebookExecution { + s.OutputNotebookS3Location = v + return s +} + // SetOutputNotebookURI sets the OutputNotebookURI field's value. func (s *NotebookExecution) SetOutputNotebookURI(v string) *NotebookExecution { s.OutputNotebookURI = &v @@ -14557,12 +14624,18 @@ type NotebookExecutionSummary struct { // The timestamp when notebook execution started. EndTime *time.Time `type:"timestamp"` + // The unique ID of the execution engine for the notebook execution. + ExecutionEngineId *string `type:"string"` + // The unique identifier of the notebook execution. 
NotebookExecutionId *string `type:"string"` // The name of the notebook execution. NotebookExecutionName *string `type:"string"` + // The Amazon S3 location that stores the notebook execution input. + NotebookS3Location *NotebookS3LocationForOutput_ `type:"structure"` + // The timestamp when notebook execution started. StartTime *time.Time `type:"timestamp"` @@ -14625,6 +14698,12 @@ func (s *NotebookExecutionSummary) SetEndTime(v time.Time) *NotebookExecutionSum return s } +// SetExecutionEngineId sets the ExecutionEngineId field's value. +func (s *NotebookExecutionSummary) SetExecutionEngineId(v string) *NotebookExecutionSummary { + s.ExecutionEngineId = &v + return s +} + // SetNotebookExecutionId sets the NotebookExecutionId field's value. func (s *NotebookExecutionSummary) SetNotebookExecutionId(v string) *NotebookExecutionSummary { s.NotebookExecutionId = &v @@ -14637,6 +14716,12 @@ func (s *NotebookExecutionSummary) SetNotebookExecutionName(v string) *NotebookE return s } +// SetNotebookS3Location sets the NotebookS3Location field's value. +func (s *NotebookExecutionSummary) SetNotebookS3Location(v *NotebookS3LocationForOutput_) *NotebookExecutionSummary { + s.NotebookS3Location = v + return s +} + // SetStartTime sets the StartTime field's value. func (s *NotebookExecutionSummary) SetStartTime(v time.Time) *NotebookExecutionSummary { s.StartTime = &v @@ -14649,6 +14734,101 @@ func (s *NotebookExecutionSummary) SetStatus(v string) *NotebookExecutionSummary return s } +// The Amazon S3 location that stores the notebook execution input. +type NotebookS3LocationForOutput_ struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket that stores the notebook execution input. + Bucket *string `type:"string"` + + // The key to the Amazon S3 location that stores the notebook execution input. + Key *string `min:"1" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NotebookS3LocationForOutput_) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NotebookS3LocationForOutput_) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *NotebookS3LocationForOutput_) SetBucket(v string) *NotebookS3LocationForOutput_ { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *NotebookS3LocationForOutput_) SetKey(v string) *NotebookS3LocationForOutput_ { + s.Key = &v + return s +} + +// The Amazon S3 location that stores the notebook execution input. +type NotebookS3LocationFromInput_ struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket that stores the notebook execution input. + Bucket *string `type:"string"` + + // The key to the Amazon S3 location that stores the notebook execution input. + Key *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NotebookS3LocationFromInput_) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s NotebookS3LocationFromInput_) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NotebookS3LocationFromInput_) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NotebookS3LocationFromInput_"} + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *NotebookS3LocationFromInput_) SetBucket(v string) *NotebookS3LocationFromInput_ { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *NotebookS3LocationFromInput_) SetKey(v string) *NotebookS3LocationFromInput_ { + s.Key = &v + return s +} + // The Amazon Linux release specified for a cluster in the RunJobFlow request. type OSRelease struct { _ struct{} `type:"structure"` @@ -14756,9 +14936,9 @@ func (s *OnDemandCapacityReservationOptions) SetUsageStrategy(v string) *OnDeman // The launch specification for On-Demand Instances in the instance fleet, which // determines the allocation strategy. // -// The instance fleet configuration is available only in Amazon EMR versions +// The instance fleet configuration is available only in Amazon EMR releases // 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation -// strategy is available in Amazon EMR version 5.12.1 and later. +// strategy is available in Amazon EMR releases 5.12.1 and later. type OnDemandProvisioningSpecification struct { _ struct{} `type:"structure"` @@ -14871,26 +15051,121 @@ func (s *OnDemandResizingSpecification) SetTimeoutDurationMinutes(v int64) *OnDe return s } +// The Amazon S3 location that stores the notebook execution output. +type OutputNotebookS3LocationForOutput_ struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket that stores the notebook execution output. 
+ Bucket *string `type:"string"` + + // The key to the Amazon S3 location that stores the notebook execution output. + Key *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OutputNotebookS3LocationForOutput_) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OutputNotebookS3LocationForOutput_) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *OutputNotebookS3LocationForOutput_) SetBucket(v string) *OutputNotebookS3LocationForOutput_ { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *OutputNotebookS3LocationForOutput_) SetKey(v string) *OutputNotebookS3LocationForOutput_ { + s.Key = &v + return s +} + +// The Amazon S3 location that stores the notebook execution output. +type OutputNotebookS3LocationFromInput_ struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket that stores the notebook execution output. + Bucket *string `type:"string"` + + // The key to the Amazon S3 location that stores the notebook execution output. + Key *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OutputNotebookS3LocationFromInput_) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OutputNotebookS3LocationFromInput_) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OutputNotebookS3LocationFromInput_) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OutputNotebookS3LocationFromInput_"} + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *OutputNotebookS3LocationFromInput_) SetBucket(v string) *OutputNotebookS3LocationFromInput_ { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *OutputNotebookS3LocationFromInput_) SetKey(v string) *OutputNotebookS3LocationFromInput_ { + s.Key = &v + return s +} + // Placement group configuration for an Amazon EMR cluster. The configuration // specifies the placement strategy that can be applied to instance roles during // cluster creation. // // To use this configuration, consider attaching managed policy AmazonElasticMapReducePlacementGroupPolicy -// to the EMR role. +// to the Amazon EMR role. type PlacementGroupConfig struct { _ struct{} `type:"structure"` // Role of the instance in the cluster. // - // Starting with Amazon EMR version 5.23.0, the only supported instance role + // Starting with Amazon EMR release 5.23.0, the only supported instance role // is MASTER. // // InstanceRole is a required field InstanceRole *string `type:"string" required:"true" enum:"InstanceRoleType"` - // EC2 Placement Group strategy associated with instance role. + // Amazon EC2 Placement Group strategy associated with instance role. 
// - // Starting with Amazon EMR version 5.23.0, the only supported placement strategy + // Starting with Amazon EMR release 5.23.0, the only supported placement strategy // is SPREAD for the MASTER instance role. PlacementStrategy *string `type:"string" enum:"PlacementGroupStrategy"` } @@ -14952,7 +15227,7 @@ type PlacementType struct { // is used for instance fleets, while AvailabilityZone (singular) is used for // uniform instance groups. // - // The instance fleet configuration is available only in Amazon EMR versions + // The instance fleet configuration is available only in Amazon EMR releases // 4.8.0 and later, excluding 5.0.x versions. AvailabilityZones []*string `type:"list"` } @@ -15285,8 +15560,8 @@ type PutBlockPublicAccessConfigurationInput struct { // For accounts that created clusters in a Region before November 25, 2019, // block public access is disabled by default in that Region. To use this feature, // you must manually enable and configure it. For accounts that did not create - // an EMR cluster in a Region before this date, block public access is enabled - // by default in that Region. + // an Amazon EMR cluster in a Region before this date, block public access is + // enabled by default in that Region. // // BlockPublicAccessConfiguration is a required field BlockPublicAccessConfiguration *BlockPublicAccessConfiguration `type:"structure" required:"true"` @@ -15359,7 +15634,8 @@ func (s PutBlockPublicAccessConfigurationOutput) GoString() string { type PutManagedScalingPolicyInput struct { _ struct{} `type:"structure"` - // Specifies the ID of an EMR cluster where the managed scaling policy is attached. + // Specifies the ID of an Amazon EMR cluster where the managed scaling policy + // is attached. // // ClusterId is a required field ClusterId *string `type:"string" required:"true"` @@ -15809,7 +16085,7 @@ type RunJobFlowInput struct { // An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole. 
// The IAM role provides permissions that the automatic scaling feature requires - // to launch and terminate EC2 instances in an instance group. + // to launch and terminate Amazon EC2 instances in an instance group. AutoScalingRole *string `type:"string"` // An auto-termination policy for an Amazon EMR cluster. An auto-termination @@ -15822,15 +16098,15 @@ type RunJobFlowInput struct { BootstrapActions []*BootstrapActionConfig `type:"list"` // For Amazon EMR releases 4.0 and later. The list of configurations supplied - // for the EMR cluster you are creating. + // for the Amazon EMR cluster that you are creating. Configurations []*Configuration `type:"list"` - // Available only in Amazon EMR version 5.7.0 and later. The ID of a custom + // Available only in Amazon EMR releases 5.7.0 and later. The ID of a custom // Amazon EBS-backed Linux AMI. If specified, Amazon EMR uses this AMI when - // it launches cluster EC2 instances. For more information about custom AMIs - // in Amazon EMR, see Using a Custom AMI (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-custom-ami.html) + // it launches cluster Amazon EC2 instances. For more information about custom + // AMIs in Amazon EMR, see Using a Custom AMI (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-custom-ami.html) // in the Amazon EMR Management Guide. If omitted, the cluster uses the base - // Linux AMI for the ReleaseLabel specified. For Amazon EMR versions 2.x and + // Linux AMI for the ReleaseLabel specified. For Amazon EMR releases 2.x and // 3.x, use AmiVersion instead. // // For information about creating a custom AMI, see Creating an Amazon EBS-Backed @@ -15840,7 +16116,8 @@ type RunJobFlowInput struct { CustomAmiId *string `type:"string"` // The size, in GiB, of the Amazon EBS root device volume of the Linux AMI that - // is used for each EC2 instance. Available in Amazon EMR version 4.x and later. + // is used for each Amazon EC2 instance. 
Available in Amazon EMR releases 4.x + // and later. EbsRootVolumeSize *int64 `type:"integer"` // A specification of the number and type of Amazon EC2 instances. @@ -15848,10 +16125,10 @@ type RunJobFlowInput struct { // Instances is a required field Instances *JobFlowInstancesConfig `type:"structure" required:"true"` - // Also called instance profile and EC2 role. An IAM role for an EMR cluster. - // The EC2 instances of the cluster assume this role. The default role is EMR_EC2_DefaultRole. - // In order to use the default role, you must have already created it using - // the CLI or console. + // Also called instance profile and Amazon EC2 role. An IAM role for an Amazon + // EMR cluster. The Amazon EC2 instances of the cluster assume this role. The + // default role is EMR_EC2_DefaultRole. In order to use the default role, you + // must have already created it using the CLI or console. JobFlowRole *string `type:"string"` // Attributes for Kerberos configuration when Kerberos authentication is enabled @@ -15862,7 +16139,7 @@ type RunJobFlowInput struct { // The KMS key used for encrypting log files. If a value is not provided, the // logs remain encrypted by AES-256. This attribute is only available with Amazon - // EMR version 5.30.0 and later, excluding Amazon EMR 6.0.0. + // EMR releases 5.30.0 and later, excluding Amazon EMR 6.0.0. LogEncryptionKmsKeyId *string `type:"string"` // The location in Amazon S3 to write the log files of the job flow. If a value @@ -15882,10 +16159,10 @@ type RunJobFlowInput struct { // use Applications. // // A list of strings that indicates third-party software to use with the job - // flow that accepts a user argument list. EMR accepts and forwards the argument - // list to the corresponding installation script as bootstrap action arguments. 
- // For more information, see "Launch a Job Flow on the MapR Distribution for - // Hadoop" in the Amazon EMR Developer Guide (https://docs.aws.amazon.com/emr/latest/DeveloperGuide/emr-dg.pdf). + // flow that accepts a user argument list. Amazon EMR accepts and forwards the + // argument list to the corresponding installation script as bootstrap action + // arguments. For more information, see "Launch a Job Flow on the MapR Distribution + // for Hadoop" in the Amazon EMR Developer Guide (https://docs.aws.amazon.com/emr/latest/DeveloperGuide/emr-dg.pdf). // Supported values are: // // * "mapr-m3" - launch the cluster using MapR M3 Edition. @@ -15941,8 +16218,8 @@ type RunJobFlowInput struct { // terminating the Amazon EC2 instances, regardless of the instance-hour boundary. // With either behavior, Amazon EMR removes the least active nodes first and // blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION - // available only in Amazon EMR version 4.1.0 and later, and is the default - // for versions of Amazon EMR earlier than 5.1.0. + // available only in Amazon EMR releases 4.1.0 and later, and is the default + // for releases of Amazon EMR earlier than 5.1.0. ScaleDownBehavior *string `type:"string" enum:"ScaleDownBehavior"` // The name of a security configuration to apply to the cluster. @@ -15981,17 +16258,17 @@ type RunJobFlowInput struct { // is set to true. Setting it to false now has no effect. // // Set this value to true so that IAM principals in the Amazon Web Services - // account associated with the cluster can perform EMR actions on the cluster - // that their IAM policies allow. This value defaults to true for clusters created - // using the EMR API or the CLI create-cluster (https://docs.aws.amazon.com/cli/latest/reference/emr/create-cluster.html) + // account associated with the cluster can perform Amazon EMR actions on the + // cluster that their IAM policies allow. 
This value defaults to true for clusters + // created using the Amazon EMR API or the CLI create-cluster (https://docs.aws.amazon.com/cli/latest/reference/emr/create-cluster.html) // command. // // When set to false, only the IAM principal that created the cluster and the - // Amazon Web Services account root user can perform EMR actions for the cluster, - // regardless of the IAM permissions policies attached to other IAM principals. - // For more information, see Understanding the EMR Cluster VisibleToAllUsers - // Setting (https://docs.aws.amazon.com/emr/latest/ManagementGuide/security_iam_emr-with-iam.html#security_set_visible_to_all_users) - // in the Amazon EMRManagement Guide. + // Amazon Web Services account root user can perform Amazon EMR actions for + // the cluster, regardless of the IAM permissions policies attached to other + // IAM principals. For more information, see Understanding the Amazon EMR cluster + // VisibleToAllUsers setting (https://docs.aws.amazon.com/emr/latest/ManagementGuide/security_IAM_emr-with-IAM.html#security_set_visible_to_all_users) + // in the Amazon EMR Management Guide. VisibleToAllUsers *bool `type:"boolean"` } @@ -16353,22 +16630,22 @@ func (s *ScalingAction) SetSimpleScalingPolicyConfiguration(v *SimpleScalingPoli return s } -// The upper and lower EC2 instance limits for an automatic scaling policy. +// The upper and lower Amazon EC2 instance limits for an automatic scaling policy. // Automatic scaling activities triggered by automatic scaling rules will not // cause an instance group to grow above or below these limits. type ScalingConstraints struct { _ struct{} `type:"structure"` - // The upper boundary of EC2 instances in an instance group beyond which scaling - // activities are not allowed to grow. Scale-out activities will not add instances - // beyond this boundary. + // The upper boundary of Amazon EC2 instances in an instance group beyond which + // scaling activities are not allowed to grow. 
Scale-out activities will not + // add instances beyond this boundary. // // MaxCapacity is a required field MaxCapacity *int64 `type:"integer" required:"true"` - // The lower boundary of EC2 instances in an instance group below which scaling - // activities are not allowed to shrink. Scale-in activities will not terminate - // instances below this boundary. + // The lower boundary of Amazon EC2 instances in an instance group below which + // scaling activities are not allowed to shrink. Scale-in activities will not + // terminate instances below this boundary. // // MinCapacity is a required field MinCapacity *int64 `type:"integer" required:"true"` @@ -16421,9 +16698,9 @@ func (s *ScalingConstraints) SetMinCapacity(v int64) *ScalingConstraints { } // A scale-in or scale-out rule that defines scaling activity, including the -// CloudWatch metric alarm that triggers activity, how EC2 instances are added -// or removed, and the periodicity of adjustments. The automatic scaling policy -// for an instance group can comprise one or more automatic scaling rules. +// CloudWatch metric alarm that triggers activity, how Amazon EC2 instances +// are added or removed, and the periodicity of adjustments. The automatic scaling +// policy for an instance group can comprise one or more automatic scaling rules. type ScalingRule struct { _ struct{} `type:"structure"` @@ -16939,10 +17216,10 @@ type SetVisibleToAllUsersInput struct { JobFlowIds []*string `type:"list" required:"true"` // A value of true indicates that an IAM principal in the Amazon Web Services - // account can perform EMR actions on the cluster that the IAM policies attached - // to the principal allow. A value of false indicates that only the IAM principal - // that created the cluster and the Amazon Web Services root user can perform - // EMR actions on the cluster. + // account can perform Amazon EMR actions on the cluster that the IAM policies + // attached to the principal allow. 
A value of false indicates that only the + // IAM principal that created the cluster and the Amazon Web Services root user + // can perform Amazon EMR actions on the cluster. // // VisibleToAllUsers is a required field VisibleToAllUsers *bool `type:"boolean" required:"true"` @@ -17061,22 +17338,22 @@ func (s *ShrinkPolicy) SetInstanceResizePolicy(v *InstanceResizePolicy) *ShrinkP } // An automatic scaling configuration, which describes how the policy adds or -// removes instances, the cooldown period, and the number of EC2 instances that -// will be added each time the CloudWatch metric alarm condition is satisfied. +// removes instances, the cooldown period, and the number of Amazon EC2 instances +// that will be added each time the CloudWatch metric alarm condition is satisfied. type SimpleScalingPolicyConfiguration struct { _ struct{} `type:"structure"` - // The way in which EC2 instances are added (if ScalingAdjustment is a positive - // number) or terminated (if ScalingAdjustment is a negative number) each time - // the scaling activity is triggered. CHANGE_IN_CAPACITY is the default. CHANGE_IN_CAPACITY - // indicates that the EC2 instance count increments or decrements by ScalingAdjustment, - // which should be expressed as an integer. PERCENT_CHANGE_IN_CAPACITY indicates - // the instance count increments or decrements by the percentage specified by - // ScalingAdjustment, which should be expressed as an integer. For example, - // 20 indicates an increase in 20% increments of cluster capacity. EXACT_CAPACITY - // indicates the scaling activity results in an instance group with the number - // of EC2 instances specified by ScalingAdjustment, which should be expressed - // as a positive integer. + // The way in which Amazon EC2 instances are added (if ScalingAdjustment is + // a positive number) or terminated (if ScalingAdjustment is a negative number) + // each time the scaling activity is triggered. CHANGE_IN_CAPACITY is the default. 
+ // CHANGE_IN_CAPACITY indicates that the Amazon EC2 instance count increments + // or decrements by ScalingAdjustment, which should be expressed as an integer. + // PERCENT_CHANGE_IN_CAPACITY indicates the instance count increments or decrements + // by the percentage specified by ScalingAdjustment, which should be expressed + // as an integer. For example, 20 indicates an increase in 20% increments of + // cluster capacity. EXACT_CAPACITY indicates the scaling activity results in + // an instance group with the number of Amazon EC2 instances specified by ScalingAdjustment, + // which should be expressed as a positive integer. AdjustmentType *string `type:"string" enum:"AdjustmentType"` // The amount of time, in seconds, after a scaling activity completes before @@ -17085,8 +17362,8 @@ type SimpleScalingPolicyConfiguration struct { CoolDown *int64 `type:"integer"` // The amount by which to scale in or scale out, based on the specified AdjustmentType. - // A positive value adds to the instance group's EC2 instance count while a - // negative number removes instances. If AdjustmentType is set to EXACT_CAPACITY, + // A positive value adds to the instance group's Amazon EC2 instance count while + // a negative number removes instances. If AdjustmentType is set to EXACT_CAPACITY, // the number should only be a positive integer. If AdjustmentType is set to // PERCENT_CHANGE_IN_CAPACITY, the value should express the percentage as an // integer. For example, -20 indicates a decrease in 20% increments of cluster @@ -17190,9 +17467,9 @@ func (s *SimplifiedApplication) SetVersion(v string) *SimplifiedApplication { // determines the defined duration, provisioning timeout behavior, and allocation // strategy. // -// The instance fleet configuration is available only in Amazon EMR versions +// The instance fleet configuration is available only in Amazon EMR releases // 4.8.0 and later, excluding 5.0.x versions. 
Spot Instance allocation strategy -// is available in Amazon EMR version 5.12.1 and later. +// is available in Amazon EMR releases 5.12.1 and later. // // Spot Instances with a defined duration (also known as Spot blocks) are no // longer available to new customers from July 1, 2021. For customers who have @@ -17356,10 +17633,11 @@ func (s *SpotResizingSpecification) SetTimeoutDurationMinutes(v int64) *SpotResi type StartNotebookExecutionInput struct { _ struct{} `type:"structure"` - // The unique identifier of the EMR Notebook to use for notebook execution. - // - // EditorId is a required field - EditorId *string `type:"string" required:"true"` + // The unique identifier of the Amazon EMR Notebook to use for notebook execution. + EditorId *string `type:"string"` + + // The environment variables associated with the notebook execution. + EnvironmentVariables map[string]*string `type:"map"` // Specifies the execution engine (cluster) that runs the notebook execution. // @@ -17370,25 +17648,32 @@ type StartNotebookExecutionInput struct { NotebookExecutionName *string `type:"string"` // The unique identifier of the Amazon EC2 security group to associate with - // the EMR Notebook for this notebook execution. + // the Amazon EMR Notebook for this notebook execution. NotebookInstanceSecurityGroupId *string `type:"string"` - // Input parameters in JSON format passed to the EMR Notebook at runtime for - // execution. + // Input parameters in JSON format passed to the Amazon EMR Notebook at runtime + // for execution. NotebookParams *string `type:"string"` + // The Amazon S3 location for the notebook execution input. + NotebookS3Location *NotebookS3LocationFromInput_ `type:"structure"` + + // The output format for the notebook execution. + OutputNotebookFormat *string `type:"string" enum:"OutputNotebookFormat"` + + // The Amazon S3 location for the notebook execution output. 
+ OutputNotebookS3Location *OutputNotebookS3LocationFromInput_ `type:"structure"` + // The path and file name of the notebook file for this execution, relative - // to the path specified for the EMR Notebook. For example, if you specify a - // path of s3://MyBucket/MyNotebooks when you create an EMR Notebook for a notebook - // with an ID of e-ABCDEFGHIJK1234567890ABCD (the EditorID of this request), - // and you specify a RelativePath of my_notebook_executions/notebook_execution.ipynb, + // to the path specified for the Amazon EMR Notebook. For example, if you specify + // a path of s3://MyBucket/MyNotebooks when you create an Amazon EMR Notebook + // for a notebook with an ID of e-ABCDEFGHIJK1234567890ABCD (the EditorID of + // this request), and you specify a RelativePath of my_notebook_executions/notebook_execution.ipynb, // the location of the file for the notebook execution is s3://MyBucket/MyNotebooks/e-ABCDEFGHIJK1234567890ABCD/my_notebook_executions/notebook_execution.ipynb. - // - // RelativePath is a required field - RelativePath *string `type:"string" required:"true"` + RelativePath *string `type:"string"` // The name or ARN of the IAM role that is used as the service role for Amazon - // EMR (the EMR role) for the notebook execution. + // EMR (the Amazon EMR role) for the notebook execution. // // ServiceRole is a required field ServiceRole *string `type:"string" required:"true"` @@ -17420,15 +17705,9 @@ func (s StartNotebookExecutionInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *StartNotebookExecutionInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "StartNotebookExecutionInput"} - if s.EditorId == nil { - invalidParams.Add(request.NewErrParamRequired("EditorId")) - } if s.ExecutionEngine == nil { invalidParams.Add(request.NewErrParamRequired("ExecutionEngine")) } - if s.RelativePath == nil { - invalidParams.Add(request.NewErrParamRequired("RelativePath")) - } if s.ServiceRole == nil { invalidParams.Add(request.NewErrParamRequired("ServiceRole")) } @@ -17437,6 +17716,16 @@ func (s *StartNotebookExecutionInput) Validate() error { invalidParams.AddNested("ExecutionEngine", err.(request.ErrInvalidParams)) } } + if s.NotebookS3Location != nil { + if err := s.NotebookS3Location.Validate(); err != nil { + invalidParams.AddNested("NotebookS3Location", err.(request.ErrInvalidParams)) + } + } + if s.OutputNotebookS3Location != nil { + if err := s.OutputNotebookS3Location.Validate(); err != nil { + invalidParams.AddNested("OutputNotebookS3Location", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -17450,6 +17739,12 @@ func (s *StartNotebookExecutionInput) SetEditorId(v string) *StartNotebookExecut return s } +// SetEnvironmentVariables sets the EnvironmentVariables field's value. +func (s *StartNotebookExecutionInput) SetEnvironmentVariables(v map[string]*string) *StartNotebookExecutionInput { + s.EnvironmentVariables = v + return s +} + // SetExecutionEngine sets the ExecutionEngine field's value. func (s *StartNotebookExecutionInput) SetExecutionEngine(v *ExecutionEngineConfig) *StartNotebookExecutionInput { s.ExecutionEngine = v @@ -17474,6 +17769,24 @@ func (s *StartNotebookExecutionInput) SetNotebookParams(v string) *StartNotebook return s } +// SetNotebookS3Location sets the NotebookS3Location field's value. 
+func (s *StartNotebookExecutionInput) SetNotebookS3Location(v *NotebookS3LocationFromInput_) *StartNotebookExecutionInput { + s.NotebookS3Location = v + return s +} + +// SetOutputNotebookFormat sets the OutputNotebookFormat field's value. +func (s *StartNotebookExecutionInput) SetOutputNotebookFormat(v string) *StartNotebookExecutionInput { + s.OutputNotebookFormat = &v + return s +} + +// SetOutputNotebookS3Location sets the OutputNotebookS3Location field's value. +func (s *StartNotebookExecutionInput) SetOutputNotebookS3Location(v *OutputNotebookS3LocationFromInput_) *StartNotebookExecutionInput { + s.OutputNotebookS3Location = v + return s +} + // SetRelativePath sets the RelativePath field's value. func (s *StartNotebookExecutionInput) SetRelativePath(v string) *StartNotebookExecutionInput { s.RelativePath = &v @@ -17551,7 +17864,7 @@ type Step struct { // a combination of account ID, role name, and role type using the following // format: arn:partition:service:region:account:resource. // - // For example, arn:aws:iam::1234567890:role/ReadOnly is a correctly formatted + // For example, arn:aws:IAM::1234567890:role/ReadOnly is a correctly formatted // runtime role ARN. ExecutionRoleArn *string `type:"string"` @@ -18127,8 +18440,8 @@ func (s StopNotebookExecutionOutput) GoString() string { type Studio struct { _ struct{} `type:"structure"` - // Specifies whether the Amazon EMR Studio authenticates users using IAM or - // IAM Identity Center. + // Specifies whether the Amazon EMR Studio authenticates users with IAM or IAM + // Identity Center. AuthMode *string `type:"string" enum:"AuthMode"` // The time the Amazon EMR Studio was created. @@ -18399,8 +18712,8 @@ func (s *StudioSummary) SetVpcId(v string) *StudioSummary { } // The list of supported product configurations that allow user-supplied arguments. -// EMR accepts these arguments and forwards them to the corresponding installation -// script as bootstrap action arguments. 
+// Amazon EMR accepts these arguments and forwards them to the corresponding +// installation script as bootstrap action arguments. type SupportedProductConfig struct { _ struct{} `type:"structure"` @@ -18832,7 +19145,7 @@ func (s *UsernamePassword) SetUsername(v string) *UsernamePassword { } // EBS volume specifications such as volume type, IOPS, size (GiB) and throughput -// (MiB/s) that are requested for the EBS volume attached to an EC2 instance +// (MiB/s) that are requested for the EBS volume attached to an Amazon EC2 instance // in the cluster. type VolumeSpecification struct { _ struct{} `type:"structure"` @@ -19604,6 +19917,18 @@ func OnDemandProvisioningAllocationStrategy_Values() []string { } } +const ( + // OutputNotebookFormatHtml is a OutputNotebookFormat enum value + OutputNotebookFormatHtml = "HTML" +) + +// OutputNotebookFormat_Values returns all elements of the OutputNotebookFormat enum +func OutputNotebookFormat_Values() []string { + return []string{ + OutputNotebookFormatHtml, + } +} + const ( // PlacementGroupStrategySpread is a PlacementGroupStrategy enum value PlacementGroupStrategySpread = "SPREAD" diff --git a/service/rds/api.go b/service/rds/api.go index caa8f119b2c..886b8a8fb76 100644 --- a/service/rds/api.go +++ b/service/rds/api.go @@ -2055,7 +2055,7 @@ func (c *RDS) CreateDBInstanceRequest(input *CreateDBInstanceInput) (req *reques // DBClusterIdentifier doesn't refer to an existing DB cluster. // // - ErrCodeStorageTypeNotSupportedFault "StorageTypeNotSupported" -// Storage of the StorageType specified can't be associated with the DB instance. +// The specified StorageType can't be associated with the DB instance. // // - ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" // The specified CIDR IP range or Amazon EC2 security group might not be authorized @@ -2233,7 +2233,7 @@ func (c *RDS) CreateDBInstanceReadReplicaRequest(input *CreateDBInstanceReadRepl // read replica of the same source instance. 
// // - ErrCodeStorageTypeNotSupportedFault "StorageTypeNotSupported" -// Storage of the StorageType specified can't be associated with the DB instance. +// The specified StorageType can't be associated with the DB instance. // // - ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" // An error occurred accessing an Amazon Web Services KMS key. @@ -10830,6 +10830,10 @@ func (c *RDS) ModifyDBClusterRequest(input *ModifyDBClusterInput) (req *request. // - ErrCodeDomainNotFoundFault "DomainNotFoundFault" // Domain doesn't refer to an existing Active Directory domain. // +// - ErrCodeStorageTypeNotAvailableFault "StorageTypeNotAvailableFault" +// The aurora-iopt1 storage type isn't available, because you modified the DB +// cluster to use this storage type less than one month ago. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/ModifyDBCluster func (c *RDS) ModifyDBCluster(input *ModifyDBClusterInput) (*ModifyDBClusterOutput, error) { req, out := c.ModifyDBClusterRequest(input) @@ -11262,7 +11266,7 @@ func (c *RDS) ModifyDBInstanceRequest(input *ModifyDBInstanceInput) (req *reques // The DB upgrade failed because a resource the DB depends on can't be modified. // // - ErrCodeStorageTypeNotSupportedFault "StorageTypeNotSupported" -// Storage of the StorageType specified can't be associated with the DB instance. +// The specified StorageType can't be associated with the DB instance. // // - ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" // The specified CIDR IP range or Amazon EC2 security group might not be authorized @@ -13570,6 +13574,9 @@ func (c *RDS) RestoreDBClusterFromS3Request(input *RestoreDBClusterFromS3Input) // be able to resolve this error by updating your subnet group to use different // Availability Zones that have more storage available. // +// - ErrCodeStorageTypeNotSupportedFault "StorageTypeNotSupported" +// The specified StorageType can't be associated with the DB instance. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/RestoreDBClusterFromS3 func (c *RDS) RestoreDBClusterFromS3(input *RestoreDBClusterFromS3Input) (*RestoreDBClusterFromS3Output, error) { req, out := c.RestoreDBClusterFromS3Request(input) @@ -14030,7 +14037,7 @@ func (c *RDS) RestoreDBInstanceFromDBSnapshotRequest(input *RestoreDBInstanceFro // The specified option group could not be found. // // - ErrCodeStorageTypeNotSupportedFault "StorageTypeNotSupported" -// Storage of the StorageType specified can't be associated with the DB instance. +// The specified StorageType can't be associated with the DB instance. // // - ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" // The specified CIDR IP range or Amazon EC2 security group might not be authorized @@ -14191,7 +14198,7 @@ func (c *RDS) RestoreDBInstanceFromS3Request(input *RestoreDBInstanceFromS3Input // The specified option group could not be found. // // - ErrCodeStorageTypeNotSupportedFault "StorageTypeNotSupported" -// Storage of the StorageType specified can't be associated with the DB instance. +// The specified StorageType can't be associated with the DB instance. // // - ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" // The specified CIDR IP range or Amazon EC2 security group might not be authorized @@ -14348,7 +14355,7 @@ func (c *RDS) RestoreDBInstanceToPointInTimeRequest(input *RestoreDBInstanceToPo // The specified option group could not be found. // // - ErrCodeStorageTypeNotSupportedFault "StorageTypeNotSupported" -// Storage of the StorageType specified can't be associated with the DB instance. +// The specified StorageType can't be associated with the DB instance. 
// // - ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" // The specified CIDR IP range or Amazon EC2 security group might not be authorized @@ -14891,7 +14898,7 @@ func (c *RDS) StartDBInstanceAutomatedBackupsReplicationRequest(input *StartDBIn // quota is the same as your DB Instance quota. // // - ErrCodeStorageTypeNotSupportedFault "StorageTypeNotSupported" -// Storage of the StorageType specified can't be associated with the DB instance. +// The specified StorageType can't be associated with the DB instance. // // See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/StartDBInstanceAutomatedBackupsReplication func (c *RDS) StartDBInstanceAutomatedBackupsReplication(input *StartDBInstanceAutomatedBackupsReplicationInput) (*StartDBInstanceAutomatedBackupsReplicationOutput, error) { @@ -17391,6 +17398,9 @@ type ClusterPendingModifiedValues struct { // A list of the log types whose configuration is still pending. In other words, // these log types are in the process of being activated or deactivated. PendingCloudwatchLogsExports *PendingCloudwatchLogsExports `type:"structure"` + + // The storage type for the DB cluster. + StorageType *string `type:"string"` } // String returns the string representation. @@ -17459,6 +17469,12 @@ func (s *ClusterPendingModifiedValues) SetPendingCloudwatchLogsExports(v *Pendin return s } +// SetStorageType sets the StorageType field's value. +func (s *ClusterPendingModifiedValues) SetStorageType(v string) *ClusterPendingModifiedValues { + s.StorageType = &v + return s +} + // Specifies the settings that control the size and behavior of the connection // pool associated with a DBProxyTargetGroup. type ConnectionPoolConfiguration struct { @@ -19799,17 +19815,13 @@ type CreateDBClusterInput struct { // // The serverless engine mode only applies for Aurora Serverless v1 DB clusters. // - // Limitations and requirements apply to some DB engine modes. 
For more information, + // For information about limitations and requirements for Serverless DB clusters, // see the following sections in the Amazon Aurora User Guide: // // * Limitations of Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations) // // * Requirements for Aurora Serverless v2 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html) // - // * Limitations of parallel query (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations) - // - // * Limitations of Aurora global databases (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations) - // // Valid for: Aurora DB clusters only EngineMode *string `type:"string"` @@ -20227,13 +20239,15 @@ type CreateDBClusterInput struct { // // This setting is required to create a Multi-AZ DB cluster. // - // Valid values: io1 + // When specified for a Multi-AZ DB cluster, a value for the Iops parameter + // is required. // - // When specified, a value for the Iops parameter is required. + // Valid values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB + // clusters) // - // Default: io1 + // Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters) // - // Valid for: Multi-AZ DB clusters only + // Valid for: Aurora DB clusters and Multi-AZ DB clusters StorageType *string `type:"string"` // Tags to assign to the DB cluster. @@ -24727,8 +24741,7 @@ type DBCluster struct { // The name of the database engine to be used for this DB cluster. Engine *string `type:"string"` - // The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, - // global, or multimaster. + // The DB engine mode of the DB cluster, either provisioned or serverless. 
// // For more information, see CreateDBCluster (https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBCluster.html). EngineMode *string `type:"string"` @@ -24765,6 +24778,12 @@ type DBCluster struct { // and Access Management (IAM) accounts to database accounts is enabled. IAMDatabaseAuthenticationEnabled *bool `type:"boolean"` + // The next time you can modify the DB cluster to use the aurora-iopt1 storage + // type. + // + // This setting is only for Aurora DB clusters. + IOOptimizedNextAllowedModificationTime *time.Time `type:"timestamp"` + // The Provisioned IOPS (I/O operations per second) value. // // This setting is only for non-Aurora Multi-AZ DB clusters. @@ -24941,8 +24960,6 @@ type DBCluster struct { StorageEncrypted *bool `type:"boolean"` // The storage type associated with the DB cluster. - // - // This setting is only for non-Aurora Multi-AZ DB clusters. StorageType *string `type:"string"` // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) @@ -25229,6 +25246,12 @@ func (s *DBCluster) SetIAMDatabaseAuthenticationEnabled(v bool) *DBCluster { return s } +// SetIOOptimizedNextAllowedModificationTime sets the IOOptimizedNextAllowedModificationTime field's value. +func (s *DBCluster) SetIOOptimizedNextAllowedModificationTime(v time.Time) *DBCluster { + s.IOOptimizedNextAllowedModificationTime = &v + return s +} + // SetIops sets the Iops field's value. func (s *DBCluster) SetIops(v int64) *DBCluster { s.Iops = &v @@ -25894,6 +25917,11 @@ type DBClusterSnapshot struct { // Specifies whether the DB cluster snapshot is encrypted. StorageEncrypted *bool `type:"boolean"` + // The storage type associated with the DB cluster snapshot. + // + // This setting is only for Aurora DB clusters. + StorageType *string `type:"string"` + // A list of tags. 
For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) // in the Amazon RDS User Guide. TagList []*Tag `locationNameList:"Tag" type:"list"` @@ -26046,6 +26074,12 @@ func (s *DBClusterSnapshot) SetStorageEncrypted(v bool) *DBClusterSnapshot { return s } +// SetStorageType sets the StorageType field's value. +func (s *DBClusterSnapshot) SetStorageType(v string) *DBClusterSnapshot { + s.StorageType = &v + return s +} + // SetTagList sets the TagList field's value. func (s *DBClusterSnapshot) SetTagList(v []*Tag) *DBClusterSnapshot { s.TagList = v @@ -26726,11 +26760,10 @@ type DBInstance struct { // True if mapping of Amazon Web Services Identity and Access Management (IAM) // accounts to database accounts is enabled, and otherwise false. // - // IAM database authentication can be enabled for the following database engines: - // - // * For MySQL 5.7, minor version 5.7.16 or higher. - // - // * For Amazon Aurora, all versions of Aurora MySQL and Aurora PostgreSQL. + // For a list of engine versions that support IAM database authentication, see + // IAM database authentication (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RDS_Fea_Regions_DB-eng.Feature.IamDatabaseAuthentication.html) + // in the Amazon RDS User Guide and IAM database authentication in Aurora (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.Aurora_Fea_Regions_DB-eng.Feature.IAMdbauth.html) + // in the Amazon Aurora User Guide. IAMDatabaseAuthenticationEnabled *bool `type:"boolean"` // Provides the date and time the DB instance was created. @@ -31661,8 +31694,7 @@ type DescribeDBClusterBacktracksInput struct { // Constraints: // // * Must contain a valid universally unique identifier (UUID). For more - // information about UUIDs, see A Universally Unique Identifier (UUID) URN - // Namespace (http://www.ietf.org/rfc/rfc4122.txt). 
+ // information about UUIDs, see Universally unique identifier (https://en.wikipedia.org/wiki/Universally_unique_identifier). // // Example: 123e4567-e89b-12d3-a456-426655440000 BacktrackIdentifier *string `type:"string"` @@ -40636,13 +40668,15 @@ type ModifyDBClusterInput struct { // Specifies the storage type to be associated with the DB cluster. // - // Valid values: io1 + // When specified for a Multi-AZ DB cluster, a value for the Iops parameter + // is required. // - // When specified, a value for the Iops parameter is required. + // Valid values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB + // clusters) // - // Default: io1 + // Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters) // - // Valid for: Multi-AZ DB clusters only + // Valid for: Aurora DB clusters and Multi-AZ DB clusters StorageType *string `type:"string"` // A list of VPC security groups that the DB cluster will belong to. @@ -41578,11 +41612,11 @@ type ModifyDBInstanceInput struct { // A value that indicates whether to manage the master user password with Amazon // Web Services Secrets Manager. // - // If the DB cluster doesn't manage the master user password with Amazon Web + // If the DB instance doesn't manage the master user password with Amazon Web // Services Secrets Manager, you can turn on this management. In this case, // you can't specify MasterUserPassword. // - // If the DB cluster already manages the master user password with Amazon Web + // If the DB instance already manages the master user password with Amazon Web // Services Secrets Manager, and you specify that the master user password is // not managed with Amazon Web Services Secrets Manager, then you must specify // MasterUserPassword. In this case, RDS deletes the secret and uses the new @@ -47319,6 +47353,15 @@ type RestoreDBClusterFromS3Input struct { // A value that indicates whether the restored DB cluster is encrypted. 
StorageEncrypted *bool `type:"boolean"` + // Specifies the storage type to be associated with the DB cluster. + // + // Valid values: aurora, aurora-iopt1 + // + // Default: aurora + // + // Valid for: Aurora DB clusters only + StorageType *string `type:"string"` + // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) // in the Amazon RDS User Guide. Tags []*Tag `locationNameList:"Tag" type:"list"` @@ -47574,6 +47617,12 @@ func (s *RestoreDBClusterFromS3Input) SetStorageEncrypted(v bool) *RestoreDBClus return s } +// SetStorageType sets the StorageType field's value. +func (s *RestoreDBClusterFromS3Input) SetStorageType(v string) *RestoreDBClusterFromS3Input { + s.StorageType = &v + return s +} + // SetTags sets the Tags field's value. func (s *RestoreDBClusterFromS3Input) SetTags(v []*Tag) *RestoreDBClusterFromS3Input { s.Tags = v @@ -47982,14 +48031,15 @@ type RestoreDBClusterFromSnapshotInput struct { // SnapshotIdentifier is a required field SnapshotIdentifier *string `type:"string" required:"true"` - // Specifies the storage type to be associated with the each DB instance in - // the Multi-AZ DB cluster. + // Specifies the storage type to be associated with the DB cluster. // - // Valid values: io1 + // When specified for a Multi-AZ DB cluster, a value for the Iops parameter + // is required. // - // When specified, a value for the Iops parameter is required. 
+ // Valid values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB + // clusters) // - // Default: io1 + // Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters) // // Valid for: Aurora DB clusters and Multi-AZ DB clusters StorageType *string `type:"string"` @@ -48572,16 +48622,17 @@ type RestoreDBClusterToPointInTimeInput struct { // SourceDBClusterIdentifier is a required field SourceDBClusterIdentifier *string `type:"string" required:"true"` - // Specifies the storage type to be associated with the each DB instance in - // the Multi-AZ DB cluster. + // Specifies the storage type to be associated with the DB cluster. // - // Valid values: io1 + // When specified for a Multi-AZ DB cluster, a value for the Iops parameter + // is required. // - // When specified, a value for the Iops parameter is required. + // Valid values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB + // clusters) // - // Default: io1 + // Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters) // - // Valid for: Multi-AZ DB clusters only + // Valid for: Aurora DB clusters and Multi-AZ DB clusters StorageType *string `type:"string"` // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) diff --git a/service/rds/errors.go b/service/rds/errors.go index 6027561b20f..60916d59e0f 100644 --- a/service/rds/errors.go +++ b/service/rds/errors.go @@ -781,10 +781,17 @@ const ( // available across all DB instances. ErrCodeStorageQuotaExceededFault = "StorageQuotaExceeded" + // ErrCodeStorageTypeNotAvailableFault for service response error code + // "StorageTypeNotAvailableFault". + // + // The aurora-iopt1 storage type isn't available, because you modified the DB + // cluster to use this storage type less than one month ago. 
+ ErrCodeStorageTypeNotAvailableFault = "StorageTypeNotAvailableFault" + // ErrCodeStorageTypeNotSupportedFault for service response error code // "StorageTypeNotSupported". // - // Storage of the StorageType specified can't be associated with the DB instance. + // The specified StorageType can't be associated with the DB instance. ErrCodeStorageTypeNotSupportedFault = "StorageTypeNotSupported" // ErrCodeSubnetAlreadyInUse for service response error code diff --git a/service/rds/examples_test.go b/service/rds/examples_test.go index 5f5d577c24f..8c387837d1e 100644 --- a/service/rds/examples_test.go +++ b/service/rds/examples_test.go @@ -2965,6 +2965,8 @@ func ExampleRDS_ModifyDBCluster_shared00() { fmt.Println(rds.ErrCodeDBInstanceAlreadyExistsFault, aerr.Error()) case rds.ErrCodeDomainNotFoundFault: fmt.Println(rds.ErrCodeDomainNotFoundFault, aerr.Error()) + case rds.ErrCodeStorageTypeNotAvailableFault: + fmt.Println(rds.ErrCodeStorageTypeNotAvailableFault, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -3826,6 +3828,8 @@ func ExampleRDS_RestoreDBClusterFromS3_shared00() { fmt.Println(rds.ErrCodeDomainNotFoundFault, aerr.Error()) case rds.ErrCodeInsufficientStorageClusterCapacityFault: fmt.Println(rds.ErrCodeInsufficientStorageClusterCapacityFault, aerr.Error()) + case rds.ErrCodeStorageTypeNotSupportedFault: + fmt.Println(rds.ErrCodeStorageTypeNotSupportedFault, aerr.Error()) default: fmt.Println(aerr.Error()) } diff --git a/service/swf/api.go b/service/swf/api.go index ab5725d4f56..ca1a3c29a96 100644 --- a/service/swf/api.go +++ b/service/swf/api.go @@ -8699,7 +8699,7 @@ type GetWorkflowExecutionHistoryInput struct { // If NextPageToken is returned there are more results available. The value // of NextPageToken is a unique pagination token for each page. Make the call // again using the returned token to retrieve the next page. Keep all other - // arguments unchanged. Each pagination token expires after 60 seconds. Using + // arguments unchanged. 
Each pagination token expires after 24 hours. Using // an expired pagination token will return a 400 error: "Specified token has // exceeded its maximum lifetime". // @@ -10002,7 +10002,7 @@ type ListActivityTypesInput struct { // If NextPageToken is returned there are more results available. The value // of NextPageToken is a unique pagination token for each page. Make the call // again using the returned token to retrieve the next page. Keep all other - // arguments unchanged. Each pagination token expires after 60 seconds. Using + // arguments unchanged. Each pagination token expires after 24 hours. Using // an expired pagination token will return a 400 error: "Specified token has // exceeded its maximum lifetime". // @@ -10183,7 +10183,7 @@ type ListClosedWorkflowExecutionsInput struct { // If NextPageToken is returned there are more results available. The value // of NextPageToken is a unique pagination token for each page. Make the call // again using the returned token to retrieve the next page. Keep all other - // arguments unchanged. Each pagination token expires after 60 seconds. Using + // arguments unchanged. Each pagination token expires after 24 hours. Using // an expired pagination token will return a 400 error: "Specified token has // exceeded its maximum lifetime". // @@ -10351,7 +10351,7 @@ type ListDomainsInput struct { // If NextPageToken is returned there are more results available. The value // of NextPageToken is a unique pagination token for each page. Make the call // again using the returned token to retrieve the next page. Keep all other - // arguments unchanged. Each pagination token expires after 60 seconds. Using + // arguments unchanged. Each pagination token expires after 24 hours. Using // an expired pagination token will return a 400 error: "Specified token has // exceeded its maximum lifetime". // @@ -10494,7 +10494,7 @@ type ListOpenWorkflowExecutionsInput struct { // If NextPageToken is returned there are more results available. 
The value // of NextPageToken is a unique pagination token for each page. Make the call // again using the returned token to retrieve the next page. Keep all other - // arguments unchanged. Each pagination token expires after 60 seconds. Using + // arguments unchanged. Each pagination token expires after 24 hours. Using // an expired pagination token will return a 400 error: "Specified token has // exceeded its maximum lifetime". // @@ -10728,7 +10728,7 @@ type ListWorkflowTypesInput struct { // If NextPageToken is returned there are more results available. The value // of NextPageToken is a unique pagination token for each page. Make the call // again using the returned token to retrieve the next page. Keep all other - // arguments unchanged. Each pagination token expires after 60 seconds. Using + // arguments unchanged. Each pagination token expires after 24 hours. Using // an expired pagination token will return a 400 error: "Specified token has // exceeded its maximum lifetime". // @@ -11234,7 +11234,7 @@ type PollForDecisionTaskInput struct { // If NextPageToken is returned there are more results available. The value // of NextPageToken is a unique pagination token for each page. Make the call // again using the returned token to retrieve the next page. Keep all other - // arguments unchanged. Each pagination token expires after 60 seconds. Using + // arguments unchanged. Each pagination token expires after 24 hours. Using // an expired pagination token will return a 400 error: "Specified token has // exceeded its maximum lifetime". // @@ -11251,11 +11251,16 @@ type PollForDecisionTaskInput struct { // are returned in ascending order of the eventTimestamp of the events. ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"` + // When set to true, returns the events with eventTimestamp greater than or + // equal to eventTimestamp of the most recent DecisionTaskStarted event. By + // default, this parameter is set to false. 
+ StartAtPreviousStartedEvent *bool `locationName:"startAtPreviousStartedEvent" type:"boolean"` + // Specifies the task list to poll for decision tasks. // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not be the literal string arn. + // The specified string must not contain a : (colon), / (slash), | (vertical + // bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it + // must not be the literal string arn. // // TaskList is a required field TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` @@ -11333,6 +11338,12 @@ func (s *PollForDecisionTaskInput) SetReverseOrder(v bool) *PollForDecisionTaskI return s } +// SetStartAtPreviousStartedEvent sets the StartAtPreviousStartedEvent field's value. +func (s *PollForDecisionTaskInput) SetStartAtPreviousStartedEvent(v bool) *PollForDecisionTaskInput { + s.StartAtPreviousStartedEvent = &v + return s +} + // SetTaskList sets the TaskList field's value. func (s *PollForDecisionTaskInput) SetTaskList(v *TaskList) *PollForDecisionTaskInput { s.TaskList = v @@ -11751,9 +11762,9 @@ type RegisterActivityTypeInput struct { // The name of the activity type within the domain. // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not be the literal string arn. + // The specified string must not contain a : (colon), / (slash), | (vertical + // bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it + // must not be the literal string arn. 
// // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` @@ -11763,9 +11774,9 @@ type RegisterActivityTypeInput struct { // The activity type consists of the name and version, the combination of which // must be unique within the domain. // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not be the literal string arn. + // The specified string must not contain a : (colon), / (slash), | (vertical + // bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it + // must not be the literal string arn. // // Version is a required field Version *string `locationName:"version" min:"1" type:"string" required:"true"` @@ -12114,9 +12125,9 @@ type RegisterWorkflowTypeInput struct { // The name of the workflow type. // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not be the literal string arn. + // The specified string must not contain a : (colon), / (slash), | (vertical + // bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it + // must not be the literal string arn. // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` @@ -12127,9 +12138,9 @@ type RegisterWorkflowTypeInput struct { // must be unique within the domain. To get a list of all currently registered // workflow types, use the ListWorkflowTypes action. // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not be the literal string arn. 
+ // The specified string must not contain a : (colon), / (slash), | (vertical + // bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it + // must not be the literal string arn. // // Version is a required field Version *string `locationName:"version" min:"1" type:"string" required:"true"` @@ -13217,9 +13228,9 @@ type ScheduleActivityTaskDecisionAttributes struct { // The activityId of the activity task. // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. + // The specified string must not contain a : (colon), / (slash), | (vertical + // bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it + // must not be the literal string arn. // // ActivityId is a required field ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"` @@ -13293,9 +13304,9 @@ type ScheduleActivityTaskDecisionAttributes struct { // nor a default task list was specified at registration time then a fault is // returned. // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. + // The specified string must not contain a : (colon), / (slash), | (vertical + // bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it + // must not be the literal string arn. 
TaskList *TaskList `locationName:"taskList" type:"structure"` // If set, specifies the priority with which the activity task is to be assigned @@ -13516,8 +13527,9 @@ type ScheduleLambdaFunctionDecisionAttributes struct { Name *string `locationName:"name" min:"1" type:"string" required:"true"` // The timeout value, in seconds, after which the Lambda function is considered - // to be failed once it has started. This can be any integer from 1-300 (1s-5m). - // If no value is supplied, than a default value of 300s is assumed. + // to be failed once it has started. This can be any integer from 1-900 (1s-15m). + // + // If no value is supplied, then a default value of 900s is assumed. StartToCloseTimeout *string `locationName:"startToCloseTimeout" type:"string"` } @@ -14182,7 +14194,7 @@ type StartChildWorkflowExecutionDecisionAttributes struct { // // The specified string must not start or end with whitespace. It must not contain // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. + // | \u007f-\u009f). Also, it must not be the literal string arn. TaskList *TaskList `locationName:"taskList" type:"structure"` // A task priority that, if set, specifies the priority for a decision task @@ -14211,9 +14223,9 @@ type StartChildWorkflowExecutionDecisionAttributes struct { // The workflowId of the workflow execution. // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. + // The specified string must not contain a : (colon), / (slash), | (vertical + // bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it + // must not be the literal string arn. 
// // WorkflowId is a required field WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` @@ -14713,9 +14725,9 @@ type StartTimerDecisionAttributes struct { // The unique ID of the timer. // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. + // The specified string must not contain a : (colon), / (slash), | (vertical + // bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it + // must not be the literal string arn. // // TimerId is a required field TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"` @@ -14871,6 +14883,10 @@ type StartWorkflowExecutionInput struct { // The name of the domain in which the workflow execution is created. // + // The specified string must not contain a : (colon), / (slash), | (vertical + // bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it + // must not be the literal string arn. + // // Domain is a required field Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` @@ -14919,9 +14935,9 @@ type StartWorkflowExecutionInput struct { // is set nor a default task list was specified at registration time then a // fault is returned. // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not be the literal string arn. + // The specified string must not contain a : (colon), / (slash), | (vertical + // bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it + // must not be the literal string arn. TaskList *TaskList `locationName:"taskList" type:"structure"` // The task priority to use for this workflow execution. 
This overrides any @@ -14954,9 +14970,9 @@ type StartWorkflowExecutionInput struct { // of a previous execution. You cannot have two open workflow executions with // the same workflowId at the same time within the same domain. // - // The specified string must not start or end with whitespace. It must not contain - // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not be the literal string arn. + // The specified string must not contain a : (colon), / (slash), | (vertical + // bar), or any control characters (\u0000-\u001f | \u007f-\u009f). Also, it + // must not be the literal string arn. // // WorkflowId is a required field WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` diff --git a/service/swf/integ_test.go b/service/swf/integ_test.go new file mode 100644 index 00000000000..f23fa282845 --- /dev/null +++ b/service/swf/integ_test.go @@ -0,0 +1,68 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +//go:build go1.16 && integration +// +build go1.16,integration + +package swf_test + +import ( + "context" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/integration" + "github.com/aws/aws-sdk-go/service/swf" +) + +var _ aws.Config +var _ awserr.Error +var _ request.Request + +func TestInteg_00_ListDomains(t *testing.T) { + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + sess := integration.SessionWithDefaultRegion("us-west-2") + svc := swf.New(sess) + params := &swf.ListDomainsInput{ + RegistrationStatus: aws.String("REGISTERED"), + } + _, err := svc.ListDomainsWithContext(ctx, params, func(r *request.Request) { + r.Handlers.Validate.RemoveByName("core.ValidateParametersHandler") + }) + if err != nil { + t.Errorf("expect no error, got %v", err) + } +} +func TestInteg_01_DescribeDomain(t *testing.T) { + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + sess := integration.SessionWithDefaultRegion("us-west-2") + svc := swf.New(sess) + params := &swf.DescribeDomainInput{ + Name: aws.String("fake_domain"), + } + _, err := svc.DescribeDomainWithContext(ctx, params, func(r *request.Request) { + r.Handlers.Validate.RemoveByName("core.ValidateParametersHandler") + }) + if err == nil { + t.Fatalf("expect request to fail") + } + aerr, ok := err.(awserr.RequestFailure) + if !ok { + t.Fatalf("expect awserr, was %T", err) + } + if len(aerr.Code()) == 0 { + t.Errorf("expect non-empty error code") + } + if len(aerr.Message()) == 0 { + t.Errorf("expect non-empty error message") + } + if v := aerr.Code(); v == request.ErrCodeSerialization { + t.Errorf("expect API error code got serialization failure") + } +} From 88d6da1d059e25e4b88545f750f59ce8d91f28bd Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation 
<43143561+aws-sdk-go-automation@users.noreply.github.com> Date: Thu, 11 May 2023 11:24:49 -0700 Subject: [PATCH 5/7] Release v1.44.262 (2023-05-11) (#4837) Release v1.44.262 (2023-05-11) === ### Service Client Updates * `service/connect`: Updates service documentation * `service/elasticache`: Updates service API and documentation * Added support to modify the cluster mode configuration for the existing ElastiCache ReplicationGroups. Customers can now modify the configuration from cluster mode disabled to cluster mode enabled. * `service/es`: Updates service API and documentation * This release fixes DescribePackages API error with null filter value parameter. * `service/health`: Updates service documentation * Add support for regional endpoints * `service/ivs-realtime`: Updates service API, documentation, and paginators * `service/omics`: Updates service API, documentation, and paginators * `service/opensearch`: Updates service API * `service/route53resolver`: Adds new service * `service/support`: Updates service API and documentation * This release adds 2 new Support APIs, DescribeCreateCaseOptions and DescribeSupportedLanguages. You can use these new APIs to get available support languages. 
--- CHANGELOG.md | 18 + aws/endpoints/defaults.go | 10 + aws/version.go | 2 +- models/apis/connect/2017-08-08/docs-2.json | 8 +- models/apis/elasticache/2015-02-02/api-2.json | 20 +- .../apis/elasticache/2015-02-02/docs-2.json | 49 +- .../2015-02-02/endpoint-rule-set-1.json | 412 ++- .../2015-02-02/endpoint-tests-1.json | 1483 +------- models/apis/es/2015-01-01/api-2.json | 3 +- models/apis/es/2015-01-01/docs-2.json | 2 +- .../es/2015-01-01/endpoint-rule-set-1.json | 399 ++- .../apis/es/2015-01-01/endpoint-tests-1.json | 1477 +------- models/apis/health/2016-08-04/docs-2.json | 34 +- .../2016-08-04/endpoint-rule-set-1.json | 404 +++ .../health/2016-08-04/endpoint-tests-1.json | 358 ++ .../apis/ivs-realtime/2020-07-14/api-2.json | 287 ++ .../apis/ivs-realtime/2020-07-14/docs-2.json | 223 +- .../ivs-realtime/2020-07-14/paginators-1.json | 15 + models/apis/omics/2022-11-28/api-2.json | 1431 ++++++-- models/apis/omics/2022-11-28/docs-2.json | 360 +- .../omics/2022-11-28/endpoint-tests-1.json | 92 +- .../apis/omics/2022-11-28/paginators-1.json | 12 + models/apis/omics/2022-11-28/smoke.json | 6 + models/apis/opensearch/2021-01-01/api-2.json | 3 +- .../2018-04-01/endpoint-rule-set-1.json | 38 + .../2018-04-01/endpoint-tests-1.json | 237 +- models/apis/support/2013-04-15/api-2.json | 175 +- models/apis/support/2013-04-15/docs-2.json | 172 +- .../2013-04-15/endpoint-rule-set-1.json | 1465 +++----- .../support/2013-04-15/endpoint-tests-1.json | 159 +- models/endpoints/endpoints.json | 6 + service/connect/api.go | 14 +- service/elasticache/api.go | 121 +- service/elasticsearchservice/api.go | 37 +- service/health/api.go | 64 +- service/health/doc.go | 43 +- service/ivsrealtime/api.go | 2466 +++++++++++--- service/ivsrealtime/doc.go | 26 +- .../ivsrealtime/ivsrealtimeiface/interface.go | 29 + service/omics/api.go | 3029 +++++++++++++++-- service/omics/doc.go | 2 +- service/omics/errors.go | 25 +- service/omics/omicsiface/interface.go | 34 +- service/opensearchservice/api.go | 
35 +- service/support/api.go | 768 ++++- service/support/errors.go | 8 + service/support/supportiface/interface.go | 8 + 47 files changed, 10708 insertions(+), 5361 deletions(-) create mode 100644 models/apis/health/2016-08-04/endpoint-rule-set-1.json create mode 100644 models/apis/health/2016-08-04/endpoint-tests-1.json create mode 100644 models/apis/omics/2022-11-28/smoke.json diff --git a/CHANGELOG.md b/CHANGELOG.md index cfd37b8c6d1..af33473b57f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,21 @@ +Release v1.44.262 (2023-05-11) +=== + +### Service Client Updates +* `service/connect`: Updates service documentation +* `service/elasticache`: Updates service API and documentation + * Added support to modify the cluster mode configuration for the existing ElastiCache ReplicationGroups. Customers can now modify the configuration from cluster mode disabled to cluster mode enabled. +* `service/es`: Updates service API and documentation + * This release fixes DescribePackages API error with null filter value parameter. +* `service/health`: Updates service documentation + * Add support for regional endpoints +* `service/ivs-realtime`: Updates service API, documentation, and paginators +* `service/omics`: Updates service API, documentation, and paginators +* `service/opensearch`: Updates service API +* `service/route53resolver`: Adds new service +* `service/support`: Updates service API and documentation + * This release adds 2 new Support APIs, DescribeCreateCaseOptions and DescribeSupportedLanguages. You can use these new APIs to get available support languages. 
+ Release v1.44.261 (2023-05-10) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 0e8f4c6063f..83a27225b0d 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -31062,6 +31062,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "emr-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "es": service{ Endpoints: serviceEndpoints{ endpointKey{ diff --git a/aws/version.go b/aws/version.go index 51450cc09bc..773561f1a36 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.261" +const SDKVersion = "1.44.262" diff --git a/models/apis/connect/2017-08-08/docs-2.json b/models/apis/connect/2017-08-08/docs-2.json index 2827ac1c0b9..c0f61527d00 100644 --- a/models/apis/connect/2017-08-08/docs-2.json +++ b/models/apis/connect/2017-08-08/docs-2.json @@ -86,7 +86,7 @@ "GetCurrentUserData": "

Gets the real-time active user data from the specified Amazon Connect instance.

", "GetFederationToken": "

Retrieves a token for federation.

This API doesn't support root users. If you try to invoke GetFederationToken with root credentials, an error message similar to the following one appears:

Provided identity: Principal: .... User: .... cannot be used for federation with Amazon Connect

", "GetMetricData": "

Gets historical metric data from the specified Amazon Connect instance.

For a description of each historical metric, see Historical Metrics Definitions in the Amazon Connect Administrator Guide.

", - "GetMetricDataV2": "

Gets metric data from the specified Amazon Connect instance.

GetMetricDataV2 offers more features than GetMetricData, the previous version of this API. It has new metrics, offers filtering at a metric level, and offers the ability to filter and group data by channels, queues, routing profiles, agents, and agent hierarchy levels. It can retrieve historical data for the last 14 days, in 24-hour intervals.

For a description of the historical metrics that are supported by GetMetricDataV2 and GetMetricData, see Historical metrics definitions in the Amazon Connect Administrator's Guide.

This API is not available in the Amazon Web Services GovCloud (US) Regions.

", + "GetMetricDataV2": "

Gets metric data from the specified Amazon Connect instance.

GetMetricDataV2 offers more features than GetMetricData, the previous version of this API. It has new metrics, offers filtering at a metric level, and offers the ability to filter and group data by channels, queues, routing profiles, agents, and agent hierarchy levels. It can retrieve historical data for the last 35 days, in 24-hour intervals.

For a description of the historical metrics that are supported by GetMetricDataV2 and GetMetricData, see Historical metrics definitions in the Amazon Connect Administrator's Guide.

This API is not available in the Amazon Web Services GovCloud (US) Regions.

", "GetTaskTemplate": "

Gets details about a specific task template in the specified Amazon Connect instance.

", "GetTrafficDistribution": "

Retrieves the current traffic distribution for a given traffic distribution group.

", "ListAgentStatuses": "

This API is in preview release for Amazon Connect and is subject to change.

Lists agent statuses.

", @@ -164,7 +164,7 @@ "UpdateInstanceAttribute": "

This API is in preview release for Amazon Connect and is subject to change.

Updates the value for the specified attribute type.

", "UpdateInstanceStorageConfig": "

This API is in preview release for Amazon Connect and is subject to change.

Updates an existing configuration for a resource type. This API is idempotent.

", "UpdateParticipantRoleConfig": "

Updates timeouts for when human chat participants are to be considered idle, and when agents are automatically disconnected from a chat due to idleness. You can set four timers:

For more information about how chat timeouts work, see Set up chat timeouts for human participants.

", - "UpdatePhoneNumber": "

Updates your claimed phone number from its current Amazon Connect instance or traffic distribution group to another Amazon Connect instance or traffic distribution group in the same Amazon Web Services Region.

You can call DescribePhoneNumber API to verify the status of a previous UpdatePhoneNumber operation.

", + "UpdatePhoneNumber": "

Updates your claimed phone number from its current Amazon Connect instance or traffic distribution group to another Amazon Connect instance or traffic distribution group in the same Amazon Web Services Region.

After using this API, you must verify that the phone number is attached to the correct flow in the target instance or traffic distribution group. You need to do this because the API switches only the phone number to a new instance or traffic distribution group. It doesn't migrate the flow configuration of the phone number, too.

You can call DescribePhoneNumber API to verify the status of a previous UpdatePhoneNumber operation.

", "UpdateQueueHoursOfOperation": "

This API is in preview release for Amazon Connect and is subject to change.

Updates the hours of operation for the specified queue.

", "UpdateQueueMaxContacts": "

This API is in preview release for Amazon Connect and is subject to change.

Updates the maximum number of contacts allowed in a queue before it is considered full.

", "UpdateQueueName": "

This API is in preview release for Amazon Connect and is subject to change.

Updates the name and description of a queue. At least Name or Description must be provided.

", @@ -3661,7 +3661,7 @@ "MetricNameV2": { "base": null, "refs": { - "MetricV2$Name": "

The name of the metric.

" + "MetricV2$Name": "

The name of the metric.

This parameter is required. The following Required = No is incorrect.

" } }, "MetricResultV2": { @@ -5624,7 +5624,7 @@ "EvaluationFormVersionSummary$LastModifiedTime": "

The timestamp for when the evaluation form was last updated.

", "EvaluationSummary$CreatedTime": "

The timestamp for when the evaluation was created.

", "EvaluationSummary$LastModifiedTime": "

The timestamp for when the evaluation was last updated.

", - "GetMetricDataV2Request$StartTime": "

The timestamp, in UNIX Epoch time format, at which to start the reporting interval for the retrieval of historical metrics data. The time must be before the end time timestamp. The time range between the start and end time must be less than 24 hours. The start time cannot be earlier than 14 days before the time of the request. Historical metrics are available for 14 days.

", + "GetMetricDataV2Request$StartTime": "

The timestamp, in UNIX Epoch time format, at which to start the reporting interval for the retrieval of historical metrics data. The time must be before the end time timestamp. The time range between the start and end time must be less than 24 hours. The start time cannot be earlier than 35 days before the time of the request. Historical metrics are available for 35 days.

", "GetMetricDataV2Request$EndTime": "

The timestamp, in UNIX Epoch time format, at which to end the reporting interval for the retrieval of historical metrics data. The time must be later than the start time timestamp. It cannot be later than the current timestamp.

The time range between the start and end time must be less than 24 hours.

", "Instance$CreatedTime": "

When the instance was created.

", "InstanceSummary$CreatedTime": "

When the instance was created.

", diff --git a/models/apis/elasticache/2015-02-02/api-2.json b/models/apis/elasticache/2015-02-02/api-2.json index c673302f7b0..c38af586b66 100644 --- a/models/apis/elasticache/2015-02-02/api-2.json +++ b/models/apis/elasticache/2015-02-02/api-2.json @@ -1881,6 +1881,14 @@ "locationName":"ClusterId" } }, + "ClusterMode":{ + "type":"string", + "enum":[ + "enabled", + "disabled", + "compatible" + ] + }, "ClusterQuotaForCustomerExceededFault":{ "type":"structure", "members":{ @@ -2100,7 +2108,8 @@ "DataTieringEnabled":{"shape":"BooleanOptional"}, "NetworkType":{"shape":"NetworkType"}, "IpDiscovery":{"shape":"IpDiscovery"}, - "TransitEncryptionMode":{"shape":"TransitEncryptionMode"} + "TransitEncryptionMode":{"shape":"TransitEncryptionMode"}, + "ClusterMode":{"shape":"ClusterMode"} } }, "CreateReplicationGroupResult":{ @@ -3207,7 +3216,8 @@ "LogDeliveryConfigurations":{"shape":"LogDeliveryConfigurationRequestList"}, "IpDiscovery":{"shape":"IpDiscovery"}, "TransitEncryptionEnabled":{"shape":"BooleanOptional"}, - "TransitEncryptionMode":{"shape":"TransitEncryptionMode"} + "TransitEncryptionMode":{"shape":"TransitEncryptionMode"}, + "ClusterMode":{"shape":"ClusterMode"} } }, "ModifyReplicationGroupResult":{ @@ -3743,7 +3753,8 @@ "AutoMinorVersionUpgrade":{"shape":"Boolean"}, "NetworkType":{"shape":"NetworkType"}, "IpDiscovery":{"shape":"IpDiscovery"}, - "TransitEncryptionMode":{"shape":"TransitEncryptionMode"} + "TransitEncryptionMode":{"shape":"TransitEncryptionMode"}, + "ClusterMode":{"shape":"ClusterMode"} }, "wrapper":true }, @@ -3827,7 +3838,8 @@ "UserGroups":{"shape":"UserGroupsUpdateStatus"}, "LogDeliveryConfigurations":{"shape":"PendingLogDeliveryConfigurationList"}, "TransitEncryptionEnabled":{"shape":"BooleanOptional"}, - "TransitEncryptionMode":{"shape":"TransitEncryptionMode"} + "TransitEncryptionMode":{"shape":"TransitEncryptionMode"}, + "ClusterMode":{"shape":"ClusterMode"} } }, "ReservedCacheNode":{ diff --git 
a/models/apis/elasticache/2015-02-02/docs-2.json b/models/apis/elasticache/2015-02-02/docs-2.json index ec3de0c7f04..ab95547621e 100644 --- a/models/apis/elasticache/2015-02-02/docs-2.json +++ b/models/apis/elasticache/2015-02-02/docs-2.json @@ -222,7 +222,7 @@ "ModifyReplicationGroupShardConfigurationMessage$ApplyImmediately": "

Indicates that the shard reconfiguration process begins immediately. At present, the only permitted value for this parameter is true.

Value: true

", "Parameter$IsModifiable": "

Indicates whether (true) or not (false) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.

", "RebalanceSlotsInGlobalReplicationGroupMessage$ApplyImmediately": "

If True, redistribution is applied immediately.

", - "ReplicationGroup$AutoMinorVersionUpgrade": "

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

", + "ReplicationGroup$AutoMinorVersionUpgrade": "

If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions.

", "ResetCacheParameterGroupMessage$ResetAllParameters": "

If true, all parameters in the cache parameter group are reset to their default values. If false, only the parameters listed by ParameterNameValues are reset to their default values.

Valid values: true | false

", "Snapshot$AutoMinorVersionUpgrade": "

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

" } @@ -234,7 +234,7 @@ "CacheCluster$TransitEncryptionEnabled": "

A flag that enables in-transit encryption when set to true.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

", "CacheCluster$AtRestEncryptionEnabled": "

A flag that enables encryption at-rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

", "CreateCacheClusterMessage$AutoMinorVersionUpgrade": "

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

", - "CreateCacheClusterMessage$TransitEncryptionEnabled": "

A flag that enables in-transit encryption when set to true.

Only available when creating a cache cluster in an Amazon VPC using Memcached version 1.6.12 or later.

", + "CreateCacheClusterMessage$TransitEncryptionEnabled": "

A flag that enables in-transit encryption when set to true.

", "CreateReplicationGroupMessage$AutomaticFailoverEnabled": "

Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.

AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled) replication groups.

Default: false

", "CreateReplicationGroupMessage$MultiAZEnabled": "

A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see Minimizing Downtime: Multi-AZ.

", "CreateReplicationGroupMessage$AutoMinorVersionUpgrade": "

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

", @@ -552,6 +552,15 @@ "ReplicationGroup$MemberClusters": "

The names of all the cache clusters that are part of this replication group.

" } }, + "ClusterMode": { + "base": null, + "refs": { + "CreateReplicationGroupMessage$ClusterMode": "

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled.

", + "ModifyReplicationGroupMessage$ClusterMode": "

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled.

", + "ReplicationGroup$ClusterMode": "

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled.

", + "ReplicationGroupPendingModifiedValues$ClusterMode": "

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled.

" + } + }, "ClusterQuotaForCustomerExceededFault": { "base": "

The request cannot be processed because it would exceed the allowed number of clusters per customer.

", "refs": { @@ -1257,12 +1266,12 @@ "IpDiscovery": { "base": null, "refs": { - "CacheCluster$IpDiscovery": "

The network type associated with the cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

", - "CreateCacheClusterMessage$IpDiscovery": "

The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

", - "CreateReplicationGroupMessage$IpDiscovery": "

The network type you choose when creating a replication group, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

", - "ModifyCacheClusterMessage$IpDiscovery": "

The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

", - "ModifyReplicationGroupMessage$IpDiscovery": "

The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

", - "ReplicationGroup$IpDiscovery": "

The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "CacheCluster$IpDiscovery": "

The network type associated with the cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

", + "CreateCacheClusterMessage$IpDiscovery": "

The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

", + "CreateReplicationGroupMessage$IpDiscovery": "

The network type you choose when creating a replication group, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

", + "ModifyCacheClusterMessage$IpDiscovery": "

The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

", + "ModifyReplicationGroupMessage$IpDiscovery": "

The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

", + "ReplicationGroup$IpDiscovery": "

The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" } }, "KeyList": { @@ -1411,18 +1420,18 @@ "NetworkType": { "base": null, "refs": { - "CacheCluster$NetworkType": "

Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

", - "CreateCacheClusterMessage$NetworkType": "

Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

", - "CreateReplicationGroupMessage$NetworkType": "

Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

", + "CacheCluster$NetworkType": "

Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

", + "CreateCacheClusterMessage$NetworkType": "

Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

", + "CreateReplicationGroupMessage$NetworkType": "

Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

", "NetworkTypeList$member": null, - "ReplicationGroup$NetworkType": "

Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "ReplicationGroup$NetworkType": "

Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" } }, "NetworkTypeList": { "base": null, "refs": { - "CacheSubnetGroup$SupportedNetworkTypes": "

Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

", - "Subnet$SupportedNetworkTypes": "

Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "CacheSubnetGroup$SupportedNetworkTypes": "

Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

", + "Subnet$SupportedNetworkTypes": "

Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" } }, "NoOperationFault": { @@ -2064,7 +2073,7 @@ "CacheClusterMessage$Marker": "

Provides an identifier to allow retrieval of paginated results.

", "CacheEngineVersion$Engine": "

The name of the cache engine.

", "CacheEngineVersion$EngineVersion": "

The version number of the cache engine.

", - "CacheEngineVersion$CacheParameterGroupFamily": "

The name of the cache parameter group family associated with this cache engine.

Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x

", + "CacheEngineVersion$CacheParameterGroupFamily": "

The name of the cache parameter group family associated with this cache engine.

Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis7

", "CacheEngineVersion$CacheEngineDescription": "

The description of the cache engine.

", "CacheEngineVersion$CacheEngineVersionDescription": "

The description of the cache engine version.

", "CacheEngineVersionMessage$Marker": "

Provides an identifier to allow retrieval of paginated results.

", @@ -2085,7 +2094,7 @@ "CacheNodeTypeSpecificValue$Value": "

The value for the cache node type.

", "CacheNodeUpdateStatus$CacheNodeId": "

The node ID of the cache cluster

", "CacheParameterGroup$CacheParameterGroupName": "

The name of the cache parameter group.

", - "CacheParameterGroup$CacheParameterGroupFamily": "

The name of the cache parameter group family that this cache parameter group is compatible with.

Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x |

", + "CacheParameterGroup$CacheParameterGroupFamily": "

The name of the cache parameter group family that this cache parameter group is compatible with.

Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis7

", "CacheParameterGroup$Description": "

The description for this cache parameter group.

", "CacheParameterGroup$ARN": "

The ARN (Amazon Resource Name) of the cache parameter group.

", "CacheParameterGroupDetails$Marker": "

Provides an identifier to allow retrieval of paginated results.

", @@ -2128,7 +2137,7 @@ "CreateCacheClusterMessage$AuthToken": "

Reserved parameter. The password used to access a password protected server.

Password constraints:

For more information, see AUTH password at http://redis.io/commands/AUTH.

", "CreateCacheClusterMessage$PreferredOutpostArn": "

The outpost ARN in which the cache cluster is created.

", "CreateCacheParameterGroupMessage$CacheParameterGroupName": "

A user-specified name for the cache parameter group.

", - "CreateCacheParameterGroupMessage$CacheParameterGroupFamily": "

The name of the cache parameter group family that the cache parameter group can be used with.

Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x

", + "CreateCacheParameterGroupMessage$CacheParameterGroupFamily": "

The name of the cache parameter group family that the cache parameter group can be used with.

Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis7

", "CreateCacheParameterGroupMessage$Description": "

A user-specified description for the cache parameter group.

", "CreateCacheSecurityGroupMessage$CacheSecurityGroupName": "

A name for the cache security group. This value is stored as a lowercase string.

Constraints: Must contain no more than 255 alphanumeric characters. Cannot be the word \"Default\".

Example: mysecuritygroup

", "CreateCacheSecurityGroupMessage$Description": "

A description for the cache security group.

", @@ -2230,7 +2239,7 @@ "EC2SecurityGroup$EC2SecurityGroupName": "

The name of the Amazon EC2 security group.

", "EC2SecurityGroup$EC2SecurityGroupOwnerId": "

The Amazon account ID of the Amazon EC2 security group owner.

", "Endpoint$Address": "

The DNS hostname of the cache node.

", - "EngineDefaults$CacheParameterGroupFamily": "

Specifies the name of the cache parameter group family to which the engine default parameters apply.

Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.0 | redis6.x

", + "EngineDefaults$CacheParameterGroupFamily": "

Specifies the name of the cache parameter group family to which the engine default parameters apply.

Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.0 | redis6.x | redis7

", "EngineDefaults$Marker": "

Provides an identifier to allow retrieval of paginated results.

", "Event$SourceIdentifier": "

The identifier for the source of the event. For example, if the event occurred at the cluster level, the identifier would be the name of the cluster.

", "Event$Message": "

The text of the event.

", @@ -2566,8 +2575,8 @@ "base": null, "refs": { "CacheCluster$TransitEncryptionMode": "

A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.

", - "CreateReplicationGroupMessage$TransitEncryptionMode": "

A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.

When setting TransitEncryptionEnabled to true, you can set your TransitEncryptionMode to preferred in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis clients to use encrypted connections you can modify the value to required to allow encrypted connections only.

Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred first, after that you can set TransitEncryptionMode to required.

", - "ModifyReplicationGroupMessage$TransitEncryptionMode": "

A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.

You must set TransitEncryptionEnabled to true, for your existing cluster, and set TransitEncryptionMode to preferred in the same request to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis clients to use encrypted connections you can set the value to required to allow encrypted connections only.

Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred first, after that you can set TransitEncryptionMode to required.

", + "CreateReplicationGroupMessage$TransitEncryptionMode": "

A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.

When setting TransitEncryptionEnabled to true, you can set your TransitEncryptionMode to preferred in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis clients to use encrypted connections you can modify the value to required to allow encrypted connections only.

Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required.

This process will not trigger the replacement of the replication group.

", + "ModifyReplicationGroupMessage$TransitEncryptionMode": "

A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.

You must set TransitEncryptionEnabled to true, for your existing cluster, and set TransitEncryptionMode to preferred in the same request to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis clients to use encrypted connections you can set the value to required to allow encrypted connections only.

Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required.

", "PendingModifiedValues$TransitEncryptionMode": "

A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.

", "ReplicationGroup$TransitEncryptionMode": "

A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.

", "ReplicationGroupPendingModifiedValues$TransitEncryptionMode": "

A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.

" diff --git a/models/apis/elasticache/2015-02-02/endpoint-rule-set-1.json b/models/apis/elasticache/2015-02-02/endpoint-rule-set-1.json index c7a4c06d703..db2e6d16336 100644 --- a/models/apis/elasticache/2015-02-02/endpoint-rule-set-1.json +++ b/models/apis/elasticache/2015-02-02/endpoint-rule-set-1.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - 
true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,199 +111,263 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] }, - "supportsDualStack" + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://elasticache-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://elasticache-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" 
}, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ - "aws-us-gov", + true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "name" + "supportsFIPS" ] } ] } ], - "endpoint": { - "url": "https://elasticache.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-us-gov", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://elasticache.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://elasticache-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] }, { "conditions": [], - "endpoint": { - "url": "https://elasticache-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": 
"booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://elasticache.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://elasticache.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://elasticache.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://elasticache.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/models/apis/elasticache/2015-02-02/endpoint-tests-1.json b/models/apis/elasticache/2015-02-02/endpoint-tests-1.json index b65711ace26..f34b1cd50be 100644 --- a/models/apis/elasticache/2015-02-02/endpoint-tests-1.json +++ b/models/apis/elasticache/2015-02-02/endpoint-tests-1.json @@ -1,94 +1,68 @@ { "testCases": [ { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack enabled", - "expect": { - 
"endpoint": { - "url": "https://elasticache-fips.ap-south-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache-fips.ap-south-2.amazonaws.com" + "url": "https://elasticache.af-south-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-south-2", + "Region": "af-south-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.ap-south-2.api.aws" + "url": "https://elasticache.ap-east-1.amazonaws.com" } }, "params": { + "Region": "ap-east-1", "UseFIPS": false, - "Region": "ap-south-2", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.ap-south-2.amazonaws.com" + "url": "https://elasticache.ap-northeast-1.amazonaws.com" } }, "params": { + "Region": "ap-northeast-1", "UseFIPS": false, - "Region": "ap-south-2", "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://elasticache-fips.ap-south-1.amazonaws.com" + "url": "https://elasticache.ap-northeast-2.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-south-1", + "Region": "ap-northeast-2", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.ap-south-1.api.aws" + "url": "https://elasticache.ap-northeast-3.amazonaws.com" } }, "params": { + "Region": "ap-northeast-3", "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": true + "UseDualStack": false } }, { @@ -99,258 +73,50 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache.eu-south-1.amazonaws.com" - } - }, - "params": { "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and 
DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.eu-south-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-2", "UseDualStack": false } }, { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache.eu-south-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.eu-south-2.amazonaws.com" + "url": "https://elasticache.ap-southeast-1.amazonaws.com" } }, "params": { + "Region": "ap-southeast-1", "UseFIPS": false, - "Region": "eu-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-east-1", "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache.us-gov-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-gov-east-1", 
- "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.us-gov-east-1.amazonaws.com" + "url": "https://elasticache.ap-southeast-2.amazonaws.com" } }, "params": { + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.me-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.me-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-central-1", "UseDualStack": false } }, { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache.me-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.me-central-1.amazonaws.com" + "url": "https://elasticache.ap-southeast-3.amazonaws.com" } }, "params": { + "Region": "ap-southeast-3", "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - 
"Region": "ca-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", "UseDualStack": false } }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": true - } - }, { "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { @@ -359,1352 +125,459 @@ } }, "params": { - "UseFIPS": false, "Region": "ca-central-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache-fips.eu-central-1.amazonaws.com" + "url": "https://elasticache.eu-central-1.amazonaws.com" } }, - "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://elasticache.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "UseFIPS": true, - "Region": "us-iso-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-iso-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "UseFIPS": false, - "Region": "us-iso-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-iso-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.eu-central-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - 
"url": "https://elasticache.eu-central-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.us-west-2.amazonaws.com" - } - }, - "params": { - 
"UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - 
"documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", 
- "expect": { - "endpoint": { - "url": "https://elasticache-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache.eu-west-1.amazonaws.com" - } - 
}, - "params": { - "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache.ap-northeast-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache.ap-northeast-2.api.aws" 
- } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.me-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.me-south-1.amazonaws.com" - 
} - }, - "params": { - "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache.me-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache.me-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "sa-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "sa-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache.sa-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.ap-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-east-1", - "UseDualStack": true - 
} - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache.ap-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseFIPS": true, - "Region": "cn-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "UseFIPS": true, - "Region": "cn-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseFIPS": false, - "Region": "cn-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "UseFIPS": false, - "Region": "cn-north-1", - "UseDualStack": false - } - }, - { - "documentation": 
"For region us-gov-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-west-1", - "UseDualStack": true + "params": { + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.us-gov-west-1.amazonaws.com" + "url": "https://elasticache.eu-north-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "us-gov-west-1", + "Region": "eu-north-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.us-gov-west-1.api.aws" + "url": "https://elasticache.eu-south-1.amazonaws.com" } }, "params": { + "Region": "eu-south-1", "UseFIPS": false, - "Region": "us-gov-west-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.us-gov-west-1.amazonaws.com" + "url": "https://elasticache.eu-west-1.amazonaws.com" } }, "params": { + "Region": "eu-west-1", "UseFIPS": false, - "Region": "us-gov-west-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache-fips.ap-southeast-1.api.aws" + "url": "https://elasticache.eu-west-2.amazonaws.com" } 
}, "params": { - "UseFIPS": true, - "Region": "ap-southeast-1", - "UseDualStack": true + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache-fips.ap-southeast-1.amazonaws.com" + "url": "https://elasticache.eu-west-3.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-1", + "Region": "eu-west-3", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.ap-southeast-1.api.aws" + "url": "https://elasticache.me-south-1.amazonaws.com" } }, "params": { + "Region": "me-south-1", "UseFIPS": false, - "Region": "ap-southeast-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.ap-southeast-1.amazonaws.com" + "url": "https://elasticache.sa-east-1.amazonaws.com" } }, "params": { + "Region": "sa-east-1", "UseFIPS": false, - "Region": "ap-southeast-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache-fips.ap-southeast-2.api.aws" + "url": "https://elasticache.us-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-2", - "UseDualStack": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - 
"documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache-fips.ap-southeast-2.amazonaws.com" + "url": "https://elasticache-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "ap-southeast-2", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.ap-southeast-2.api.aws" + "url": "https://elasticache.us-east-2.amazonaws.com" } }, "params": { + "Region": "us-east-2", "UseFIPS": false, - "Region": "ap-southeast-2", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.ap-southeast-2.amazonaws.com" + "url": "https://elasticache-fips.us-east-2.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "ap-southeast-2", + "Region": "us-east-2", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://elasticache.us-west-1.amazonaws.com" + } }, "params": { - "UseFIPS": true, - "Region": "us-iso-east-1", - "UseDualStack": true + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-1 
with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://elasticache-fips.us-west-1.amazonaws.com" } }, "params": { + "Region": "us-west-1", "UseFIPS": true, - "Region": "us-iso-east-1", "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://elasticache.us-west-2.amazonaws.com" + } }, "params": { + "Region": "us-west-2", "UseFIPS": false, - "Region": "us-iso-east-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.us-iso-east-1.c2s.ic.gov" + "url": "https://elasticache-fips.us-west-2.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "us-iso-east-1", + "Region": "us-west-2", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticache-fips.ap-southeast-3.api.aws" + "url": "https://elasticache-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "ap-southeast-3", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticache-fips.ap-southeast-3.amazonaws.com" + "url": "https://elasticache.us-east-1.api.aws" } }, "params": { 
- "UseFIPS": true, - "Region": "ap-southeast-3", - "UseDualStack": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.ap-southeast-3.api.aws" + "url": "https://elasticache.cn-north-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "Region": "ap-southeast-3", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.ap-southeast-3.amazonaws.com" + "url": "https://elasticache.cn-northwest-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-northwest-1", "UseFIPS": false, - "Region": "ap-southeast-3", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticache-fips.ap-southeast-4.api.aws" + "url": "https://elasticache-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "Region": "ap-southeast-4", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache-fips.ap-southeast-4.amazonaws.com" + "url": "https://elasticache-fips.cn-north-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "Region": "ap-southeast-4", "UseDualStack": false } }, { - "documentation": "For region 
ap-southeast-4 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticache.ap-southeast-4.api.aws" + "url": "https://elasticache.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "Region": "ap-southeast-4", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.ap-southeast-4.amazonaws.com" + "url": "https://elasticache.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "ap-southeast-4", "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache-fips.us-east-1.api.aws" + "url": "https://elasticache.us-gov-west-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "us-east-1", - "UseDualStack": true + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache-fips.us-east-1.amazonaws.com" + "url": "https://elasticache.us-gov-west-1.amazonaws.com" } }, "params": { + "Region": "us-gov-west-1", "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": 
"https://elasticache.us-east-1.api.aws" + "url": "https://elasticache-fips.us-gov-east-1.api.aws" } }, "params": { - "UseFIPS": false, - "Region": "us-east-1", + "Region": "us-gov-east-1", + "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.us-east-1.amazonaws.com" + "url": "https://elasticache.us-gov-east-1.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "us-east-1", + "Region": "us-gov-east-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticache-fips.us-east-2.api.aws" + "url": "https://elasticache.us-gov-east-1.api.aws" } }, "params": { - "UseFIPS": true, - "Region": "us-east-2", + "Region": "us-gov-east-1", + "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticache-fips.us-east-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-east-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.us-east-2.api.aws" + "url": "https://elasticache.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-2", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-west-1 with FIPS 
disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.us-east-2.amazonaws.com" + "url": "https://elasticache.us-iso-west-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-west-1", "UseFIPS": false, - "Region": "us-east-2", "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://elasticache-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "cn-northwest-1", "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache-fips.cn-northwest-1.amazonaws.com.cn" + "url": "https://elasticache-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "cn-northwest-1", "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://elasticache.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "cn-northwest-1", "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticache.cn-northwest-1.amazonaws.com.cn" + 
"url": "https://elasticache.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "cn-northwest-1", "UseDualStack": false } }, @@ -1714,8 +587,8 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -1727,8 +600,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -1738,26 +611,27 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://elasticache.us-isob-east-1.sc2s.sgov.gov" + "url": "https://example.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-isob-east-1", - "UseDualStack": false + "UseDualStack": false, + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -1765,7 +639,6 @@ }, "params": { "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1776,8 +649,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1788,11 +661,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": 
"us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/models/apis/es/2015-01-01/api-2.json b/models/apis/es/2015-01-01/api-2.json index a7d138fa60a..c9a5696275c 100644 --- a/models/apis/es/2015-01-01/api-2.json +++ b/models/apis/es/2015-01-01/api-2.json @@ -1544,7 +1544,8 @@ }, "DescribePackagesFilterValues":{ "type":"list", - "member":{"shape":"DescribePackagesFilterValue"} + "member":{"shape":"DescribePackagesFilterValue"}, + "min":1 }, "DescribePackagesRequest":{ "type":"structure", diff --git a/models/apis/es/2015-01-01/docs-2.json b/models/apis/es/2015-01-01/docs-2.json index 9a1f04bd25a..81b421d7a68 100644 --- a/models/apis/es/2015-01-01/docs-2.json +++ b/models/apis/es/2015-01-01/docs-2.json @@ -667,7 +667,7 @@ "DescribePackagesFilterValues": { "base": null, "refs": { - "DescribePackagesFilter$Value": "

A list of values for the specified field.

" + "DescribePackagesFilter$Value": "

A non-empty list of values for the specified field.

" } }, "DescribePackagesRequest": { diff --git a/models/apis/es/2015-01-01/endpoint-rule-set-1.json b/models/apis/es/2015-01-01/endpoint-rule-set-1.json index 78b6817727f..07a7ff33961 100644 --- a/models/apis/es/2015-01-01/endpoint-rule-set-1.json +++ b/models/apis/es/2015-01-01/endpoint-rule-set-1.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true 
+ "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://es-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://es-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + 
"conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://es-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://es.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://es-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://es.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": 
[ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://es.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://es.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/models/apis/es/2015-01-01/endpoint-tests-1.json b/models/apis/es/2015-01-01/endpoint-tests-1.json index 000cf7fba1f..491f64e69dc 100644 --- a/models/apis/es/2015-01-01/endpoint-tests-1.json +++ b/models/apis/es/2015-01-01/endpoint-tests-1.json @@ -1,94 +1,68 @@ { "testCases": [ { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.ap-south-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es-fips.ap-south-2.amazonaws.com" + "url": "https://es.af-south-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-south-2", + "Region": "af-south-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack 
disabled", "expect": { "endpoint": { - "url": "https://es.ap-south-2.api.aws" + "url": "https://es.ap-east-1.amazonaws.com" } }, "params": { + "Region": "ap-east-1", "UseFIPS": false, - "Region": "ap-south-2", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es.ap-south-2.amazonaws.com" + "url": "https://es.ap-northeast-1.amazonaws.com" } }, "params": { + "Region": "ap-northeast-1", "UseFIPS": false, - "Region": "ap-south-2", "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es-fips.ap-south-1.amazonaws.com" + "url": "https://es.ap-northeast-2.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-south-1", + "Region": "ap-northeast-2", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es.ap-south-1.api.aws" + "url": "https://es.ap-northeast-3.amazonaws.com" } }, "params": { + "Region": "ap-northeast-3", "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": true + "UseDualStack": false } }, { @@ -99,258 +73,50 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with 
FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es.eu-south-1.amazonaws.com" - } - }, - "params": { "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.eu-south-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-2", "UseDualStack": false } }, { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.eu-south-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { 
"endpoint": { - "url": "https://es.eu-south-2.amazonaws.com" + "url": "https://es.ap-southeast-1.amazonaws.com" } }, "params": { + "Region": "ap-southeast-1", "UseFIPS": false, - "Region": "eu-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-east-1", "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.us-gov-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es.us-gov-east-1.amazonaws.com" + "url": "https://es.ap-southeast-2.amazonaws.com" } }, "params": { + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.me-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.me-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": 
"me-central-1", "UseDualStack": false } }, { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.me-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es.me-central-1.amazonaws.com" + "url": "https://es.ap-southeast-3.amazonaws.com" } }, "params": { + "Region": "ap-southeast-3", "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", "UseDualStack": false } }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": true - } - }, { "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { @@ -359,1352 +125,459 @@ } }, "params": { - "UseFIPS": false, "Region": "ca-central-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - 
"Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { "url": "https://es.eu-central-1.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "UseFIPS": true, - "Region": "us-iso-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-iso-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "UseFIPS": false, - "Region": "us-iso-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es.us-iso-west-1.c2s.ic.gov" - } - }, - 
"params": { - "UseFIPS": false, - "Region": "us-iso-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.eu-central-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.eu-central-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For 
region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.af-south-1.api.aws" - } - }, - 
"params": { - "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 
with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": 
true, - "Region": "eu-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.ap-northeast-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": true - } - }, - { - 
"documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with 
FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.me-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.me-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es.me-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "sa-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "sa-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.sa-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es.sa-east-1.amazonaws.com" - } - }, - "params": { - 
"UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.ap-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.ap-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseFIPS": true, - "Region": "cn-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es-fips.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "UseFIPS": true, - "Region": "cn-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseFIPS": false, - "Region": "cn-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For 
region cn-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://es.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "UseFIPS": false, - "Region": "cn-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-west-1", - "UseDualStack": true + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es-fips.us-gov-west-1.amazonaws.com" + "url": "https://es.eu-north-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "us-gov-west-1", + "Region": "eu-north-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es.us-gov-west-1.api.aws" + "url": "https://es.eu-south-1.amazonaws.com" } }, "params": { + "Region": "eu-south-1", "UseFIPS": false, - "Region": "us-gov-west-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es.us-gov-west-1.amazonaws.com" + "url": "https://es.eu-west-1.amazonaws.com" } }, "params": { + "Region": "eu-west-1", "UseFIPS": false, - "Region": "us-gov-west-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region 
eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es-fips.ap-southeast-1.api.aws" + "url": "https://es.eu-west-2.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-1", - "UseDualStack": true + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es-fips.ap-southeast-1.amazonaws.com" + "url": "https://es.eu-west-3.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-1", + "Region": "eu-west-3", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es.ap-southeast-1.api.aws" + "url": "https://es.me-south-1.amazonaws.com" } }, "params": { + "Region": "me-south-1", "UseFIPS": false, - "Region": "ap-southeast-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es.ap-southeast-1.amazonaws.com" + "url": "https://es.sa-east-1.amazonaws.com" } }, "params": { + "Region": "sa-east-1", "UseFIPS": false, - "Region": "ap-southeast-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es-fips.ap-southeast-2.api.aws" + "url": "https://es.us-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-2", 
- "UseDualStack": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es-fips.ap-southeast-2.amazonaws.com" + "url": "https://es-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "ap-southeast-2", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es.ap-southeast-2.api.aws" + "url": "https://es.us-east-2.amazonaws.com" } }, "params": { + "Region": "us-east-2", "UseFIPS": false, - "Region": "ap-southeast-2", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es.ap-southeast-2.amazonaws.com" + "url": "https://es-fips.us-east-2.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "ap-southeast-2", + "Region": "us-east-2", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://es.us-west-1.amazonaws.com" + } }, "params": { - "UseFIPS": true, - "Region": "us-iso-east-1", - "UseDualStack": true + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + 
"documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://es-fips.us-west-1.amazonaws.com" } }, "params": { + "Region": "us-west-1", "UseFIPS": true, - "Region": "us-iso-east-1", "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://es.us-west-2.amazonaws.com" + } }, "params": { + "Region": "us-west-2", "UseFIPS": false, - "Region": "us-iso-east-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es.us-iso-east-1.c2s.ic.gov" + "url": "https://es-fips.us-west-2.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "us-iso-east-1", + "Region": "us-west-2", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://es-fips.ap-southeast-3.api.aws" + "url": "https://es-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "ap-southeast-3", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://es-fips.ap-southeast-3.amazonaws.com" + "url": "https://es.us-east-1.api.aws" } }, "params": { - "UseFIPS": true, - "Region": 
"ap-southeast-3", - "UseDualStack": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es.ap-southeast-3.api.aws" + "url": "https://es.cn-north-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "Region": "ap-southeast-3", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es.ap-southeast-3.amazonaws.com" + "url": "https://es.cn-northwest-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-northwest-1", "UseFIPS": false, - "Region": "ap-southeast-3", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://es-fips.ap-southeast-4.api.aws" + "url": "https://es-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "Region": "ap-southeast-4", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es-fips.ap-southeast-4.amazonaws.com" + "url": "https://es-fips.cn-north-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "Region": "ap-southeast-4", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS 
disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://es.ap-southeast-4.api.aws" + "url": "https://es.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "Region": "ap-southeast-4", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es.ap-southeast-4.amazonaws.com" + "url": "https://es.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "ap-southeast-4", "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://es-fips.us-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es-fips.us-east-1.amazonaws.com" + "url": "https://es-fips.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es.us-east-1.api.aws" + "url": "https://es.us-gov-west-1.amazonaws.com" } }, "params": { + "Region": "us-gov-west-1", "UseFIPS": false, - "Region": "us-east-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack 
disabled", "expect": { "endpoint": { - "url": "https://es.us-east-1.amazonaws.com" + "url": "https://es-fips.us-gov-west-1.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "us-east-1", + "Region": "us-gov-west-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://es-fips.us-east-2.api.aws" + "url": "https://es-fips.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-east-2", "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://es-fips.us-east-2.amazonaws.com" + "url": "https://es.us-gov-east-1.api.aws" } }, "params": { - "UseFIPS": true, - "Region": "us-east-2", - "UseDualStack": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es.us-east-2.api.aws" + "url": "https://es.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-2", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es.us-east-2.amazonaws.com" + "url": "https://es.us-iso-west-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-west-1", "UseFIPS": false, - "Region": "us-east-2", "UseDualStack": false } }, { - "documentation": 
"For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://es-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "cn-northwest-1", "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es-fips.cn-northwest-1.amazonaws.com.cn" + "url": "https://es-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "cn-northwest-1", "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://es.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "cn-northwest-1", "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://es.cn-northwest-1.amazonaws.com.cn" + "url": "https://es.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "cn-northwest-1", "UseDualStack": false } }, @@ -1714,8 +587,8 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + 
"UseFIPS": true, "UseDualStack": true } }, @@ -1727,8 +600,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -1738,26 +611,27 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://es.us-isob-east-1.sc2s.sgov.gov" + "url": "https://example.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-isob-east-1", - "UseDualStack": false + "UseDualStack": false, + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -1765,7 +639,6 @@ }, "params": { "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1776,8 +649,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1788,11 +661,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/models/apis/health/2016-08-04/docs-2.json b/models/apis/health/2016-08-04/docs-2.json index 9b66cc4663a..6a252897543 100644 --- 
a/models/apis/health/2016-08-04/docs-2.json +++ b/models/apis/health/2016-08-04/docs-2.json @@ -1,19 +1,19 @@ { "version": "2.0", - "service": "Health

The Health API provides programmatic access to the Health information that appears in the Personal Health Dashboard. You can use the API operations to get information about events that might affect your Amazon Web Services services and resources.

For authentication of requests, Health uses the Signature Version 4 Signing Process.

If your Amazon Web Services account is part of Organizations, you can use the Health organizational view feature. This feature provides a centralized view of Health events across all accounts in your organization. You can aggregate Health events in real time to identify accounts in your organization that are affected by an operational event or get notified of security vulnerabilities. Use the organizational view API operations to enable this feature and return event information. For more information, see Aggregating Health events in the Health User Guide.

When you use the Health API operations to return Health events, see the following recommendations:

", + "service": "Health

The Health API provides access to the Health information that appears in the Health Dashboard. You can use the API operations to get information about events that might affect your Amazon Web Services and resources.

You must have a Business, Enterprise On-Ramp, or Enterprise Support plan from Amazon Web Services Support to use the Health API. If you call the Health API from an Amazon Web Services account that doesn't have a Business, Enterprise On-Ramp, or Enterprise Support plan, you receive a SubscriptionRequiredException error.

For API access, you need an access key ID and a secret access key. Use temporary credentials instead of long-term access keys when possible. Temporary credentials include an access key ID, a secret access key, and a security token that indicates when the credentials expire. For more information, see Best practices for managing Amazon Web Services access keys in the Amazon Web Services General Reference.

You can use the Health endpoint health.us-east-1.amazonaws.com (HTTPS) to call the Health API operations. Health supports a multi-Region application architecture and has two regional endpoints in an active-passive configuration. You can use the high availability endpoint example to determine which Amazon Web Services Region is active, so that you can get the latest information from the API. For more information, see Accessing the Health API in the Health User Guide.

For authentication of requests, Health uses the Signature Version 4 Signing Process.

If your Amazon Web Services account is part of Organizations, you can use the Health organizational view feature. This feature provides a centralized view of Health events across all accounts in your organization. You can aggregate Health events in real time to identify accounts in your organization that are affected by an operational event or get notified of security vulnerabilities. Use the organizational view API operations to enable this feature and return event information. For more information, see Aggregating Health events in the Health User Guide.

When you use the Health API operations to return Health events, see the following recommendations:

", "operations": { "DescribeAffectedAccountsForOrganization": "

Returns a list of accounts in the organization from Organizations that are affected by the provided event. For more information about the different types of Health events, see Event.

Before you can call this operation, you must first enable Health to work with Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's management account.

This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.

", - "DescribeAffectedEntities": "

Returns a list of entities that have been affected by the specified events, based on the specified filter criteria. Entities can refer to individual customer resources, groups of customer resources, or any other construct, depending on the Amazon Web Services service. Events that have impact beyond that of the affected entities, or where the extent of impact is unknown, include at least one entity indicating this.

At least one event ARN is required.

", - "DescribeAffectedEntitiesForOrganization": "

Returns a list of entities that have been affected by one or more events for one or more accounts in your organization in Organizations, based on the filter criteria. Entities can refer to individual customer resources, groups of customer resources, or any other construct, depending on the Amazon Web Services service.

At least one event Amazon Resource Name (ARN) and account ID are required.

Before you can call this operation, you must first enable Health to work with Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's management account.

", + "DescribeAffectedEntities": "

Returns a list of entities that have been affected by the specified events, based on the specified filter criteria. Entities can refer to individual customer resources, groups of customer resources, or any other construct, depending on the Amazon Web Service. Events that have impact beyond that of the affected entities, or where the extent of impact is unknown, include at least one entity indicating this.

At least one event ARN is required.

", + "DescribeAffectedEntitiesForOrganization": "

Returns a list of entities that have been affected by one or more events for one or more accounts in your organization in Organizations, based on the filter criteria. Entities can refer to individual customer resources, groups of customer resources, or any other construct, depending on the Amazon Web Service.

At least one event Amazon Resource Name (ARN) and account ID are required.

Before you can call this operation, you must first enable Health to work with Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's management account.

", "DescribeEntityAggregates": "

Returns the number of entities that are affected by each of the specified events.

", "DescribeEventAggregates": "

Returns the number of events of each event type (issue, scheduled change, and account notification). If no filter is specified, the counts of all events in each category are returned.

This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.

", "DescribeEventDetails": "

Returns detailed information about one or more specified events. Information includes standard event data (Amazon Web Services Region, service, and so on, as returned by DescribeEvents), a detailed event description, and possible additional metadata that depends upon the nature of the event. Affected entities are not included. To retrieve the entities, use the DescribeAffectedEntities operation.

If a specified event can't be retrieved, an error message is returned for that event.

This operation supports resource-level permissions. You can use this operation to allow or deny access to specific Health events. For more information, see Resource- and action-based conditions in the Health User Guide.

", "DescribeEventDetailsForOrganization": "

Returns detailed information about one or more specified events for one or more Amazon Web Services accounts in your organization. This information includes standard event data (such as the Amazon Web Services Region and service), an event description, and (depending on the event) possible metadata. This operation doesn't return affected entities, such as the resources related to the event. To return affected entities, use the DescribeAffectedEntitiesForOrganization operation.

Before you can call this operation, you must first enable Health to work with Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's management account.

When you call the DescribeEventDetailsForOrganization operation, specify the organizationEventDetailFilters object in the request. Depending on the Health event type, note the following differences:

For more information, see Event.

This operation doesn't support resource-level permissions. You can't use this operation to allow or deny access to specific Health events. For more information, see Resource- and action-based conditions in the Health User Guide.

", - "DescribeEventTypes": "

Returns the event types that meet the specified filter criteria. You can use this API operation to find information about the Health event, such as the category, Amazon Web Services service, and event code. The metadata for each event appears in the EventType object.

If you don't specify a filter criteria, the API operation returns all event types, in no particular order.

This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.

", + "DescribeEventTypes": "

Returns the event types that meet the specified filter criteria. You can use this API operation to find information about the Health event, such as the category, Amazon Web Service, and event code. The metadata for each event appears in the EventType object.

If you don't specify a filter criteria, the API operation returns all event types, in no particular order.

This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.

", "DescribeEvents": "

Returns information about events that meet the specified filter criteria. Events are returned in a summary form and do not include the detailed description, any additional metadata that depends on the event type, or any affected resources. To retrieve that information, use the DescribeEventDetails and DescribeAffectedEntities operations.

If no filter criteria are specified, all events are returned. Results are sorted by lastModifiedTime, starting with the most recent event.

", "DescribeEventsForOrganization": "

Returns information about events across your organization in Organizations. You can use thefilters parameter to specify the events that you want to return. Events are returned in a summary form and don't include the affected accounts, detailed description, any additional metadata that depends on the event type, or any affected resources. To retrieve that information, use the following operations:

If you don't specify a filter, the DescribeEventsForOrganizations returns all events across your organization. Results are sorted by lastModifiedTime, starting with the most recent event.

For more information about the different types of Health events, see Event.

Before you can call this operation, you must first enable Health to work with Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's management account.

This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.

", - "DescribeHealthServiceStatusForOrganization": "

This operation provides status information on enabling or disabling Health to work with your organization. To call this operation, you must sign in as an IAM user, assume an IAM role, or sign in as the root user (not recommended) in the organization's management account.

", - "DisableHealthServiceAccessForOrganization": "

Disables Health from working with Organizations. To call this operation, you must sign in as an Identity and Access Management (IAM) user, assume an IAM role, or sign in as the root user (not recommended) in the organization's management account. For more information, see Aggregating Health events in the Health User Guide.

This operation doesn't remove the service-linked role from the management account in your organization. You must use the IAM console, API, or Command Line Interface (CLI) to remove the service-linked role. For more information, see Deleting a Service-Linked Role in the IAM User Guide.

You can also disable the organizational feature by using the Organizations DisableAWSServiceAccess API operation. After you call this operation, Health stops aggregating events for all other Amazon Web Services accounts in your organization. If you call the Health API operations for organizational view, Health returns an error. Health continues to aggregate health events for your Amazon Web Services account.

", + "DescribeHealthServiceStatusForOrganization": "

This operation provides status information on enabling or disabling Health to work with your organization. To call this operation, you must use the organization's management account.

", + "DisableHealthServiceAccessForOrganization": "

Disables Health from working with Organizations. To call this operation, you must sign in to the organization's management account. For more information, see Aggregating Health events in the Health User Guide.

This operation doesn't remove the service-linked role from the management account in your organization. You must use the IAM console, API, or Command Line Interface (CLI) to remove the service-linked role. For more information, see Deleting a Service-Linked Role in the IAM User Guide.

You can also disable the organizational feature by using the Organizations DisableAWSServiceAccess API operation. After you call this operation, Health stops aggregating events for all other Amazon Web Services accounts in your organization. If you call the Health API operations for organizational view, Health returns an error. Health continues to aggregate health events for your Amazon Web Services account.

", "EnableHealthServiceAccessForOrganization": "

Enables Health to work with Organizations. You can use the organizational view feature to aggregate events from all Amazon Web Services accounts in your organization in a centralized location.

This operation also creates a service-linked role for the management account in the organization.

To call this operation, you must meet the following requirements:

If you don't have the required support plan, you can instead use the Health console to enable the organizational view feature. For more information, see Aggregating Health events in the Health User Guide.

" }, "shapes": { @@ -504,9 +504,9 @@ "eventScopeCode": { "base": null, "refs": { - "DescribeAffectedAccountsForOrganizationResponse$eventScopeCode": "

This parameter specifies if the Health event is a public Amazon Web Services service event or an account-specific event.

", - "Event$eventScopeCode": "

This parameter specifies if the Health event is a public Amazon Web Services service event or an account-specific event.

", - "OrganizationEvent$eventScopeCode": "

This parameter specifies if the Health event is a public Amazon Web Services service event or an account-specific event.

" + "DescribeAffectedAccountsForOrganizationResponse$eventScopeCode": "

This parameter specifies if the Health event is a public Amazon Web Service event or an account-specific event.

", + "Event$eventScopeCode": "

This parameter specifies if the Health event is a public Amazon Web Service event or an account-specific event.

", + "OrganizationEvent$eventScopeCode": "

This parameter specifies if the Health event is a public Amazon Web Service event or an account-specific event.

" } }, "eventStatusCode": { @@ -587,7 +587,7 @@ "DescribeAffectedAccountsForOrganizationRequest$maxResults": "

The maximum number of items to return in one batch, between 10 and 100, inclusive.

", "DescribeAffectedEntitiesRequest$maxResults": "

The maximum number of items to return in one batch, between 10 and 100, inclusive.

", "DescribeEventAggregatesRequest$maxResults": "

The maximum number of items to return in one batch, between 10 and 100, inclusive.

", - "DescribeEventTypesRequest$maxResults": "

The maximum number of items to return in one batch, between 10 and 100, inclusive.

", + "DescribeEventTypesRequest$maxResults": "

The maximum number of items to return in one batch, between 10 and 100, inclusive.

If you don't specify the maxResults parameter, this operation returns a maximum of 30 items by default.

", "DescribeEventsRequest$maxResults": "

The maximum number of items to return in one batch, between 10 and 100, inclusive.

" } }, @@ -647,18 +647,18 @@ "service": { "base": null, "refs": { - "Event$service": "

The Amazon Web Services service that is affected by the event. For example, EC2, RDS.

", - "EventType$service": "

The Amazon Web Services service that is affected by the event. For example, EC2, RDS.

", - "OrganizationEvent$service": "

The Amazon Web Services service that is affected by the event, such as EC2 and RDS.

", + "Event$service": "

The Amazon Web Service that is affected by the event. For example, EC2, RDS.

", + "EventType$service": "

The Amazon Web Service that is affected by the event. For example, EC2, RDS.

", + "OrganizationEvent$service": "

The Amazon Web Service that is affected by the event, such as EC2 and RDS.

", "serviceList$member": null } }, "serviceList": { "base": null, "refs": { - "EventFilter$services": "

The Amazon Web Services services associated with the event. For example, EC2, RDS.

", - "EventTypeFilter$services": "

The Amazon Web Services services associated with the event. For example, EC2, RDS.

", - "OrganizationEventFilter$services": "

The Amazon Web Services services associated with the event. For example, EC2, RDS.

" + "EventFilter$services": "

The Amazon Web Services associated with the event. For example, EC2, RDS.

", + "EventTypeFilter$services": "

The Amazon Web Services associated with the event. For example, EC2, RDS.

", + "OrganizationEventFilter$services": "

The Amazon Web Services associated with the event. For example, EC2, RDS.

" } }, "string": { @@ -669,7 +669,7 @@ "EventDetailsErrorItem$errorMessage": "

A message that describes the error.

", "InvalidPaginationToken$message": null, "OrganizationAffectedEntitiesErrorItem$errorName": "

The name of the error.

", - "OrganizationAffectedEntitiesErrorItem$errorMessage": "

The unique identifier for the event type. The format is AWS_SERVICE_DESCRIPTION. For example, AWS_EC2_SYSTEM_MAINTENANCE_EVENT.

", + "OrganizationAffectedEntitiesErrorItem$errorMessage": "

A message that describes the error. Follow the error message and retry your request.

For example, the InvalidAccountInputError error message appears if you call the DescribeAffectedEntitiesForOrganization operation and specify the AccountSpecific value for the EventScopeCode parameter, but don't specify an Amazon Web Services account.

", "OrganizationEventDetailsErrorItem$errorName": "

The name of the error.

", "OrganizationEventDetailsErrorItem$errorMessage": "

A message that describes the error.

If you call the DescribeEventDetailsForOrganization operation and receive one of the following errors, follow the recommendations in the message:

", "UnsupportedLocale$message": null diff --git a/models/apis/health/2016-08-04/endpoint-rule-set-1.json b/models/apis/health/2016-08-04/endpoint-rule-set-1.json new file mode 100644 index 00000000000..d8ace323eae --- /dev/null +++ b/models/apis/health/2016-08-04/endpoint-rule-set-1.json @@ -0,0 +1,404 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + 
"headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://health-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://health-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] 
+ } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://health.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "aws-global" + ] + } + ], + "endpoint": { + "url": "https://global.health.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "health", + "signingRegion": "us-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "aws-cn-global" + ] + } + ], + "endpoint": { + "url": "https://global.health.amazonaws.com.cn", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "health", + "signingRegion": "cn-northwest-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://health.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + } + ] +} \ No newline at 
end of file diff --git a/models/apis/health/2016-08-04/endpoint-tests-1.json b/models/apis/health/2016-08-04/endpoint-tests-1.json new file mode 100644 index 00000000000..6209498f38c --- /dev/null +++ b/models/apis/health/2016-08-04/endpoint-tests-1.json @@ -0,0 +1,358 @@ +{ + "testCases": [ + { + "documentation": "For region aws-global with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "health", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://global.health.amazonaws.com" + } + }, + "params": { + "Region": "aws-global", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://health-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://health.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region aws-cn-global with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "health", + "signingRegion": 
"cn-northwest-1" + } + ] + }, + "url": "https://global.health.amazonaws.com.cn" + } + }, + "params": { + "Region": "aws-cn-global", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://health-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://health.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://health-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { 
+ "url": "https://health.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack 
are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + 
"expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/models/apis/ivs-realtime/2020-07-14/api-2.json b/models/apis/ivs-realtime/2020-07-14/api-2.json index 27858086e6c..45d459a07b1 100644 --- a/models/apis/ivs-realtime/2020-07-14/api-2.json +++ b/models/apis/ivs-realtime/2020-07-14/api-2.json @@ -79,6 +79,21 @@ {"shape":"PendingVerification"} ] }, + "GetParticipant":{ + "name":"GetParticipant", + "http":{ + "method":"POST", + "requestUri":"/GetParticipant", + "responseCode":200 + }, + "input":{"shape":"GetParticipantRequest"}, + "output":{"shape":"GetParticipantResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ] + }, "GetStage":{ "name":"GetStage", "http":{ @@ -94,6 +109,63 @@ {"shape":"AccessDeniedException"} ] }, + "GetStageSession":{ + "name":"GetStageSession", + "http":{ + "method":"POST", + "requestUri":"/GetStageSession", + "responseCode":200 + }, + "input":{"shape":"GetStageSessionRequest"}, + "output":{"shape":"GetStageSessionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ] + }, + "ListParticipantEvents":{ + "name":"ListParticipantEvents", + "http":{ + "method":"POST", + "requestUri":"/ListParticipantEvents", + "responseCode":200 + }, + "input":{"shape":"ListParticipantEventsRequest"}, + "output":{"shape":"ListParticipantEventsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ] + }, + "ListParticipants":{ + "name":"ListParticipants", + "http":{ + "method":"POST", + "requestUri":"/ListParticipants", + "responseCode":200 + }, + "input":{"shape":"ListParticipantsRequest"}, + "output":{"shape":"ListParticipantsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ] + }, + "ListStageSessions":{ + 
"name":"ListStageSessions", + "http":{ + "method":"POST", + "requestUri":"/ListStageSessions", + "responseCode":200 + }, + "input":{"shape":"ListStageSessionsRequest"}, + "output":{"shape":"ListStageSessionsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ] + }, "ListStages":{ "name":"ListStages", "http":{ @@ -262,6 +334,57 @@ "members":{ } }, + "Event":{ + "type":"structure", + "members":{ + "errorCode":{"shape":"EventErrorCode"}, + "eventTime":{"shape":"Time"}, + "name":{"shape":"EventName"}, + "participantId":{"shape":"ParticipantId"}, + "remoteParticipantId":{"shape":"ParticipantId"} + } + }, + "EventErrorCode":{ + "type":"string", + "enum":["INSUFFICIENT_CAPABILITIES"] + }, + "EventList":{ + "type":"list", + "member":{"shape":"Event"} + }, + "EventName":{ + "type":"string", + "enum":[ + "JOINED", + "LEFT", + "PUBLISH_STARTED", + "PUBLISH_STOPPED", + "SUBSCRIBE_STARTED", + "SUBSCRIBE_STOPPED", + "PUBLISH_ERROR", + "SUBSCRIBE_ERROR", + "JOIN_ERROR" + ] + }, + "GetParticipantRequest":{ + "type":"structure", + "required":[ + "participantId", + "sessionId", + "stageArn" + ], + "members":{ + "participantId":{"shape":"ParticipantId"}, + "sessionId":{"shape":"StageSessionId"}, + "stageArn":{"shape":"StageArn"} + } + }, + "GetParticipantResponse":{ + "type":"structure", + "members":{ + "participant":{"shape":"Participant"} + } + }, "GetStageRequest":{ "type":"structure", "required":["arn"], @@ -275,6 +398,23 @@ "stage":{"shape":"Stage"} } }, + "GetStageSessionRequest":{ + "type":"structure", + "required":[ + "sessionId", + "stageArn" + ], + "members":{ + "sessionId":{"shape":"StageSessionId"}, + "stageArn":{"shape":"StageArn"} + } + }, + "GetStageSessionResponse":{ + "type":"structure", + "members":{ + "stageSession":{"shape":"StageSession"} + } + }, "InternalServerException":{ "type":"structure", "members":{ @@ -284,6 +424,70 @@ "exception":true, "fault":true }, + "ListParticipantEventsRequest":{ + "type":"structure", + 
"required":[ + "participantId", + "sessionId", + "stageArn" + ], + "members":{ + "maxResults":{"shape":"MaxParticipantEventResults"}, + "nextToken":{"shape":"PaginationToken"}, + "participantId":{"shape":"ParticipantId"}, + "sessionId":{"shape":"StageSessionId"}, + "stageArn":{"shape":"StageArn"} + } + }, + "ListParticipantEventsResponse":{ + "type":"structure", + "required":["events"], + "members":{ + "events":{"shape":"EventList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListParticipantsRequest":{ + "type":"structure", + "required":[ + "sessionId", + "stageArn" + ], + "members":{ + "filterByPublished":{"shape":"Published"}, + "filterByState":{"shape":"ParticipantState"}, + "filterByUserId":{"shape":"UserId"}, + "maxResults":{"shape":"MaxParticipantResults"}, + "nextToken":{"shape":"PaginationToken"}, + "sessionId":{"shape":"StageSessionId"}, + "stageArn":{"shape":"StageArn"} + } + }, + "ListParticipantsResponse":{ + "type":"structure", + "required":["participants"], + "members":{ + "nextToken":{"shape":"PaginationToken"}, + "participants":{"shape":"ParticipantList"} + } + }, + "ListStageSessionsRequest":{ + "type":"structure", + "required":["stageArn"], + "members":{ + "maxResults":{"shape":"MaxStageSessionResults"}, + "nextToken":{"shape":"PaginationToken"}, + "stageArn":{"shape":"StageArn"} + } + }, + "ListStageSessionsResponse":{ + "type":"structure", + "required":["stageSessions"], + "members":{ + "nextToken":{"shape":"PaginationToken"}, + "stageSessions":{"shape":"StageSessionList"} + } + }, "ListStagesRequest":{ "type":"structure", "members":{ @@ -317,17 +521,70 @@ "tags":{"shape":"Tags"} } }, + "MaxParticipantEventResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "MaxParticipantResults":{ + "type":"integer", + "max":100, + "min":1 + }, "MaxStageResults":{ "type":"integer", "max":100, "min":1 }, + "MaxStageSessionResults":{ + "type":"integer", + "max":100, + "min":1 + }, "PaginationToken":{ "type":"string", "max":1024, "min":0, 
"pattern":"^[a-zA-Z0-9+/=_-]*$" }, + "Participant":{ + "type":"structure", + "members":{ + "attributes":{"shape":"ParticipantAttributes"}, + "firstJoinTime":{"shape":"Time"}, + "participantId":{"shape":"ParticipantId"}, + "published":{"shape":"Published"}, + "state":{"shape":"ParticipantState"}, + "userId":{"shape":"UserId"} + } + }, + "ParticipantAttributes":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "ParticipantId":{"type":"string"}, + "ParticipantList":{ + "type":"list", + "member":{"shape":"ParticipantSummary"} + }, + "ParticipantState":{ + "type":"string", + "enum":[ + "CONNECTED", + "DISCONNECTED" + ] + }, + "ParticipantSummary":{ + "type":"structure", + "members":{ + "firstJoinTime":{"shape":"Time"}, + "participantId":{"shape":"ParticipantId"}, + "published":{"shape":"Published"}, + "state":{"shape":"ParticipantState"}, + "userId":{"shape":"UserId"} + } + }, "ParticipantToken":{ "type":"structure", "members":{ @@ -407,6 +664,7 @@ }, "exception":true }, + "Published":{"type":"boolean"}, "ResourceArn":{ "type":"string", "max":128, @@ -457,12 +715,32 @@ "min":0, "pattern":"^[a-zA-Z0-9-_]*$" }, + "StageSession":{ + "type":"structure", + "members":{ + "endTime":{"shape":"Time"}, + "sessionId":{"shape":"StageSessionId"}, + "startTime":{"shape":"Time"} + } + }, "StageSessionId":{ "type":"string", "max":16, "min":16, "pattern":"^st-[a-zA-Z0-9]+$" }, + "StageSessionList":{ + "type":"list", + "member":{"shape":"StageSessionSummary"} + }, + "StageSessionSummary":{ + "type":"structure", + "members":{ + "endTime":{"shape":"Time"}, + "sessionId":{"shape":"StageSessionId"}, + "startTime":{"shape":"Time"} + } + }, "StageSummary":{ "type":"structure", "required":["arn"], @@ -521,6 +799,10 @@ "max":50, "min":0 }, + "Time":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -559,6 +841,11 @@ "stage":{"shape":"Stage"} } }, + "UserId":{ + "type":"string", + "max":128, + 
"min":0 + }, "ValidationException":{ "type":"structure", "members":{ diff --git a/models/apis/ivs-realtime/2020-07-14/docs-2.json b/models/apis/ivs-realtime/2020-07-14/docs-2.json index d20f8107389..938ecac9039 100644 --- a/models/apis/ivs-realtime/2020-07-14/docs-2.json +++ b/models/apis/ivs-realtime/2020-07-14/docs-2.json @@ -1,12 +1,17 @@ { "version": "2.0", - "service": "

Introduction

The Amazon Interactive Video Service (IVS) stage API is REST compatible, using a standard HTTP API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, including errors.

Terminology: The IVS stage API sometimes is referred to as the IVS RealTime API.

Resources

The following resources contain information about your IVS live stream (see Getting Started with Amazon IVS):

Tagging

A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging AWS Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS stages has no service-specific constraints beyond what is documented there.

Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags).

The Amazon IVS stage API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Stage.

At most 50 tags can be applied to a resource.

Stages Endpoints

Tags Endpoints

", + "service": "

Introduction

The Amazon Interactive Video Service (IVS) stage API is REST compatible, using a standard HTTP API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, including errors.

Terminology:

Resources

The following resources contain information about your IVS live stream (see Getting Started with Amazon IVS):

Tagging

A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging AWS Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS stages has no service-specific constraints beyond what is documented there.

Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags).

The Amazon IVS stage API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Stage.

At most 50 tags can be applied to a resource.

Stages Endpoints

Tags Endpoints

", "operations": { "CreateParticipantToken": "

Creates an additional token for a specified stage. This can be done after stage creation or when tokens expire. Tokens always are scoped to the stage for which they are created.

Encryption keys are owned by Amazon IVS and never used directly by your application.

", "CreateStage": "

Creates a new stage (and optionally participant tokens).

", "DeleteStage": "

Shuts down and deletes the specified stage (disconnecting all participants).

", "DisconnectParticipant": "

Disconnects a specified participant and revokes the participant permanently from a specified stage.

", + "GetParticipant": "

Gets information about the specified participant token.

", "GetStage": "

Gets information for the specified stage.

", + "GetStageSession": "

Gets information for the specified stage session.

", + "ListParticipantEvents": "

Lists events for a specified participant that occurred during a specified stage session.

", + "ListParticipants": "

Lists all participants in a specified stage session.

", + "ListStageSessions": "

Gets all sessions for a specified stage.

", "ListStages": "

Gets summary information about all stages in your account, in the AWS region where the API request is processed.

", "ListTagsForResource": "

Gets information about AWS tags for the specified ARN.

", "TagResource": "

Adds or updates tags for the AWS resource with the specified ARN.

", @@ -70,6 +75,40 @@ "refs": { } }, + "Event": { + "base": "

An occurrence during a stage session.

", + "refs": { + "EventList$member": null + } + }, + "EventErrorCode": { + "base": null, + "refs": { + "Event$errorCode": "

If the event is an error event, the error code is provided to give insight into the specific error that occurred. If the event is not an error event, this field is null. INSUFFICIENT_CAPABILITIES indicates that the participant tried to take an action that the participant’s token is not allowed to do. For more information about participant capabilities, see the capabilities field in CreateParticipantToken.

" + } + }, + "EventList": { + "base": null, + "refs": { + "ListParticipantEventsResponse$events": "

List of the matching events.

" + } + }, + "EventName": { + "base": null, + "refs": { + "Event$name": "

The name of the event.

" + } + }, + "GetParticipantRequest": { + "base": null, + "refs": { + } + }, + "GetParticipantResponse": { + "base": null, + "refs": { + } + }, "GetStageRequest": { "base": null, "refs": { @@ -80,11 +119,51 @@ "refs": { } }, + "GetStageSessionRequest": { + "base": null, + "refs": { + } + }, + "GetStageSessionResponse": { + "base": null, + "refs": { + } + }, "InternalServerException": { "base": "

", "refs": { } }, + "ListParticipantEventsRequest": { + "base": null, + "refs": { + } + }, + "ListParticipantEventsResponse": { + "base": null, + "refs": { + } + }, + "ListParticipantsRequest": { + "base": null, + "refs": { + } + }, + "ListParticipantsResponse": { + "base": null, + "refs": { + } + }, + "ListStageSessionsRequest": { + "base": null, + "refs": { + } + }, + "ListStageSessionsResponse": { + "base": null, + "refs": { + } + }, "ListStagesRequest": { "base": null, "refs": { @@ -105,19 +184,86 @@ "refs": { } }, + "MaxParticipantEventResults": { + "base": null, + "refs": { + "ListParticipantEventsRequest$maxResults": "

Maximum number of results to return. Default: 50.

" + } + }, + "MaxParticipantResults": { + "base": null, + "refs": { + "ListParticipantsRequest$maxResults": "

Maximum number of results to return. Default: 50.

" + } + }, "MaxStageResults": { "base": null, "refs": { "ListStagesRequest$maxResults": "

Maximum number of results to return. Default: 50.

" } }, + "MaxStageSessionResults": { + "base": null, + "refs": { + "ListStageSessionsRequest$maxResults": "

Maximum number of results to return. Default: 50.

" + } + }, "PaginationToken": { "base": null, "refs": { + "ListParticipantEventsRequest$nextToken": "

The first participant to retrieve. This is used for pagination; see the nextToken response field.

", + "ListParticipantEventsResponse$nextToken": "

If there are more events than maxResults, use nextToken in the request to get the next set.

", + "ListParticipantsRequest$nextToken": "

The first participant to retrieve. This is used for pagination; see the nextToken response field.

", + "ListParticipantsResponse$nextToken": "

If there are more participants than maxResults, use nextToken in the request to get the next set.

", + "ListStageSessionsRequest$nextToken": "

The first stage session to retrieve. This is used for pagination; see the nextToken response field.

", + "ListStageSessionsResponse$nextToken": "

If there are more stage sessions than maxResults, use nextToken in the request to get the next set.

", "ListStagesRequest$nextToken": "

The first stage to retrieve. This is used for pagination; see the nextToken response field.

", "ListStagesResponse$nextToken": "

If there are more stages than maxResults, use nextToken in the request to get the next set.

" } }, + "Participant": { + "base": "

Object describing a participant that has joined a stage.

", + "refs": { + "GetParticipantResponse$participant": "

The participant that is returned.

" + } + }, + "ParticipantAttributes": { + "base": null, + "refs": { + "Participant$attributes": "

Application-provided attributes to encode into the token and attach to a stage. Map keys and values can contain UTF-8 encoded text. The maximum length of this field is 1 KB total. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" + } + }, + "ParticipantId": { + "base": null, + "refs": { + "Event$participantId": "

Unique identifier for the participant who triggered the event. This is assigned by IVS.

", + "Event$remoteParticipantId": "

Unique identifier for the remote participant. For a subscribe event, this is the publisher. For a publish or join event, this is null. This is assigned by IVS.

", + "GetParticipantRequest$participantId": "

Unique identifier for the participant. This is assigned by IVS and returned by CreateParticipantToken.

", + "ListParticipantEventsRequest$participantId": "

Unique identifier for this participant. This is assigned by IVS and returned by CreateParticipantToken.

", + "Participant$participantId": "

Unique identifier for this participant, assigned by IVS.

", + "ParticipantSummary$participantId": "

Unique identifier for this participant, assigned by IVS.

" + } + }, + "ParticipantList": { + "base": null, + "refs": { + "ListParticipantsResponse$participants": "

List of the matching participants (summary information only).

" + } + }, + "ParticipantState": { + "base": null, + "refs": { + "ListParticipantsRequest$filterByState": "

Filters the response list to only show participants in the specified state. Only one of filterByUserId, filterByPublished, or filterByState can be provided per request.

", + "Participant$state": "

Whether the participant is connected to or disconnected from the stage.

", + "ParticipantSummary$state": "

Whether the participant is connected to or disconnected from the stage.

" + } + }, + "ParticipantSummary": { + "base": "

Summary object describing a participant that has joined a stage.

", + "refs": { + "ParticipantList$member": null + } + }, "ParticipantToken": { "base": "

Object specifying a participant token in a stage.

", "refs": { @@ -162,9 +308,9 @@ "ParticipantTokenDurationMinutes": { "base": null, "refs": { - "CreateParticipantTokenRequest$duration": "

Duration (in minutes), after which the token expires. Default: 60 (1 hour).

", - "ParticipantToken$duration": "

Duration (in minutes), after which the participant token expires. Default: 60 (1 hour).

", - "ParticipantTokenConfiguration$duration": "

Duration (in minutes), after which the corresponding participant token expires. Default: 60 (1 hour).

" + "CreateParticipantTokenRequest$duration": "

Duration (in minutes), after which the token expires. Default: 720 (12 hours).

", + "ParticipantToken$duration": "

Duration (in minutes), after which the participant token expires. Default: 720 (12 hours).

", + "ParticipantTokenConfiguration$duration": "

Duration (in minutes), after which the corresponding participant token expires. Default: 720 (12 hours).

" } }, "ParticipantTokenExpirationTime": { @@ -176,7 +322,7 @@ "ParticipantTokenId": { "base": null, "refs": { - "DisconnectParticipantRequest$participantId": "

Identifier of the participant to be disconnected. This is returned by CreateParticipantToken.

", + "DisconnectParticipantRequest$participantId": "

Identifier of the participant to be disconnected. This is assigned by IVS and returned by CreateParticipantToken.

", "ParticipantToken$participantId": "

Unique identifier for this participant token, assigned by IVS.

" } }, @@ -196,8 +342,8 @@ "base": null, "refs": { "CreateParticipantTokenRequest$userId": "

Name that can be specified to help identify the token. This can be any UTF-8 encoded text. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

", - "ParticipantToken$userId": "

Name to help identify the token. This can be any UTF-8 encoded text. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

", - "ParticipantTokenConfiguration$userId": "

Name that can be specified to help identify the corresponding participant token. This can be any UTF-8 encoded text. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" + "ParticipantToken$userId": "

Customer-assigned name to help identify the token; this can be used to link a participant to a user in the customer’s own systems. This can be any UTF-8 encoded text. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

", + "ParticipantTokenConfiguration$userId": "

Customer-assigned name to help identify the token; this can be used to link a participant to a user in the customer’s own systems. This can be any UTF-8 encoded text. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" } }, "PendingVerification": { @@ -205,6 +351,14 @@ "refs": { } }, + "Published": { + "base": null, + "refs": { + "ListParticipantsRequest$filterByPublished": "

Filters the response list to only show participants who published during the stage session. Only one of filterByUserId, filterByPublished, or filterByState can be provided per request.

", + "Participant$published": "

Whether the participant ever published to the stage session.

", + "ParticipantSummary$published": "

Whether the participant ever published to the stage session.

" + } + }, "ResourceArn": { "base": null, "refs": { @@ -227,8 +381,8 @@ "base": "

Object specifying a stage.

", "refs": { "CreateStageResponse$stage": "

The stage that was created.

", - "GetStageResponse$stage": "

", - "UpdateStageResponse$stage": "

The updated stage.

" + "GetStageResponse$stage": "

The stage that is returned.

", + "UpdateStageResponse$stage": "

The updated stage.

" } }, "StageArn": { @@ -237,7 +391,12 @@ "CreateParticipantTokenRequest$stageArn": "

ARN of the stage to which this token is scoped.

", "DeleteStageRequest$arn": "

ARN of the stage to be deleted.

", "DisconnectParticipantRequest$stageArn": "

ARN of the stage to which the participant is attached.

", + "GetParticipantRequest$stageArn": "

Stage ARN.

", "GetStageRequest$arn": "

ARN of the stage for which the information is to be retrieved.

", + "GetStageSessionRequest$stageArn": "

ARN of the stage for which the information is to be retrieved.

", + "ListParticipantEventsRequest$stageArn": "

Stage ARN.

", + "ListParticipantsRequest$stageArn": "

Stage ARN.

", + "ListStageSessionsRequest$stageArn": "

Stage ARN.

", "Stage$arn": "

Stage ARN.

", "StageSummary$arn": "

Stage ARN.

", "UpdateStageRequest$arn": "

ARN of the stage to be updated.

" @@ -252,13 +411,37 @@ "UpdateStageRequest$name": "

Name of the stage to be updated.

" } }, + "StageSession": { + "base": "

A stage session begins when the first participant joins a stage and ends after the last participant leaves the stage. A stage session helps with debugging stages by grouping events and participants into shorter periods of time (i.e., a session), which is helpful when stages are used over long periods of time.

", + "refs": { + "GetStageSessionResponse$stageSession": "

The stage session that is returned.

" + } + }, "StageSessionId": { "base": null, "refs": { + "GetParticipantRequest$sessionId": "

ID of a session within the stage.

", + "GetStageSessionRequest$sessionId": "

ID of a session within the stage.

", + "ListParticipantEventsRequest$sessionId": "

ID of a session within the stage.

", + "ListParticipantsRequest$sessionId": "

ID of the session within the stage.

", "Stage$activeSessionId": "

ID of the active session within the stage.

", + "StageSession$sessionId": "

ID of the session within the stage.

", + "StageSessionSummary$sessionId": "

ID of the session within the stage.

", "StageSummary$activeSessionId": "

ID of the active session within the stage.

" } }, + "StageSessionList": { + "base": null, + "refs": { + "ListStageSessionsResponse$stageSessions": "

List of matching stage sessions.

" + } + }, + "StageSessionSummary": { + "base": "

Summary information about a stage session.

", + "refs": { + "StageSessionList$member": null + } + }, "StageSummary": { "base": "

Summary information about a stage.

", "refs": { @@ -274,6 +457,8 @@ "String": { "base": null, "refs": { + "ParticipantAttributes$key": null, + "ParticipantAttributes$value": null, "ParticipantTokenAttributes$key": null, "ParticipantTokenAttributes$value": null } @@ -317,6 +502,18 @@ "TagResourceRequest$tags": "

Array of tags to be added or updated. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no constraints beyond what is documented there.

" } }, + "Time": { + "base": null, + "refs": { + "Event$eventTime": "

ISO 8601 timestamp (returned as a string) for when the event occurred.

", + "Participant$firstJoinTime": "

ISO 8601 timestamp (returned as a string) when the participant first joined the stage session.

", + "ParticipantSummary$firstJoinTime": "

ISO 8601 timestamp (returned as a string) when the participant first joined the stage session.

", + "StageSession$endTime": "

ISO 8601 timestamp (returned as a string) when the stage session ended. This is null if the stage is active.

", + "StageSession$startTime": "

ISO 8601 timestamp (returned as a string) when this stage session began.

", + "StageSessionSummary$endTime": "

ISO 8601 timestamp (returned as a string) when the stage session ended. This is null if the stage is active.

", + "StageSessionSummary$startTime": "

ISO 8601 timestamp (returned as a string) when this stage session began.

" + } + }, "UntagResourceRequest": { "base": null, "refs": { @@ -337,6 +534,14 @@ "refs": { } }, + "UserId": { + "base": null, + "refs": { + "ListParticipantsRequest$filterByUserId": "

Filters the response list to match the specified user ID. Only one of filterByUserId, filterByPublished, or filterByState can be provided per request. A userId is a customer-assigned name to help identify the token; this can be used to link a participant to a user in the customer’s own systems.

", + "Participant$userId": "

Customer-assigned name to help identify the token; this can be used to link a participant to a user in the customer’s own systems. This can be any UTF-8 encoded text. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

", + "ParticipantSummary$userId": "

Customer-assigned name to help identify the token; this can be used to link a participant to a user in the customer’s own systems. This can be any UTF-8 encoded text. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" + } + }, "ValidationException": { "base": "

", "refs": { diff --git a/models/apis/ivs-realtime/2020-07-14/paginators-1.json b/models/apis/ivs-realtime/2020-07-14/paginators-1.json index 23f23f56b01..006aff958af 100644 --- a/models/apis/ivs-realtime/2020-07-14/paginators-1.json +++ b/models/apis/ivs-realtime/2020-07-14/paginators-1.json @@ -1,5 +1,20 @@ { "pagination": { + "ListParticipantEvents": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListParticipants": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListStageSessions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, "ListStages": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/models/apis/omics/2022-11-28/api-2.json b/models/apis/omics/2022-11-28/api-2.json index 14f9c449735..9c19faa11a4 100644 --- a/models/apis/omics/2022-11-28/api-2.json +++ b/models/apis/omics/2022-11-28/api-2.json @@ -11,7 +11,52 @@ "uid": "omics-2022-11-28" }, "operations": { + "AbortMultipartReadSetUpload": { + "authtype": "v4", + "endpoint": { + "hostPrefix": "control-storage-" + }, + "errors": [ + { + "shape": "InternalServerException" + }, + { + "shape": "NotSupportedOperationException" + }, + { + "shape": "ServiceQuotaExceededException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "RequestTimeoutException" + } + ], + "http": { + "method": "DELETE", + "requestUri": "/sequencestore/{sequenceStoreId}/upload/{uploadId}/abort", + "responseCode": 200 + }, + "input": { + "shape": "AbortMultipartReadSetUploadRequest" + }, + "name": "AbortMultipartReadSetUpload", + "output": { + "shape": "AbortMultipartReadSetUploadResponse" + } + }, "BatchDeleteReadSet": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -50,6 +95,7 @@ } }, 
"CancelAnnotationImportJob": { + "authtype": "v4", "endpoint": { "hostPrefix": "analytics-" }, @@ -85,6 +131,7 @@ } }, "CancelRun": { + "authtype": "v4", "endpoint": { "hostPrefix": "workflows-" }, @@ -125,6 +172,7 @@ "name": "CancelRun" }, "CancelVariantImportJob": { + "authtype": "v4", "endpoint": { "hostPrefix": "analytics-" }, @@ -159,7 +207,52 @@ "shape": "CancelVariantImportResponse" } }, + "CompleteMultipartReadSetUpload": { + "authtype": "v4", + "endpoint": { + "hostPrefix": "storage-" + }, + "errors": [ + { + "shape": "InternalServerException" + }, + { + "shape": "NotSupportedOperationException" + }, + { + "shape": "ServiceQuotaExceededException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "RequestTimeoutException" + } + ], + "http": { + "method": "POST", + "requestUri": "/sequencestore/{sequenceStoreId}/upload/{uploadId}/complete", + "responseCode": 200 + }, + "input": { + "shape": "CompleteMultipartReadSetUploadRequest" + }, + "name": "CompleteMultipartReadSetUpload", + "output": { + "shape": "CompleteMultipartReadSetUploadResponse" + } + }, "CreateAnnotationStore": { + "authtype": "v4", "endpoint": { "hostPrefix": "analytics-" }, @@ -199,7 +292,52 @@ "shape": "CreateAnnotationStoreResponse" } }, + "CreateMultipartReadSetUpload": { + "authtype": "v4", + "endpoint": { + "hostPrefix": "control-storage-" + }, + "errors": [ + { + "shape": "InternalServerException" + }, + { + "shape": "NotSupportedOperationException" + }, + { + "shape": "ServiceQuotaExceededException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "RequestTimeoutException" + } + ], + "http": { + "method": "POST", + "requestUri": "/sequencestore/{sequenceStoreId}/upload", + "responseCode": 200 + }, 
+ "input": { + "shape": "CreateMultipartReadSetUploadRequest" + }, + "name": "CreateMultipartReadSetUpload", + "output": { + "shape": "CreateMultipartReadSetUploadResponse" + } + }, "CreateReferenceStore": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -237,6 +375,7 @@ } }, "CreateRunGroup": { + "authtype": "v4", "endpoint": { "hostPrefix": "workflows-" }, @@ -280,6 +419,7 @@ } }, "CreateSequenceStore": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -317,6 +457,7 @@ } }, "CreateVariantStore": { + "authtype": "v4", "endpoint": { "hostPrefix": "analytics-" }, @@ -357,6 +498,7 @@ } }, "CreateWorkflow": { + "authtype": "v4", "endpoint": { "hostPrefix": "workflows-" }, @@ -400,6 +542,7 @@ } }, "DeleteAnnotationStore": { + "authtype": "v4", "endpoint": { "hostPrefix": "analytics-" }, @@ -438,6 +581,7 @@ } }, "DeleteReference": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -479,6 +623,7 @@ } }, "DeleteReferenceStore": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -520,6 +665,7 @@ } }, "DeleteRun": { + "authtype": "v4", "endpoint": { "hostPrefix": "workflows-" }, @@ -561,6 +707,7 @@ "name": "DeleteRun" }, "DeleteRunGroup": { + "authtype": "v4", "endpoint": { "hostPrefix": "workflows-" }, @@ -602,6 +749,7 @@ "name": "DeleteRunGroup" }, "DeleteSequenceStore": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -643,6 +791,7 @@ } }, "DeleteVariantStore": { + "authtype": "v4", "endpoint": { "hostPrefix": "analytics-" }, @@ -681,6 +830,7 @@ } }, "DeleteWorkflow": { + "authtype": "v4", "endpoint": { "hostPrefix": "workflows-" }, @@ -722,6 +872,7 @@ "name": "DeleteWorkflow" }, "GetAnnotationImportJob": { + "authtype": "v4", "endpoint": { "hostPrefix": "analytics-" }, @@ -756,6 +907,7 @@ } }, "GetAnnotationStore": { + "authtype": "v4", "endpoint": { "hostPrefix": "analytics-" }, @@ -790,6 +942,7 @@ } }, "GetReadSet": { + "authtype": "v4", 
"endpoint": { "hostPrefix": "storage-" }, @@ -833,6 +986,7 @@ } }, "GetReadSetActivationJob": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -870,6 +1024,7 @@ } }, "GetReadSetExportJob": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -907,6 +1062,7 @@ } }, "GetReadSetImportJob": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -944,6 +1100,7 @@ } }, "GetReadSetMetadata": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -981,6 +1138,7 @@ } }, "GetReference": { + "authtype": "v4", "endpoint": { "hostPrefix": "storage-" }, @@ -1021,6 +1179,7 @@ } }, "GetReferenceImportJob": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -1058,6 +1217,7 @@ } }, "GetReferenceMetadata": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -1095,6 +1255,7 @@ } }, "GetReferenceStore": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -1132,6 +1293,7 @@ } }, "GetRun": { + "authtype": "v4", "endpoint": { "hostPrefix": "workflows-" }, @@ -1175,6 +1337,7 @@ } }, "GetRunGroup": { + "authtype": "v4", "endpoint": { "hostPrefix": "workflows-" }, @@ -1218,6 +1381,7 @@ } }, "GetRunTask": { + "authtype": "v4", "endpoint": { "hostPrefix": "workflows-" }, @@ -1261,6 +1425,7 @@ } }, "GetSequenceStore": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -1298,6 +1463,7 @@ } }, "GetVariantImportJob": { + "authtype": "v4", "endpoint": { "hostPrefix": "analytics-" }, @@ -1332,6 +1498,7 @@ } }, "GetVariantStore": { + "authtype": "v4", "endpoint": { "hostPrefix": "analytics-" }, @@ -1366,6 +1533,7 @@ } }, "GetWorkflow": { + "authtype": "v4", "endpoint": { "hostPrefix": "workflows-" }, @@ -1409,6 +1577,7 @@ } }, "ListAnnotationImportJobs": { + "authtype": "v4", "endpoint": { "hostPrefix": "analytics-" }, @@ -1443,6 +1612,7 @@ } }, "ListAnnotationStores": { + "authtype": "v4", "endpoint": { "hostPrefix": 
"analytics-" }, @@ -1476,7 +1646,52 @@ "shape": "ListAnnotationStoresResponse" } }, + "ListMultipartReadSetUploads": { + "authtype": "v4", + "endpoint": { + "hostPrefix": "control-storage-" + }, + "errors": [ + { + "shape": "InternalServerException" + }, + { + "shape": "NotSupportedOperationException" + }, + { + "shape": "ServiceQuotaExceededException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "RequestTimeoutException" + } + ], + "http": { + "method": "POST", + "requestUri": "/sequencestore/{sequenceStoreId}/uploads", + "responseCode": 200 + }, + "input": { + "shape": "ListMultipartReadSetUploadsRequest" + }, + "name": "ListMultipartReadSetUploads", + "output": { + "shape": "ListMultipartReadSetUploadsResponse" + } + }, "ListReadSetActivationJobs": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -1514,6 +1729,7 @@ } }, "ListReadSetExportJobs": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -1551,6 +1767,7 @@ } }, "ListReadSetImportJobs": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -1587,7 +1804,52 @@ "shape": "ListReadSetImportJobsResponse" } }, + "ListReadSetUploadParts": { + "authtype": "v4", + "endpoint": { + "hostPrefix": "control-storage-" + }, + "errors": [ + { + "shape": "InternalServerException" + }, + { + "shape": "NotSupportedOperationException" + }, + { + "shape": "ServiceQuotaExceededException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "RequestTimeoutException" + } + ], + "http": { + "method": "POST", + "requestUri": "/sequencestore/{sequenceStoreId}/upload/{uploadId}/parts", + "responseCode": 200 + }, + "input": { + "shape": "ListReadSetUploadPartsRequest" + }, + 
"name": "ListReadSetUploadParts", + "output": { + "shape": "ListReadSetUploadPartsResponse" + } + }, "ListReadSets": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -1625,6 +1887,7 @@ } }, "ListReferenceImportJobs": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -1662,6 +1925,7 @@ } }, "ListReferenceStores": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -1696,6 +1960,7 @@ } }, "ListReferences": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -1733,6 +1998,7 @@ } }, "ListRunGroups": { + "authtype": "v4", "endpoint": { "hostPrefix": "workflows-" }, @@ -1776,6 +2042,7 @@ } }, "ListRunTasks": { + "authtype": "v4", "endpoint": { "hostPrefix": "workflows-" }, @@ -1819,6 +2086,7 @@ } }, "ListRuns": { + "authtype": "v4", "endpoint": { "hostPrefix": "workflows-" }, @@ -1862,6 +2130,7 @@ } }, "ListSequenceStores": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -1896,6 +2165,7 @@ } }, "ListTagsForResource": { + "authtype": "v4", "endpoint": { "hostPrefix": "tags-" }, @@ -1939,6 +2209,7 @@ } }, "ListVariantImportJobs": { + "authtype": "v4", "endpoint": { "hostPrefix": "analytics-" }, @@ -1973,6 +2244,7 @@ } }, "ListVariantStores": { + "authtype": "v4", "endpoint": { "hostPrefix": "analytics-" }, @@ -2007,6 +2279,7 @@ } }, "ListWorkflows": { + "authtype": "v4", "endpoint": { "hostPrefix": "workflows-" }, @@ -2050,6 +2323,7 @@ } }, "StartAnnotationImportJob": { + "authtype": "v4", "endpoint": { "hostPrefix": "analytics-" }, @@ -2087,6 +2361,7 @@ } }, "StartReadSetActivationJob": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -2127,6 +2402,7 @@ } }, "StartReadSetExportJob": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -2167,6 +2443,7 @@ } }, "StartReadSetImportJob": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -2207,6 +2484,7 @@ } }, 
"StartReferenceImportJob": { + "authtype": "v4", "endpoint": { "hostPrefix": "control-storage-" }, @@ -2247,6 +2525,7 @@ } }, "StartRun": { + "authtype": "v4", "endpoint": { "hostPrefix": "workflows-" }, @@ -2290,6 +2569,7 @@ } }, "StartVariantImportJob": { + "authtype": "v4", "endpoint": { "hostPrefix": "analytics-" }, @@ -2327,6 +2607,7 @@ } }, "TagResource": { + "authtype": "v4", "endpoint": { "hostPrefix": "tags-" }, @@ -2370,6 +2651,7 @@ } }, "UntagResource": { + "authtype": "v4", "endpoint": { "hostPrefix": "tags-" }, @@ -2414,6 +2696,7 @@ } }, "UpdateAnnotationStore": { + "authtype": "v4", "endpoint": { "hostPrefix": "analytics-" }, @@ -2448,6 +2731,7 @@ } }, "UpdateRunGroup": { + "authtype": "v4", "endpoint": { "hostPrefix": "workflows-" }, @@ -2488,6 +2772,7 @@ "name": "UpdateRunGroup" }, "UpdateVariantStore": { + "authtype": "v4", "endpoint": { "hostPrefix": "analytics-" }, @@ -2522,6 +2807,7 @@ } }, "UpdateWorkflow": { + "authtype": "v4", "endpoint": { "hostPrefix": "workflows-" }, @@ -2560,9 +2846,84 @@ "shape": "UpdateWorkflowRequest" }, "name": "UpdateWorkflow" + }, + "UploadReadSetPart": { + "authtype": "v4-unsigned-body", + "endpoint": { + "hostPrefix": "storage-" + }, + "errors": [ + { + "shape": "InternalServerException" + }, + { + "shape": "NotSupportedOperationException" + }, + { + "shape": "ServiceQuotaExceededException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "RequestTimeoutException" + } + ], + "http": { + "method": "PUT", + "requestUri": "/sequencestore/{sequenceStoreId}/upload/{uploadId}/part", + "responseCode": 200 + }, + "input": { + "shape": "UploadReadSetPartRequest" + }, + "name": "UploadReadSetPart", + "output": { + "shape": "UploadReadSetPartResponse" + } } }, "shapes": { + "AbortMultipartReadSetUploadRequest": { + "members": { + "sequenceStoreId": { + "location": "uri", + 
"locationName": "sequenceStoreId", + "shape": "SequenceStoreId" + }, + "uploadId": { + "location": "uri", + "locationName": "uploadId", + "shape": "UploadId" + } + }, + "required": [ + "sequenceStoreId", + "uploadId" + ], + "type": "structure" + }, + "AbortMultipartReadSetUploadResponse": { + "members": {}, + "type": "structure" + }, + "Accelerators": { + "enum": [ + "GPU" + ], + "max": 64, + "min": 1, + "type": "string" + }, "AccessDeniedException": { "error": { "httpStatusCode": 403, @@ -2612,10 +2973,10 @@ } }, "required": [ - "creationTime", "id", "sequenceStoreId", - "status" + "status", + "creationTime" ], "type": "structure" }, @@ -2652,21 +3013,40 @@ "ActivationJobId": { "max": 36, "min": 10, - "pattern": "^[0-9]+$", + "pattern": "[0-9]+", "type": "string" }, - "AnnotationImportItemDetail": { - "members": { - "jobStatus": { - "shape": "JobStatus" - }, - "source": { - "shape": "S3Uri" - } + "AnnotationFieldMap": { + "key": { + "shape": "AnnotationFieldMapKeyString" + }, + "type": "map", + "value": { + "shape": "AnnotationFieldMapValueString" + } + }, + "AnnotationFieldMapKeyString": { + "max": 21, + "min": 1, + "type": "string" + }, + "AnnotationFieldMapValueString": { + "max": 21, + "min": 1, + "type": "string" + }, + "AnnotationImportItemDetail": { + "members": { + "jobStatus": { + "shape": "JobStatus" + }, + "source": { + "shape": "S3Uri" + } }, "required": [ - "jobStatus", - "source" + "source", + "jobStatus" ], "type": "structure" }, @@ -2698,6 +3078,9 @@ }, "AnnotationImportJobItem": { "members": { + "annotationFields": { + "shape": "AnnotationFieldMap" + }, "completionTime": { "shape": "CompletionTime" }, @@ -2724,11 +3107,11 @@ } }, "required": [ - "creationTime", - "destinationName", "id", + "destinationName", "roleArn", "status", + "creationTime", "updateTime" ], "type": "structure" @@ -2779,18 +3162,18 @@ } }, "required": [ - "creationTime", - "description", "id", - "name", "reference", - "sseConfig", "status", - "statusMessage", "storeArn", + 
"name", "storeFormat", - "storeSizeBytes", - "updateTime" + "description", + "sseConfig", + "creationTime", + "updateTime", + "statusMessage", + "storeSizeBytes" ], "type": "structure" }, @@ -2815,7 +3198,7 @@ "Arn": { "max": 2048, "min": 20, - "pattern": "^arn:([^: ]*):([^: ]*):([^: ]*):([0-9]{12}):([^: ]*)$", + "pattern": "arn:([^: ]*):([^: ]*):([^: ]*):([0-9]{12}):([^: ]*)", "type": "string" }, "BatchDeleteReadSetRequest": { @@ -2900,7 +3283,7 @@ "ClientToken": { "max": 127, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "CommentChar": { @@ -2908,6 +3291,71 @@ "min": 1, "type": "string" }, + "CompleteMultipartReadSetUploadRequest": { + "members": { + "parts": { + "shape": "CompleteReadSetUploadPartList" + }, + "sequenceStoreId": { + "location": "uri", + "locationName": "sequenceStoreId", + "shape": "SequenceStoreId" + }, + "uploadId": { + "location": "uri", + "locationName": "uploadId", + "shape": "UploadId" + } + }, + "required": [ + "sequenceStoreId", + "uploadId", + "parts" + ], + "type": "structure" + }, + "CompleteMultipartReadSetUploadResponse": { + "members": { + "readSetId": { + "shape": "ReadSetId" + } + }, + "required": [ + "readSetId" + ], + "type": "structure" + }, + "CompleteReadSetUploadPartList": { + "member": { + "shape": "CompleteReadSetUploadPartListItem" + }, + "type": "list" + }, + "CompleteReadSetUploadPartListItem": { + "members": { + "checksum": { + "shape": "String" + }, + "partNumber": { + "shape": "CompleteReadSetUploadPartListItemPartNumberInteger" + }, + "partSource": { + "shape": "ReadSetPartSource" + } + }, + "required": [ + "partNumber", + "partSource", + "checksum" + ], + "type": "structure" + }, + "CompleteReadSetUploadPartListItemPartNumberInteger": { + "box": true, + "max": 10000, + "min": 1, + "type": "integer" + }, "CompletionTime": { "timestampFormat": "iso8601", "type": "timestamp" @@ -2958,7 +3406,7 @@ 
"type": "structure" }, "CreateAnnotationStoreRequestNameString": { - "pattern": "^([a-z]){1}([a-z0-9_]){2,254}$", + "pattern": "([a-z]){1}([a-z0-9_]){2,254}", "type": "string" }, "CreateAnnotationStoreResponse": { @@ -2986,10 +3434,102 @@ } }, "required": [ - "creationTime", "id", + "status", "name", - "status" + "creationTime" + ], + "type": "structure" + }, + "CreateMultipartReadSetUploadRequest": { + "members": { + "clientToken": { + "shape": "ClientToken" + }, + "description": { + "shape": "ReadSetDescription" + }, + "generatedFrom": { + "shape": "GeneratedFrom" + }, + "name": { + "shape": "ReadSetName" + }, + "referenceArn": { + "shape": "ReferenceArn" + }, + "sampleId": { + "shape": "SampleId" + }, + "sequenceStoreId": { + "location": "uri", + "locationName": "sequenceStoreId", + "shape": "SequenceStoreId" + }, + "sourceFileType": { + "shape": "FileType" + }, + "subjectId": { + "shape": "SubjectId" + }, + "tags": { + "shape": "TagMap" + } + }, + "required": [ + "sequenceStoreId", + "sourceFileType", + "subjectId", + "sampleId", + "referenceArn", + "name" + ], + "type": "structure" + }, + "CreateMultipartReadSetUploadResponse": { + "members": { + "creationTime": { + "shape": "SyntheticTimestamp_date_time" + }, + "description": { + "shape": "ReadSetDescription" + }, + "generatedFrom": { + "shape": "GeneratedFrom" + }, + "name": { + "shape": "ReadSetName" + }, + "referenceArn": { + "shape": "ReferenceArn" + }, + "sampleId": { + "shape": "SampleId" + }, + "sequenceStoreId": { + "shape": "SequenceStoreId" + }, + "sourceFileType": { + "shape": "FileType" + }, + "subjectId": { + "shape": "SubjectId" + }, + "tags": { + "shape": "TagMap" + }, + "uploadId": { + "shape": "UploadId" + } + }, + "required": [ + "sequenceStoreId", + "uploadId", + "sourceFileType", + "subjectId", + "sampleId", + "referenceArn", + "creationTime" ], "type": "structure" }, @@ -3038,9 +3578,9 @@ } }, "required": [ + "id", "arn", - "creationTime", - "id" + "creationTime" ], "type": "structure" }, 
@@ -3052,6 +3592,9 @@ "maxDuration": { "shape": "CreateRunGroupRequestMaxDurationInteger" }, + "maxGpus": { + "shape": "CreateRunGroupRequestMaxGpusInteger" + }, "maxRuns": { "shape": "CreateRunGroupRequestMaxRunsInteger" }, @@ -3083,6 +3626,12 @@ "min": 1, "type": "integer" }, + "CreateRunGroupRequestMaxGpusInteger": { + "box": true, + "max": 100000, + "min": 1, + "type": "integer" + }, "CreateRunGroupRequestMaxRunsInteger": { "box": true, "max": 100000, @@ -3111,6 +3660,9 @@ "description": { "shape": "SequenceStoreDescription" }, + "fallbackLocation": { + "shape": "S3Destination" + }, "name": { "shape": "SequenceStoreName" }, @@ -3137,6 +3689,9 @@ "description": { "shape": "SequenceStoreDescription" }, + "fallbackLocation": { + "shape": "S3Destination" + }, "id": { "shape": "SequenceStoreId" }, @@ -3148,9 +3703,9 @@ } }, "required": [ + "id", "arn", - "creationTime", - "id" + "creationTime" ], "type": "structure" }, @@ -3178,7 +3733,7 @@ "type": "structure" }, "CreateVariantStoreRequestNameString": { - "pattern": "^([a-z]){1}([a-z0-9_]){2,254}$", + "pattern": "([a-z]){1}([a-z0-9_]){2,254}", "type": "string" }, "CreateVariantStoreResponse": { @@ -3200,15 +3755,18 @@ } }, "required": [ - "creationTime", "id", + "status", "name", - "status" + "creationTime" ], "type": "structure" }, "CreateWorkflowRequest": { "members": { + "accelerators": { + "shape": "Accelerators" + }, "definitionUri": { "shape": "WorkflowDefinition" }, @@ -3449,7 +4007,7 @@ "ExportJobId": { "max": 36, "min": 10, - "pattern": "^[0-9]+$", + "pattern": "[0-9]+", "type": "string" }, "ExportReadSet": { @@ -3523,11 +4081,11 @@ } }, "required": [ - "creationTime", - "destination", "id", "sequenceStoreId", - "status" + "destination", + "status", + "creationTime" ], "type": "structure" }, @@ -3617,7 +4175,7 @@ "GeneratedFrom": { "max": 127, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, 
"GetAnnotationImportRequest": { @@ -3635,6 +4193,9 @@ }, "GetAnnotationImportResponse": { "members": { + "annotationFields": { + "shape": "AnnotationFieldMap" + }, "completionTime": { "shape": "CompletionTime" }, @@ -3670,17 +4231,17 @@ } }, "required": [ - "completionTime", - "creationTime", - "destinationName", - "formatOptions", "id", - "items", + "destinationName", "roleArn", - "runLeftNormalization", "status", "statusMessage", - "updateTime" + "creationTime", + "updateTime", + "completionTime", + "items", + "runLeftNormalization", + "formatOptions" ], "type": "structure" }, @@ -3743,18 +4304,18 @@ } }, "required": [ - "creationTime", - "description", "id", - "name", "reference", - "sseConfig", "status", - "statusMessage", "storeArn", - "storeSizeBytes", + "name", + "description", + "sseConfig", + "creationTime", + "updateTime", "tags", - "updateTime" + "statusMessage", + "storeSizeBytes" ], "type": "structure" }, @@ -3802,10 +4363,10 @@ } }, "required": [ - "creationTime", "id", "sequenceStoreId", - "status" + "status", + "creationTime" ], "type": "structure" }, @@ -3823,8 +4384,8 @@ } }, "required": [ - "id", - "sequenceStoreId" + "sequenceStoreId", + "id" ], "type": "structure" }, @@ -3856,11 +4417,11 @@ } }, "required": [ - "creationTime", - "destination", "id", "sequenceStoreId", - "status" + "destination", + "status", + "creationTime" ], "type": "structure" }, @@ -3911,12 +4472,12 @@ } }, "required": [ - "creationTime", "id", - "roleArn", "sequenceStoreId", - "sources", - "status" + "roleArn", + "status", + "creationTime", + "sources" ], "type": "structure" }, @@ -3977,17 +4538,20 @@ "status": { "shape": "ReadSetStatus" }, + "statusMessage": { + "shape": "ReadSetStatusMessage" + }, "subjectId": { "shape": "SubjectId" } }, "required": [ - "arn", - "creationTime", - "fileType", "id", + "arn", "sequenceStoreId", - "status" + "status", + "fileType", + "creationTime" ], "type": "structure" }, @@ -4016,8 +4580,8 @@ }, "required": [ "id", - "partNumber", - 
"sequenceStoreId" + "sequenceStoreId", + "partNumber" ], "type": "structure" }, @@ -4083,12 +4647,12 @@ } }, "required": [ - "creationTime", "id", "referenceStoreId", "roleArn", - "sources", - "status" + "status", + "creationTime", + "sources" ], "type": "structure" }, @@ -4145,11 +4709,11 @@ } }, "required": [ - "arn", - "creationTime", "id", - "md5", + "arn", "referenceStoreId", + "md5", + "creationTime", "updateTime" ], "type": "structure" @@ -4184,8 +4748,8 @@ }, "required": [ "id", - "partNumber", - "referenceStoreId" + "referenceStoreId", + "partNumber" ], "type": "structure" }, @@ -4239,9 +4803,9 @@ } }, "required": [ + "id", "arn", - "creationTime", - "id" + "creationTime" ], "type": "structure" }, @@ -4275,6 +4839,9 @@ "maxDuration": { "shape": "GetRunGroupResponseMaxDurationInteger" }, + "maxGpus": { + "shape": "GetRunGroupResponseMaxGpusInteger" + }, "maxRuns": { "shape": "GetRunGroupResponseMaxRunsInteger" }, @@ -4299,6 +4866,12 @@ "min": 1, "type": "integer" }, + "GetRunGroupResponseMaxGpusInteger": { + "box": true, + "max": 100000, + "min": 1, + "type": "integer" + }, "GetRunGroupResponseMaxRunsInteger": { "box": true, "max": 100000, @@ -4325,6 +4898,9 @@ }, "GetRunResponse": { "members": { + "accelerators": { + "shape": "Accelerators" + }, "arn": { "shape": "RunArn" }, @@ -4433,6 +5009,9 @@ "creationTime": { "shape": "TaskTimestamp" }, + "gpus": { + "shape": "GetRunTaskResponseGpusInteger" + }, "logStream": { "shape": "TaskLogStream" }, @@ -4465,6 +5044,11 @@ "min": 1, "type": "integer" }, + "GetRunTaskResponseGpusInteger": { + "box": true, + "min": 0, + "type": "integer" + }, "GetRunTaskResponseMemoryInteger": { "box": true, "min": 1, @@ -4494,6 +5078,9 @@ "description": { "shape": "SequenceStoreDescription" }, + "fallbackLocation": { + "shape": "S3Destination" + }, "id": { "shape": "SequenceStoreId" }, @@ -4505,9 +5092,9 @@ } }, "required": [ + "id", "arn", - "creationTime", - "id" + "creationTime" ], "type": "structure" }, @@ -4526,6 +5113,9 @@ }, 
"GetVariantImportResponse": { "members": { + "annotationFields": { + "shape": "AnnotationFieldMap" + }, "completionTime": { "shape": "CompletionTime" }, @@ -4558,15 +5148,15 @@ } }, "required": [ - "creationTime", - "destinationName", "id", - "items", + "destinationName", "roleArn", - "runLeftNormalization", "status", "statusMessage", - "updateTime" + "creationTime", + "updateTime", + "items", + "runLeftNormalization" ], "type": "structure" }, @@ -4623,18 +5213,18 @@ } }, "required": [ - "creationTime", - "description", "id", - "name", "reference", - "sseConfig", "status", - "statusMessage", "storeArn", - "storeSizeBytes", + "name", + "description", + "sseConfig", + "creationTime", + "updateTime", "tags", - "updateTime" + "statusMessage", + "storeSizeBytes" ], "type": "structure" }, @@ -4663,6 +5253,9 @@ }, "GetWorkflowResponse": { "members": { + "accelerators": { + "shape": "Accelerators" + }, "arn": { "shape": "WorkflowArn" }, @@ -4687,6 +5280,9 @@ "main": { "shape": "WorkflowMain" }, + "metadata": { + "shape": "WorkflowMetadata" + }, "name": { "shape": "WorkflowName" }, @@ -4723,7 +5319,7 @@ "ImportJobId": { "max": 36, "min": 10, - "pattern": "^[0-9]+$", + "pattern": "[0-9]+", "type": "string" }, "ImportReadSetFilter": { @@ -4762,11 +5358,11 @@ } }, "required": [ - "creationTime", "id", - "roleArn", "sequenceStoreId", - "status" + "roleArn", + "status", + "creationTime" ], "type": "structure" }, @@ -4813,11 +5409,11 @@ } }, "required": [ - "sampleId", - "sourceFileType", "sourceFiles", + "sourceFileType", "status", - "subjectId" + "subjectId", + "sampleId" ], "type": "structure" }, @@ -4863,11 +5459,11 @@ } }, "required": [ - "creationTime", "id", "referenceStoreId", "roleArn", - "status" + "status", + "creationTime" ], "type": "structure" }, @@ -4942,7 +5538,7 @@ "JobStatusMessage": { "max": 127, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, 
"JobStatusMsg": { @@ -5074,6 +5670,46 @@ }, "type": "structure" }, + "ListMultipartReadSetUploadsRequest": { + "members": { + "maxResults": { + "location": "querystring", + "locationName": "maxResults", + "shape": "ListMultipartReadSetUploadsRequestMaxResultsInteger" + }, + "nextToken": { + "location": "querystring", + "locationName": "nextToken", + "shape": "NextToken" + }, + "sequenceStoreId": { + "location": "uri", + "locationName": "sequenceStoreId", + "shape": "SequenceStoreId" + } + }, + "required": [ + "sequenceStoreId" + ], + "type": "structure" + }, + "ListMultipartReadSetUploadsRequestMaxResultsInteger": { + "box": true, + "max": 100, + "min": 1, + "type": "integer" + }, + "ListMultipartReadSetUploadsResponse": { + "members": { + "nextToken": { + "shape": "NextToken" + }, + "uploads": { + "shape": "MultipartReadSetUploadList" + } + }, + "type": "structure" + }, "ListReadSetActivationJobsRequest": { "members": { "filter": { @@ -5203,6 +5839,59 @@ }, "type": "structure" }, + "ListReadSetUploadPartsRequest": { + "members": { + "filter": { + "shape": "ReadSetUploadPartListFilter" + }, + "maxResults": { + "location": "querystring", + "locationName": "maxResults", + "shape": "ListReadSetUploadPartsRequestMaxResultsInteger" + }, + "nextToken": { + "location": "querystring", + "locationName": "nextToken", + "shape": "NextToken" + }, + "partSource": { + "shape": "ReadSetPartSource" + }, + "sequenceStoreId": { + "location": "uri", + "locationName": "sequenceStoreId", + "shape": "SequenceStoreId" + }, + "uploadId": { + "location": "uri", + "locationName": "uploadId", + "shape": "UploadId" + } + }, + "required": [ + "sequenceStoreId", + "uploadId", + "partSource" + ], + "type": "structure" + }, + "ListReadSetUploadPartsRequestMaxResultsInteger": { + "box": true, + "max": 100, + "min": 1, + "type": "integer" + }, + "ListReadSetUploadPartsResponse": { + "members": { + "nextToken": { + "shape": "NextToken" + }, + "parts": { + "shape": "ReadSetUploadPartList" + } + }, + 
"type": "structure" + }, "ListReadSetsRequest": { "members": { "filter": { @@ -5479,6 +6168,11 @@ "location": "querystring", "locationName": "startingToken", "shape": "RunListToken" + }, + "status": { + "location": "querystring", + "locationName": "status", + "shape": "RunStatus" } }, "type": "structure" @@ -5725,22 +6419,92 @@ }, "type": "structure" }, - "Long": { - "box": true, - "type": "long" - }, - "Md5": { - "max": 255, - "min": 1, - "pattern": "^[\\p{L}||\\p{N}]+$", - "type": "string" - }, + "Long": { + "box": true, + "type": "long" + }, + "Md5": { + "max": 255, + "min": 1, + "pattern": "[\\p{L}||\\p{N}]+", + "type": "string" + }, + "MultipartReadSetUploadList": { + "member": { + "shape": "MultipartReadSetUploadListItem" + }, + "type": "list" + }, + "MultipartReadSetUploadListItem": { + "members": { + "creationTime": { + "shape": "SyntheticTimestamp_date_time" + }, + "description": { + "shape": "ReadSetDescription" + }, + "generatedFrom": { + "shape": "GeneratedFrom" + }, + "name": { + "shape": "ReadSetName" + }, + "referenceArn": { + "shape": "ReferenceArn" + }, + "sampleId": { + "shape": "SampleId" + }, + "sequenceStoreId": { + "shape": "SequenceStoreId" + }, + "sourceFileType": { + "shape": "FileType" + }, + "subjectId": { + "shape": "SubjectId" + }, + "tags": { + "shape": "TagMap" + }, + "uploadId": { + "shape": "UploadId" + } + }, + "required": [ + "sequenceStoreId", + "uploadId", + "sourceFileType", + "subjectId", + "sampleId", + "generatedFrom", + "referenceArn", + "creationTime" + ], + "type": "structure" + }, "NextToken": { "max": 6144, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, + "NotSupportedOperationException": { + "error": { + "httpStatusCode": 405, + "senderFault": true + }, + "exception": true, + "members": { + "message": { + "shape": "String" + } + }, + "required": [ + "message" + ], + "type": "structure" + }, 
"PrimitiveBoolean": { "type": "boolean" }, @@ -5755,7 +6519,7 @@ "Range": { "max": 127, "min": 1, - "pattern": "^[\\p{N}||\\p{P}]+$", + "pattern": "[\\p{N}||\\p{P}]+", "type": "string" }, "RangeNotSatisfiableException": { @@ -5833,7 +6597,7 @@ "ReadSetArn": { "max": 127, "min": 1, - "pattern": "^arn:.+$", + "pattern": "arn:.+", "type": "string" }, "ReadSetBatchError": { @@ -5849,8 +6613,8 @@ } }, "required": [ - "code", "id", + "code", "message" ], "type": "structure" @@ -5864,7 +6628,7 @@ "ReadSetDescription": { "max": 255, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "ReadSetExportJobItemStatus": { @@ -5918,14 +6682,23 @@ "createdBefore": { "shape": "SyntheticTimestamp_date_time" }, + "generatedFrom": { + "shape": "GeneratedFrom" + }, "name": { "shape": "ReadSetName" }, "referenceArn": { "shape": "ReferenceArn" }, + "sampleId": { + "shape": "SampleId" + }, "status": { "shape": "ReadSetStatus" + }, + "subjectId": { + "shape": "SubjectId" } }, "type": "structure" @@ -5933,7 +6706,7 @@ "ReadSetId": { "max": 36, "min": 10, - "pattern": "^[0-9]+$", + "pattern": "[0-9]+", "type": "string" }, "ReadSetIdList": { @@ -6006,50 +6779,131 @@ "status": { "shape": "ReadSetStatus" }, + "statusMessage": { + "shape": "ReadSetStatusMessage" + }, "subjectId": { "shape": "SubjectId" } }, "required": [ - "arn", - "creationTime", - "fileType", "id", + "arn", "sequenceStoreId", - "status" + "status", + "fileType", + "creationTime" ], "type": "structure" }, "ReadSetName": { "max": 127, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", + "type": "string" + }, + "ReadSetPartSource": { + "enum": [ + "SOURCE1", + "SOURCE2" + ], "type": "string" }, + "ReadSetPartStreamingBlob": { + "requiresLength": true, + "streaming": true, + "type": "blob" + }, "ReadSetStatus": { "enum": [ 
"ARCHIVED", "ACTIVATING", "ACTIVE", "DELETING", - "DELETED" + "DELETED", + "PROCESSING_UPLOAD", + "UPLOAD_FAILED" ], "type": "string" }, + "ReadSetStatusMessage": { + "max": 255, + "min": 1, + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", + "type": "string" + }, "ReadSetStreamingBlob": { "streaming": true, "type": "blob" }, + "ReadSetUploadPartList": { + "member": { + "shape": "ReadSetUploadPartListItem" + }, + "type": "list" + }, + "ReadSetUploadPartListFilter": { + "members": { + "createdAfter": { + "shape": "SyntheticTimestamp_date_time" + }, + "createdBefore": { + "shape": "SyntheticTimestamp_date_time" + } + }, + "type": "structure" + }, + "ReadSetUploadPartListItem": { + "members": { + "checksum": { + "shape": "String" + }, + "creationTime": { + "shape": "SyntheticTimestamp_date_time" + }, + "lastUpdatedTime": { + "shape": "SyntheticTimestamp_date_time" + }, + "partNumber": { + "shape": "ReadSetUploadPartListItemPartNumberInteger" + }, + "partSize": { + "shape": "ReadSetUploadPartListItemPartSizeLong" + }, + "partSource": { + "shape": "ReadSetPartSource" + } + }, + "required": [ + "partNumber", + "partSize", + "partSource", + "checksum" + ], + "type": "structure" + }, + "ReadSetUploadPartListItemPartNumberInteger": { + "box": true, + "max": 10000, + "min": 1, + "type": "integer" + }, + "ReadSetUploadPartListItemPartSizeLong": { + "box": true, + "max": 5368709120, + "min": 1, + "type": "long" + }, "ReferenceArn": { "max": 127, "min": 1, - "pattern": "^arn:.+$", + "pattern": "arn:.+", "type": "string" }, "ReferenceDescription": { "max": 255, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "ReferenceFile": { @@ -6090,7 +6944,7 @@ "ReferenceId": { "max": 36, "min": 10, - "pattern": "^[0-9]+$", + "pattern": "[0-9]+", "type": "string" }, "ReferenceImportJobItemStatus": { @@ -6160,11 +7014,11 @@ } }, "required": [ - "arn", - 
"creationTime", "id", - "md5", + "arn", "referenceStoreId", + "md5", + "creationTime", "updateTime" ], "type": "structure" @@ -6172,7 +7026,7 @@ "ReferenceName": { "max": 255, "min": 3, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "ReferenceStatus": { @@ -6186,13 +7040,13 @@ "ReferenceStoreArn": { "max": 127, "min": 1, - "pattern": "^arn:.+$", + "pattern": "arn:.+", "type": "string" }, "ReferenceStoreDescription": { "max": 255, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "ReferenceStoreDetail": { @@ -6218,8 +7072,8 @@ }, "required": [ "arn", - "creationTime", - "id" + "id", + "creationTime" ], "type": "structure" }, @@ -6246,13 +7100,13 @@ "ReferenceStoreId": { "max": 36, "min": 10, - "pattern": "^[0-9]+$", + "pattern": "[0-9]+", "type": "string" }, "ReferenceStoreName": { "max": 127, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "ReferenceStreamingBlob": { @@ -6276,7 +7130,7 @@ "type": "structure" }, "ResourceId": { - "pattern": "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$", + "pattern": "[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}", "type": "string" }, "ResourceIdentifier": { @@ -6303,13 +7157,13 @@ "RoleArn": { "max": 2048, "min": 20, - "pattern": "^arn:.*", + "pattern": "arn:.*", "type": "string" }, "RunArn": { "max": 128, "min": 1, - "pattern": "^arn:.+$", + "pattern": "arn:.+", "type": "string" }, "RunExport": { @@ -6331,13 +7185,13 @@ "RunGroupArn": { "max": 128, "min": 1, - "pattern": "^arn:.+$", + "pattern": "arn:.+", "type": "string" }, "RunGroupId": { "max": 18, "min": 1, - "pattern": "^[0-9]+$", + "pattern": "[0-9]+", "type": "string" }, "RunGroupList": { @@ -6363,6 +7217,9 @@ 
"maxDuration": { "shape": "RunGroupListItemMaxDurationInteger" }, + "maxGpus": { + "shape": "RunGroupListItemMaxGpusInteger" + }, "maxRuns": { "shape": "RunGroupListItemMaxRunsInteger" }, @@ -6384,6 +7241,12 @@ "min": 1, "type": "integer" }, + "RunGroupListItemMaxGpusInteger": { + "box": true, + "max": 100000, + "min": 1, + "type": "integer" + }, "RunGroupListItemMaxRunsInteger": { "box": true, "max": 100000, @@ -6393,19 +7256,19 @@ "RunGroupListToken": { "max": 128, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "RunGroupName": { "max": 128, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "RunGroupRequestId": { "max": 128, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "RunGroupTimestamp": { @@ -6415,7 +7278,7 @@ "RunId": { "max": 18, "min": 1, - "pattern": "^[0-9]+$", + "pattern": "[0-9]+", "type": "string" }, "RunLeftNormalization": { @@ -6477,7 +7340,7 @@ "RunListToken": { "max": 128, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "RunLogLevel": { @@ -6494,31 +7357,31 @@ "RunName": { "max": 128, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "RunOutputUri": { "max": 128, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "RunRequestId": { "max": 128, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": 
"[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "RunResourceDigest": { "max": 64, "min": 0, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "RunResourceDigestKey": { "max": 256, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "RunResourceDigests": { @@ -6533,7 +7396,7 @@ "RunRoleArn": { "max": 128, "min": 1, - "pattern": "^arn:.+$", + "pattern": "arn:.+", "type": "string" }, "RunStartedBy": { @@ -6557,7 +7420,7 @@ "type": "string" }, "RunStatusMessage": { - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "RunTimestamp": { @@ -6565,17 +7428,17 @@ "type": "timestamp" }, "S3Destination": { - "pattern": "^s3://([a-z0-9][a-z0-9-.]{1,61}[a-z0-9])/?((.{1,1024})/)?$", + "pattern": "s3://([a-z0-9][a-z0-9-.]{1,61}[a-z0-9])/?((.{1,1024})/)?", "type": "string" }, "S3Uri": { - "pattern": "^s3://([a-z0-9][a-z0-9-.]{1,61}[a-z0-9])/(.{1,1024})$", + "pattern": "s3://([a-z0-9][a-z0-9-.]{1,61}[a-z0-9])/(.{1,1024})", "type": "string" }, "SampleId": { "max": 127, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "SchemaItem": { @@ -6590,7 +7453,7 @@ } }, "SchemaItemKeyString": { - "pattern": "^[a-z0-9_]{1,255}$", + "pattern": "[a-z0-9_]{1,255}", "type": "string" }, "SchemaValueType": { @@ -6629,13 +7492,13 @@ "SequenceStoreArn": { "max": 127, "min": 1, - "pattern": "^arn:.+$", + "pattern": "arn:.+", "type": "string" }, "SequenceStoreDescription": { "max": 255, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, 
"SequenceStoreDetail": { @@ -6649,6 +7512,9 @@ "description": { "shape": "SequenceStoreDescription" }, + "fallbackLocation": { + "shape": "S3Destination" + }, "id": { "shape": "SequenceStoreId" }, @@ -6661,8 +7527,8 @@ }, "required": [ "arn", - "creationTime", - "id" + "id", + "creationTime" ], "type": "structure" }, @@ -6689,13 +7555,13 @@ "SequenceStoreId": { "max": 36, "min": 10, - "pattern": "^[0-9]+$", + "pattern": "[0-9]+", "type": "string" }, "SequenceStoreName": { "max": 127, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "ServiceQuotaExceededException": { @@ -6745,11 +7611,14 @@ "SseConfigKeyArnString": { "max": 2048, "min": 20, - "pattern": "arn:([^: ]*):([^: ]*):([^: ]*):([0-9]{12}):([^: ]*)", + "pattern": ".*arn:([^: ]*):([^: ]*):([^: ]*):([0-9]{12}):([^: ]*).*", "type": "string" }, "StartAnnotationImportRequest": { "members": { + "annotationFields": { + "shape": "AnnotationFieldMap" + }, "destinationName": { "shape": "StoreName" }, @@ -6768,8 +7637,8 @@ }, "required": [ "destinationName", - "items", - "roleArn" + "roleArn", + "items" ], "type": "structure" }, @@ -6828,10 +7697,10 @@ } }, "required": [ - "creationTime", "id", "sequenceStoreId", - "status" + "status", + "creationTime" ], "type": "structure" }, @@ -6867,9 +7736,9 @@ } }, "required": [ + "sequenceStoreId", "destination", "roleArn", - "sequenceStoreId", "sources" ], "type": "structure" @@ -6901,11 +7770,11 @@ } }, "required": [ - "creationTime", - "destination", "id", "sequenceStoreId", - "status" + "destination", + "status", + "creationTime" ], "type": "structure" }, @@ -6927,8 +7796,8 @@ } }, "required": [ - "roleArn", "sequenceStoreId", + "roleArn", "sources" ], "type": "structure" @@ -6960,11 +7829,11 @@ } }, "required": [ - "creationTime", "id", - "roleArn", "sequenceStoreId", - "status" + "roleArn", + "status", + "creationTime" ], "type": "structure" }, @@ -6999,11 
+7868,11 @@ } }, "required": [ - "referenceArn", - "sampleId", - "sourceFileType", "sourceFiles", - "subjectId" + "sourceFileType", + "subjectId", + "sampleId", + "referenceArn" ], "type": "structure" }, @@ -7058,11 +7927,11 @@ } }, "required": [ - "creationTime", "id", "referenceStoreId", "roleArn", - "status" + "status", + "creationTime" ], "type": "structure" }, @@ -7082,8 +7951,8 @@ } }, "required": [ - "name", - "sourceFile" + "sourceFile", + "name" ], "type": "structure" }, @@ -7128,8 +7997,8 @@ } }, "required": [ - "requestId", - "roleArn" + "roleArn", + "requestId" ], "type": "structure" }, @@ -7164,6 +8033,9 @@ }, "StartVariantImportRequest": { "members": { + "annotationFields": { + "shape": "AnnotationFieldMap" + }, "destinationName": { "shape": "StoreName" }, @@ -7179,8 +8051,8 @@ }, "required": [ "destinationName", - "items", - "roleArn" + "roleArn", + "items" ], "type": "structure" }, @@ -7216,7 +8088,7 @@ "StoreName": { "max": 255, "min": 3, - "pattern": "^([a-z]){1}([a-z0-9_]){2,254}$", + "pattern": "([a-z]){1}([a-z0-9_]){2,254}", "type": "string" }, "StoreOptions": { @@ -7244,7 +8116,7 @@ "SubjectId": { "max": 127, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "SyntheticTimestamp_date_time": { @@ -7254,7 +8126,7 @@ "TagArn": { "max": 128, "min": 1, - "pattern": "^arn:.+$", + "pattern": "arn:.+", "type": "string" }, "TagKey": { @@ -7319,7 +8191,7 @@ "TaskId": { "max": 18, "min": 1, - "pattern": "^[0-9]+$", + "pattern": "[0-9]+", "type": "string" }, "TaskList": { @@ -7336,6 +8208,9 @@ "creationTime": { "shape": "TaskTimestamp" }, + "gpus": { + "shape": "TaskListItemGpusInteger" + }, "memory": { "shape": "TaskListItemMemoryInteger" }, @@ -7362,6 +8237,11 @@ "min": 1, "type": "integer" }, + "TaskListItemGpusInteger": { + "box": true, + "min": 0, + "type": "integer" + }, "TaskListItemMemoryInteger": { "box": true, "min": 1, @@ -7370,11 
+8250,11 @@ "TaskListToken": { "max": 128, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "TaskLogStream": { - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "TaskName": { @@ -7397,7 +8277,7 @@ "type": "string" }, "TaskStatusMessage": { - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "TaskTimestamp": { @@ -7523,12 +8403,12 @@ } }, "required": [ - "creationTime", - "description", "id", - "name", "reference", "status", + "name", + "description", + "creationTime", "updateTime" ], "type": "structure" @@ -7546,6 +8426,9 @@ "maxDuration": { "shape": "UpdateRunGroupRequestMaxDurationInteger" }, + "maxGpus": { + "shape": "UpdateRunGroupRequestMaxGpusInteger" + }, "maxRuns": { "shape": "UpdateRunGroupRequestMaxRunsInteger" }, @@ -7570,6 +8453,12 @@ "min": 1, "type": "integer" }, + "UpdateRunGroupRequestMaxGpusInteger": { + "box": true, + "max": 100000, + "min": 1, + "type": "integer" + }, "UpdateRunGroupRequestMaxRunsInteger": { "box": true, "max": 100000, @@ -7621,12 +8510,12 @@ } }, "required": [ - "creationTime", - "description", "id", - "name", "reference", "status", + "name", + "description", + "creationTime", "updateTime" ], "type": "structure" @@ -7650,6 +8539,65 @@ ], "type": "structure" }, + "UploadId": { + "max": 36, + "min": 10, + "pattern": "[0-9]+", + "type": "string" + }, + "UploadReadSetPartRequest": { + "members": { + "partNumber": { + "location": "querystring", + "locationName": "partNumber", + "shape": "UploadReadSetPartRequestPartNumberInteger" + }, + "partSource": { + "location": "querystring", + "locationName": "partSource", + "shape": "ReadSetPartSource" + }, + "payload": { + "shape": "ReadSetPartStreamingBlob" + }, + 
"sequenceStoreId": { + "location": "uri", + "locationName": "sequenceStoreId", + "shape": "SequenceStoreId" + }, + "uploadId": { + "location": "uri", + "locationName": "uploadId", + "shape": "UploadId" + } + }, + "payload": "payload", + "required": [ + "sequenceStoreId", + "uploadId", + "partSource", + "partNumber", + "payload" + ], + "type": "structure" + }, + "UploadReadSetPartRequestPartNumberInteger": { + "box": true, + "max": 10000, + "min": 1, + "type": "integer" + }, + "UploadReadSetPartResponse": { + "members": { + "checksum": { + "shape": "String" + } + }, + "required": [ + "checksum" + ], + "type": "structure" + }, "ValidationException": { "error": { "httpStatusCode": 400, @@ -7679,8 +8627,8 @@ } }, "required": [ - "jobStatus", - "source" + "source", + "jobStatus" ], "type": "structure" }, @@ -7711,6 +8659,9 @@ }, "VariantImportJobItem": { "members": { + "annotationFields": { + "shape": "AnnotationFieldMap" + }, "completionTime": { "shape": "CompletionTime" }, @@ -7737,11 +8688,11 @@ } }, "required": [ - "creationTime", - "destinationName", "id", + "destinationName", "roleArn", "status", + "creationTime", "updateTime" ], "type": "structure" @@ -7789,17 +8740,17 @@ } }, "required": [ - "creationTime", - "description", "id", - "name", "reference", - "sseConfig", "status", - "statusMessage", "storeArn", - "storeSizeBytes", - "updateTime" + "name", + "description", + "sseConfig", + "creationTime", + "updateTime", + "statusMessage", + "storeSizeBytes" ], "type": "structure" }, @@ -7823,19 +8774,19 @@ "WorkflowArn": { "max": 128, "min": 1, - "pattern": "^arn:.+$", + "pattern": "arn:.+", "type": "string" }, "WorkflowDefinition": { "max": 256, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "WorkflowDescription": { "max": 256, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": 
"[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "WorkflowDigest": { @@ -7871,7 +8822,7 @@ "WorkflowId": { "max": 18, "min": 1, - "pattern": "^[0-9]+$", + "pattern": "[0-9]+", "type": "string" }, "WorkflowList": { @@ -7894,6 +8845,9 @@ "id": { "shape": "WorkflowId" }, + "metadata": { + "shape": "WorkflowMetadata" + }, "name": { "shape": "WorkflowName" }, @@ -7909,19 +8863,38 @@ "WorkflowListToken": { "max": 128, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "WorkflowMain": { "max": 128, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", + "type": "string" + }, + "WorkflowMetadata": { + "key": { + "shape": "WorkflowMetadataKey" + }, + "type": "map", + "value": { + "shape": "WorkflowMetadataValue" + } + }, + "WorkflowMetadataKey": { + "max": 128, + "min": 1, + "type": "string" + }, + "WorkflowMetadataValue": { + "max": 256, + "min": 0, "type": "string" }, "WorkflowName": { "max": 128, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "WorkflowParameter": { @@ -7938,13 +8911,13 @@ "WorkflowParameterDescription": { "max": 256, "min": 0, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "WorkflowParameterName": { "max": 128, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "WorkflowParameterTemplate": { @@ -7961,7 +8934,7 @@ "WorkflowRequestId": { "max": 128, "min": 1, - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": 
"string" }, "WorkflowStatus": { @@ -7970,14 +8943,15 @@ "ACTIVE", "UPDATING", "DELETED", - "FAILED" + "FAILED", + "INACTIVE" ], "max": 64, "min": 1, "type": "string" }, "WorkflowStatusMessage": { - "pattern": "^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$", + "pattern": "[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+", "type": "string" }, "WorkflowTimestamp": { @@ -7986,7 +8960,8 @@ }, "WorkflowType": { "enum": [ - "PRIVATE" + "PRIVATE", + "READY2RUN" ], "max": 64, "min": 1, diff --git a/models/apis/omics/2022-11-28/docs-2.json b/models/apis/omics/2022-11-28/docs-2.json index 82a1b17216d..47277a27153 100644 --- a/models/apis/omics/2022-11-28/docs-2.json +++ b/models/apis/omics/2022-11-28/docs-2.json @@ -1,12 +1,15 @@ { "version": "2.0", - "service": "

This is the Amazon Omics API Reference. For an introduction to the service, see What is Amazon Omics? in the Amazon Omics Developer Guide.

", + "service": "

This is the Amazon Omics API Reference. For an introduction to the service, see What is Amazon Omics? in the Amazon Omics User Guide.

", "operations": { + "AbortMultipartReadSetUpload": "

Stops a multipart upload.

", "BatchDeleteReadSet": "

Deletes one or more read sets.

", "CancelAnnotationImportJob": "

Cancels an annotation import job.

", "CancelRun": "

Cancels a run.

", "CancelVariantImportJob": "

Cancels a variant import job.

", + "CompleteMultipartReadSetUpload": "

Concludes a multipart upload once you have uploaded all the components.

", "CreateAnnotationStore": "

Creates an annotation store.

", + "CreateMultipartReadSetUpload": "

Begins a multipart read set upload.

", "CreateReferenceStore": "

Creates a reference store.

", "CreateRunGroup": "

Creates a run group.

", "CreateSequenceStore": "

Creates a sequence store.

", @@ -40,9 +43,11 @@ "GetWorkflow": "

Gets information about a workflow.

", "ListAnnotationImportJobs": "

Retrieves a list of annotation import jobs.

", "ListAnnotationStores": "

Retrieves a list of annotation stores.

", + "ListMultipartReadSetUploads": "

Lists all multipart read set uploads and their statuses.

", "ListReadSetActivationJobs": "

Retrieves a list of read set activation jobs.

", "ListReadSetExportJobs": "

Retrieves a list of read set export jobs.

", "ListReadSetImportJobs": "

Retrieves a list of read set import jobs.

", + "ListReadSetUploadParts": "

This operation will list all parts in a requested multipart upload for a sequence store.

", "ListReadSets": "

Retrieves a list of read sets.

", "ListReferenceImportJobs": "

Retrieves a list of reference import jobs.

", "ListReferenceStores": "

Retrieves a list of reference stores.

", @@ -67,9 +72,28 @@ "UpdateAnnotationStore": "

Updates an annotation store.

", "UpdateRunGroup": "

Updates a run group.

", "UpdateVariantStore": "

Updates a variant store.

", - "UpdateWorkflow": "

Updates a workflow.

" + "UpdateWorkflow": "

Updates a workflow.

", + "UploadReadSetPart": "

This operation uploads a specific part of a read set. If you upload a new part using a previously used part number, the previously uploaded part will be overwritten.

" }, "shapes": { + "AbortMultipartReadSetUploadRequest": { + "base": null, + "refs": { + } + }, + "AbortMultipartReadSetUploadResponse": { + "base": null, + "refs": { + } + }, + "Accelerators": { + "base": null, + "refs": { + "CreateWorkflowRequest$accelerators": "

The computational accelerator specified to run the workflow.

", + "GetRunResponse$accelerators": "

The computational accelerator used to run the workflow.

", + "GetWorkflowResponse$accelerators": "

The computational accelerator specified to run the workflow.

" + } + }, "AccessDeniedException": { "base": "

You do not have sufficient access to perform this action.

", "refs": { @@ -114,6 +138,29 @@ "StartReadSetActivationJobResponse$id": "

The job's ID.

" } }, + "AnnotationFieldMap": { + "base": null, + "refs": { + "AnnotationImportJobItem$annotationFields": "

The annotation schema generated by the parsed annotation data.

", + "GetAnnotationImportResponse$annotationFields": "

The annotation schema generated by the parsed annotation data.

", + "GetVariantImportResponse$annotationFields": "

The annotation schema generated by the parsed annotation data.

", + "StartAnnotationImportRequest$annotationFields": "

The annotation schema generated by the parsed annotation data.

", + "StartVariantImportRequest$annotationFields": "

The annotation schema generated by the parsed annotation data.

", + "VariantImportJobItem$annotationFields": "

The annotation schema generated by the parsed annotation data.

" + } + }, + "AnnotationFieldMapKeyString": { + "base": null, + "refs": { + "AnnotationFieldMap$key": null + } + }, + "AnnotationFieldMapValueString": { + "base": null, + "refs": { + "AnnotationFieldMap$value": null + } + }, "AnnotationImportItemDetail": { "base": "

Details about an imported annotation item.

", "refs": { @@ -202,8 +249,8 @@ "Boolean": { "base": null, "refs": { - "VcfOptions$ignoreFilterField": "

The file's ignore filter field setting.

", "VcfOptions$ignoreQualField": "

The file's ignore qual field setting.

", + "VcfOptions$ignoreFilterField": "

The file's ignore filter field setting.

", "WorkflowParameter$optional": "

Whether the parameter is optional.

" } }, @@ -235,6 +282,7 @@ "ClientToken": { "base": null, "refs": { + "CreateMultipartReadSetUploadRequest$clientToken": "

An idempotency token that can be used to avoid triggering multiple multipart uploads.

", "CreateReferenceStoreRequest$clientToken": "

To ensure that requests don't run multiple times, specify a unique token for each request.

", "CreateSequenceStoreRequest$clientToken": "

To ensure that requests don't run multiple times, specify a unique token for each request.

", "StartReadSetActivationJobRequest$clientToken": "

To ensure that jobs don't run multiple times, specify a unique token for each job.

", @@ -249,6 +297,34 @@ "ReadOptions$comment": "

The file's comment character.

" } }, + "CompleteMultipartReadSetUploadRequest": { + "base": null, + "refs": { + } + }, + "CompleteMultipartReadSetUploadResponse": { + "base": null, + "refs": { + } + }, + "CompleteReadSetUploadPartList": { + "base": null, + "refs": { + "CompleteMultipartReadSetUploadRequest$parts": "

The individual uploads or parts of a multipart upload.

" + } + }, + "CompleteReadSetUploadPartListItem": { + "base": "

Part of the response to the CompleteReadSetUpload API, including metadata.

", + "refs": { + "CompleteReadSetUploadPartList$member": null + } + }, + "CompleteReadSetUploadPartListItemPartNumberInteger": { + "base": null, + "refs": { + "CompleteReadSetUploadPartListItem$partNumber": "

A number identifying the part in a read set upload.

" + } + }, "CompletionTime": { "base": null, "refs": { @@ -279,6 +355,16 @@ "refs": { } }, + "CreateMultipartReadSetUploadRequest": { + "base": null, + "refs": { + } + }, + "CreateMultipartReadSetUploadResponse": { + "base": null, + "refs": { + } + }, "CreateReferenceStoreRequest": { "base": null, "refs": { @@ -306,6 +392,12 @@ "CreateRunGroupRequest$maxDuration": "

A maximum run time for the group in minutes.

" } }, + "CreateRunGroupRequestMaxGpusInteger": { + "base": null, + "refs": { + "CreateRunGroupRequest$maxGpus": "

The maximum GPUs that can be used by a run group.

" + } + }, "CreateRunGroupRequestMaxRunsInteger": { "base": null, "refs": { @@ -513,11 +605,11 @@ "FileInformation": { "base": "

Details about a file.

", "refs": { - "ReadSetFiles$index": "

The files' index.

", "ReadSetFiles$source1": "

The location of the first file in Amazon S3.

", "ReadSetFiles$source2": "

The location of the second file in Amazon S3.

", - "ReferenceFiles$index": "

The files' index.

", - "ReferenceFiles$source": "

The source file's location in Amazon S3.

" + "ReadSetFiles$index": "

The files' index.

", + "ReferenceFiles$source": "

The source file's location in Amazon S3.

", + "ReferenceFiles$index": "

The files' index.

" } }, "FileInformationContentLengthLong": { @@ -541,8 +633,11 @@ "FileType": { "base": null, "refs": { + "CreateMultipartReadSetUploadRequest$sourceFileType": "

The type of file being uploaded.

", + "CreateMultipartReadSetUploadResponse$sourceFileType": "

The file type of the read set source.

", "GetReadSetMetadataResponse$fileType": "

The read set's file type.

", "ImportReadSetSourceItem$sourceFileType": "

The source's file type.

", + "MultipartReadSetUploadListItem$sourceFileType": "

The type of file the read set originated from.

", "ReadSetListItem$fileType": "

The read set's file type.

", "StartReadSetImportJobSourceItem$sourceFileType": "

The source's file type.

" } @@ -575,7 +670,11 @@ "GeneratedFrom": { "base": null, "refs": { + "CreateMultipartReadSetUploadRequest$generatedFrom": "

Where the source originated.

", + "CreateMultipartReadSetUploadResponse$generatedFrom": "

The source of the read set.

", "ImportReadSetSourceItem$generatedFrom": "

Where the source originated.

", + "MultipartReadSetUploadListItem$generatedFrom": "

The source of an uploaded part.

", + "ReadSetFilter$generatedFrom": "

Where the source originated.

", "SequenceInformation$generatedFrom": "

Where the sequence originated.

", "StartReadSetImportJobSourceItem$generatedFrom": "

Where the source originated.

" } @@ -724,6 +823,12 @@ "GetRunGroupResponse$maxDuration": "

The group's maximum run time in minutes.

" } }, + "GetRunGroupResponseMaxGpusInteger": { + "base": null, + "refs": { + "GetRunGroupResponse$maxGpus": "

The maximum GPUs that can be used by a run group.

" + } + }, "GetRunGroupResponseMaxRunsInteger": { "base": null, "refs": { @@ -768,6 +873,12 @@ "GetRunTaskResponse$cpus": "

The task's CPU usage.

" } }, + "GetRunTaskResponseGpusInteger": { + "base": null, + "refs": { + "GetRunTaskResponse$gpus": "

The number of Graphics Processing Units (GPU) specified in the task.

" + } + }, "GetRunTaskResponseMemoryInteger": { "base": null, "refs": { @@ -1012,6 +1123,22 @@ "refs": { } }, + "ListMultipartReadSetUploadsRequest": { + "base": null, + "refs": { + } + }, + "ListMultipartReadSetUploadsRequestMaxResultsInteger": { + "base": null, + "refs": { + "ListMultipartReadSetUploadsRequest$maxResults": "

The maximum number of multipart uploads returned in a page.

" + } + }, + "ListMultipartReadSetUploadsResponse": { + "base": null, + "refs": { + } + }, "ListReadSetActivationJobsRequest": { "base": null, "refs": { @@ -1060,6 +1187,22 @@ "refs": { } }, + "ListReadSetUploadPartsRequest": { + "base": null, + "refs": { + } + }, + "ListReadSetUploadPartsRequestMaxResultsInteger": { + "base": null, + "refs": { + "ListReadSetUploadPartsRequest$maxResults": "

The maximum number of read set upload parts returned in a page.

" + } + }, + "ListReadSetUploadPartsResponse": { + "base": null, + "refs": { + } + }, "ListReadSetsRequest": { "base": null, "refs": { @@ -1288,8 +1431,8 @@ "AnnotationStoreItem$storeSizeBytes": "

The store's size in bytes.

", "GetAnnotationStoreResponse$storeSizeBytes": "

The store's size in bytes.

", "GetVariantStoreResponse$storeSizeBytes": "

The store's size in bytes.

", - "SequenceInformation$totalBaseCount": "

The sequence's total base count.

", "SequenceInformation$totalReadCount": "

The sequence's total read count.

", + "SequenceInformation$totalBaseCount": "

The sequence's total base count.

", "VariantStoreItem$storeSizeBytes": "

The store's size in bytes.

" } }, @@ -1301,15 +1444,31 @@ "ReferenceListItem$md5": "

The reference's MD5 checksum.

" } }, + "MultipartReadSetUploadList": { + "base": null, + "refs": { + "ListMultipartReadSetUploadsResponse$uploads": "

An array of multipart uploads.

" + } + }, + "MultipartReadSetUploadListItem": { + "base": "

Part of the response to ListMultipartReadSetUploads, excluding completed and aborted multipart uploads.

", + "refs": { + "MultipartReadSetUploadList$member": null + } + }, "NextToken": { "base": null, "refs": { + "ListMultipartReadSetUploadsRequest$nextToken": "

Next token returned in the response of a previous ListMultipartReadSetUploads call. Used to get the next page of results.

", + "ListMultipartReadSetUploadsResponse$nextToken": "

Next token returned in the response of a previous ListMultipartReadSetUploads call. Used to get the next page of results.

", "ListReadSetActivationJobsRequest$nextToken": "

Specify the pagination token from a previous request to retrieve the next page of results.

", "ListReadSetActivationJobsResponse$nextToken": "

A pagination token that's included if more results are available.

", "ListReadSetExportJobsRequest$nextToken": "

Specify the pagination token from a previous request to retrieve the next page of results.

", "ListReadSetExportJobsResponse$nextToken": "

A pagination token that's included if more results are available.

", "ListReadSetImportJobsRequest$nextToken": "

Specify the pagination token from a previous request to retrieve the next page of results.

", "ListReadSetImportJobsResponse$nextToken": "

A pagination token that's included if more results are available.

", + "ListReadSetUploadPartsRequest$nextToken": "

Next token returned in the response of a previous ListReadSetUploadPartsRequest call. Used to get the next page of results.

", + "ListReadSetUploadPartsResponse$nextToken": "

Next token returned in the response of a previous ListReadSetUploadParts call. Used to get the next page of results.

", "ListReadSetsRequest$nextToken": "

Specify the pagination token from a previous request to retrieve the next page of results.

", "ListReadSetsResponse$nextToken": "

A pagination token that's included if more results are available.

", "ListReferenceImportJobsRequest$nextToken": "

Specify the pagination token from a previous request to retrieve the next page of results.

", @@ -1322,6 +1481,11 @@ "ListSequenceStoresResponse$nextToken": "

A pagination token that's included if more results are available.

" } }, + "NotSupportedOperationException": { + "base": "

The operation is not supported by Amazon Omics, or the API does not exist.

", + "refs": { + } + }, "PrimitiveBoolean": { "base": null, "refs": { @@ -1395,8 +1559,11 @@ "ReadSetDescription": { "base": null, "refs": { + "CreateMultipartReadSetUploadRequest$description": "

The description of the read set.

", + "CreateMultipartReadSetUploadResponse$description": "

The description of the read set.

", "GetReadSetMetadataResponse$description": "

The read set's description.

", "ImportReadSetSourceItem$description": "

The source's description.

", + "MultipartReadSetUploadListItem$description": "

The description of a read set.

", "ReadSetListItem$description": "

The read set's description.

", "StartReadSetImportJobSourceItem$description": "

The source's description.

" } @@ -1438,6 +1605,7 @@ "base": null, "refs": { "ActivateReadSetSourceItem$readSetId": "

The source's read set ID.

", + "CompleteMultipartReadSetUploadResponse$readSetId": "

The read set ID created for an uploaded read set.

", "ExportReadSet$readSetId": "

The set's ID.

", "ExportReadSetDetail$id": "

The set's ID.

", "GetReadSetMetadataRequest$id": "

The read set's ID.

", @@ -1485,13 +1653,31 @@ "ReadSetName": { "base": null, "refs": { + "CreateMultipartReadSetUploadRequest$name": "

The name of the read set.

", + "CreateMultipartReadSetUploadResponse$name": "

The name of the read set.

", "GetReadSetMetadataResponse$name": "

The read set's name.

", "ImportReadSetSourceItem$name": "

The source's name.

", + "MultipartReadSetUploadListItem$name": "

The name of a read set.

", "ReadSetFilter$name": "

A name to filter on.

", "ReadSetListItem$name": "

The read set's name.

", "StartReadSetImportJobSourceItem$name": "

The source's name.

" } }, + "ReadSetPartSource": { + "base": null, + "refs": { + "CompleteReadSetUploadPartListItem$partSource": "

The source file of the part being uploaded.

", + "ListReadSetUploadPartsRequest$partSource": "

The source file for the upload part.

", + "ReadSetUploadPartListItem$partSource": "

The origin of the part being direct uploaded.

", + "UploadReadSetPartRequest$partSource": "

The source file for an upload part.

" + } + }, + "ReadSetPartStreamingBlob": { + "base": null, + "refs": { + "UploadReadSetPartRequest$payload": "

The read set data to upload for a part.

" + } + }, "ReadSetStatus": { "base": null, "refs": { @@ -1500,18 +1686,58 @@ "ReadSetListItem$status": "

The read set's status.

" } }, + "ReadSetStatusMessage": { + "base": null, + "refs": { + "GetReadSetMetadataResponse$statusMessage": "

The status message for a read set. It provides more detail as to why the read set has a status.

", + "ReadSetListItem$statusMessage": "

The status for a read set. It provides more detail as to why the read set has a status.

" + } + }, "ReadSetStreamingBlob": { "base": null, "refs": { "GetReadSetResponse$payload": "

The read set file payload.

" } }, + "ReadSetUploadPartList": { + "base": null, + "refs": { + "ListReadSetUploadPartsResponse$parts": "

An array of upload parts.

" + } + }, + "ReadSetUploadPartListFilter": { + "base": "

Filter settings that select for read set upload parts of interest.

", + "refs": { + "ListReadSetUploadPartsRequest$filter": "

Attributes used to filter for a specific subset of read set part uploads.

" + } + }, + "ReadSetUploadPartListItem": { + "base": "

The metadata of a single part of a file that was added to a multipart upload. A list of these parts is returned in the response to the ListReadSetUploadParts API.

", + "refs": { + "ReadSetUploadPartList$member": null + } + }, + "ReadSetUploadPartListItemPartNumberInteger": { + "base": null, + "refs": { + "ReadSetUploadPartListItem$partNumber": "

The number identifying the part in an upload.

" + } + }, + "ReadSetUploadPartListItemPartSizeLong": { + "base": null, + "refs": { + "ReadSetUploadPartListItem$partSize": "

The size of the part in an upload.

" + } + }, "ReferenceArn": { "base": null, "refs": { + "CreateMultipartReadSetUploadRequest$referenceArn": "

The ARN of the reference.

", + "CreateMultipartReadSetUploadResponse$referenceArn": "

The read set source's reference ARN.

", "GetReadSetMetadataResponse$referenceArn": "

The read set's genome reference ARN.

", "GetReferenceMetadataResponse$arn": "

The reference's ARN.

", "ImportReadSetSourceItem$referenceArn": "

The source's genome reference ARN.

", + "MultipartReadSetUploadListItem$referenceArn": "

The source's reference ARN.

", "ReadSetFilter$referenceArn": "

A genome reference ARN to filter on.

", "ReadSetListItem$referenceArn": "

The read set's genome reference ARN.

", "ReferenceItem$referenceArn": "

The reference's ARN.

", @@ -1808,6 +2034,12 @@ "RunGroupListItem$maxDuration": "

The group's maximum duration setting in minutes.

" } }, + "RunGroupListItemMaxGpusInteger": { + "base": null, + "refs": { + "RunGroupListItem$maxGpus": "

The maximum GPUs that can be used by a run group.

" + } + }, "RunGroupListItemMaxRunsInteger": { "base": null, "refs": { @@ -1972,6 +2204,7 @@ "base": null, "refs": { "GetRunResponse$status": "

The run's status.

", + "ListRunsRequest$status": "

The status of a run.

", "RunListItem$status": "

The run's status.

", "StartRunResponse$status": "

The run's status.

" } @@ -1996,8 +2229,12 @@ "S3Destination": { "base": null, "refs": { + "CreateSequenceStoreRequest$fallbackLocation": "

An S3 location that is used to store files that have failed a direct upload.

", + "CreateSequenceStoreResponse$fallbackLocation": "

An S3 location that is used to store files that have failed a direct upload.

", "ExportReadSetJobDetail$destination": "

The job's destination in Amazon S3.

", "GetReadSetExportJobResponse$destination": "

The job's destination in Amazon S3.

", + "GetSequenceStoreResponse$fallbackLocation": "

An S3 location that is used to store files that have failed a direct upload.

", + "SequenceStoreDetail$fallbackLocation": "

An S3 location that is used to store files that have failed a direct upload.

", "StartReadSetExportJobRequest$destination": "

A location for exported files in Amazon S3.

", "StartReadSetExportJobResponse$destination": "

The job's output location.

" } @@ -2018,8 +2255,12 @@ "SampleId": { "base": null, "refs": { + "CreateMultipartReadSetUploadRequest$sampleId": "

The source's sample ID.

", + "CreateMultipartReadSetUploadResponse$sampleId": "

The source's sample ID.

", "GetReadSetMetadataResponse$sampleId": "

The read set's sample ID.

", "ImportReadSetSourceItem$sampleId": "

The source's sample ID.

", + "MultipartReadSetUploadListItem$sampleId": "

The read set source's sample ID.

", + "ReadSetFilter$sampleId": "

The read set source's sample ID.

", "ReadSetListItem$sampleId": "

The read set's sample ID.

", "StartReadSetImportJobSourceItem$sampleId": "

The source's sample ID.

" } @@ -2093,8 +2334,12 @@ "SequenceStoreId": { "base": null, "refs": { + "AbortMultipartReadSetUploadRequest$sequenceStoreId": "

The sequence store ID for the store involved in the multipart upload.

", "ActivateReadSetJobItem$sequenceStoreId": "

The job's sequence store ID.

", "BatchDeleteReadSetRequest$sequenceStoreId": "

The read sets' sequence store ID.

", + "CompleteMultipartReadSetUploadRequest$sequenceStoreId": "

The sequence store ID for the store involved in the multipart upload.

", + "CreateMultipartReadSetUploadRequest$sequenceStoreId": "

The sequence store ID for the store that is the destination of the multipart uploads.

", + "CreateMultipartReadSetUploadResponse$sequenceStoreId": "

The sequence store ID for the store that the read set will be created in.

", "CreateSequenceStoreResponse$id": "

The store's ID.

", "DeleteSequenceStoreRequest$id": "

The sequence store's ID.

", "ExportReadSetJobDetail$sequenceStoreId": "

The job's sequence store ID.

", @@ -2110,10 +2355,13 @@ "GetSequenceStoreRequest$id": "

The store's ID.

", "GetSequenceStoreResponse$id": "

The store's ID.

", "ImportReadSetJobItem$sequenceStoreId": "

The job's sequence store ID.

", + "ListMultipartReadSetUploadsRequest$sequenceStoreId": "

The Sequence Store ID used for the multipart uploads.

", "ListReadSetActivationJobsRequest$sequenceStoreId": "

The read set's sequence store ID.

", "ListReadSetExportJobsRequest$sequenceStoreId": "

The jobs' sequence store ID.

", "ListReadSetImportJobsRequest$sequenceStoreId": "

The jobs' sequence store ID.

", + "ListReadSetUploadPartsRequest$sequenceStoreId": "

The Sequence Store ID used for the multipart uploads.

", "ListReadSetsRequest$sequenceStoreId": "

The jobs' sequence store ID.

", + "MultipartReadSetUploadListItem$sequenceStoreId": "

The sequence store ID used for the multipart upload.

", "ReadSetListItem$sequenceStoreId": "

The read set's sequence store ID.

", "SequenceStoreDetail$id": "

The store's ID.

", "StartReadSetActivationJobRequest$sequenceStoreId": "

The read set's sequence store ID.

", @@ -2121,7 +2369,8 @@ "StartReadSetExportJobRequest$sequenceStoreId": "

The read set's sequence store ID.

", "StartReadSetExportJobResponse$sequenceStoreId": "

The read set's sequence store ID.

", "StartReadSetImportJobRequest$sequenceStoreId": "

The read set's sequence store ID.

", - "StartReadSetImportJobResponse$sequenceStoreId": "

The read set's sequence store ID.

" + "StartReadSetImportJobResponse$sequenceStoreId": "

The read set's sequence store ID.

", + "UploadReadSetPartRequest$sequenceStoreId": "

The Sequence Store ID used for the multipart upload.

" } }, "SequenceStoreName": { @@ -2368,9 +2617,10 @@ "base": null, "refs": { "AccessDeniedException$message": null, - "AnnotationImportJobItem$destinationName": "

The job's destination annotation store.

", "AnnotationImportJobItem$id": "

The job's ID.

", + "AnnotationImportJobItem$destinationName": "

The job's destination annotation store.

", "AnnotationStoreItem$name": "

The store's name.

", + "CompleteReadSetUploadPartListItem$checksum": "

A unique identifier used to confirm that parts are being added to the correct upload.

", "ConflictException$message": null, "CreateAnnotationStoreResponse$name": "

The store's name.

", "CreateVariantStoreResponse$name": "

The store's name.

", @@ -2387,9 +2637,11 @@ "ListVariantImportJobsFilter$storeName": "

A store name to filter on.

", "ListVariantImportJobsResponse$nextToken": "

A pagination token that's included if more results are available.

", "ListVariantStoresResponse$nextToken": "

A pagination token that's included if more results are available.

", + "NotSupportedOperationException$message": null, "RangeNotSatisfiableException$message": null, "ReadSetBatchError$code": "

The error's code.

", "ReadSetBatchError$message": "

The error's message.

", + "ReadSetUploadPartListItem$checksum": "

A unique identifier used to confirm that parts are being added to the correct upload.

", "RequestTimeoutException$message": null, "ResourceNotFoundException$message": null, "SequenceInformation$alignment": "

The sequence's alignment setting.

", @@ -2399,17 +2651,22 @@ "UpdateAnnotationStoreResponse$name": "

The store's name.

", "UpdateVariantStoreRequest$name": "

A name for the store.

", "UpdateVariantStoreResponse$name": "

The store's name.

", + "UploadReadSetPartResponse$checksum": "

An identifier used to confirm that parts are being added to the intended upload.

", "ValidationException$message": null, - "VariantImportJobItem$destinationName": "

The job's destination variant store.

", "VariantImportJobItem$id": "

The job's ID.

", + "VariantImportJobItem$destinationName": "

The job's destination variant store.

", "VariantStoreItem$name": "

The store's name.

" } }, "SubjectId": { "base": null, "refs": { + "CreateMultipartReadSetUploadRequest$subjectId": "

The source's subject ID.

", + "CreateMultipartReadSetUploadResponse$subjectId": "

The source's subject ID.

", "GetReadSetMetadataResponse$subjectId": "

The read set's subject ID.

", "ImportReadSetSourceItem$subjectId": "

The source's subject ID.

", + "MultipartReadSetUploadListItem$subjectId": "

The read set source's subject ID.

", + "ReadSetFilter$subjectId": "

The read set source's subject ID.

", "ReadSetListItem$subjectId": "

The read set's subject ID.

", "StartReadSetImportJobSourceItem$subjectId": "

The source's subject ID.

" } @@ -2419,38 +2676,44 @@ "refs": { "ActivateReadSetFilter$createdAfter": "

The filter's start date.

", "ActivateReadSetFilter$createdBefore": "

The filter's end date.

", - "ActivateReadSetJobItem$completionTime": "

When the job completed.

", "ActivateReadSetJobItem$creationTime": "

When the job was created.

", + "ActivateReadSetJobItem$completionTime": "

When the job completed.

", + "CreateMultipartReadSetUploadResponse$creationTime": "

The creation time of the multipart upload.

", "CreateReferenceStoreResponse$creationTime": "

When the store was created.

", "CreateSequenceStoreResponse$creationTime": "

When the store was created.

", "ExportReadSetFilter$createdAfter": "

The filter's start date.

", "ExportReadSetFilter$createdBefore": "

The filter's end date.

", - "ExportReadSetJobDetail$completionTime": "

When the job completed.

", "ExportReadSetJobDetail$creationTime": "

When the job was created.

", - "GetReadSetActivationJobResponse$completionTime": "

When the job completed.

", + "ExportReadSetJobDetail$completionTime": "

When the job completed.

", "GetReadSetActivationJobResponse$creationTime": "

When the job was created.

", - "GetReadSetExportJobResponse$completionTime": "

When the job completed.

", + "GetReadSetActivationJobResponse$completionTime": "

When the job completed.

", "GetReadSetExportJobResponse$creationTime": "

When the job was created.

", - "GetReadSetImportJobResponse$completionTime": "

When the job completed.

", + "GetReadSetExportJobResponse$completionTime": "

When the job completed.

", "GetReadSetImportJobResponse$creationTime": "

When the job was created.

", + "GetReadSetImportJobResponse$completionTime": "

When the job completed.

", "GetReadSetMetadataResponse$creationTime": "

When the read set was created.

", - "GetReferenceImportJobResponse$completionTime": "

When the job completed.

", "GetReferenceImportJobResponse$creationTime": "

When the job was created.

", + "GetReferenceImportJobResponse$completionTime": "

When the job completed.

", "GetReferenceMetadataResponse$creationTime": "

When the reference was created.

", "GetReferenceMetadataResponse$updateTime": "

When the reference was updated.

", "GetReferenceStoreResponse$creationTime": "

When the store was created.

", "GetSequenceStoreResponse$creationTime": "

When the store was created.

", "ImportReadSetFilter$createdAfter": "

The filter's start date.

", "ImportReadSetFilter$createdBefore": "

The filter's end date.

", - "ImportReadSetJobItem$completionTime": "

When the job completed.

", "ImportReadSetJobItem$creationTime": "

When the job was created.

", + "ImportReadSetJobItem$completionTime": "

When the job completed.

", "ImportReferenceFilter$createdAfter": "

The filter's start date.

", "ImportReferenceFilter$createdBefore": "

The filter's end date.

", - "ImportReferenceJobItem$completionTime": "

When the job completed.

", "ImportReferenceJobItem$creationTime": "

When the job was created.

", + "ImportReferenceJobItem$completionTime": "

When the job completed.

", + "MultipartReadSetUploadListItem$creationTime": "

The time stamp for when a direct upload was created.

", "ReadSetFilter$createdAfter": "

The filter's start date.

", "ReadSetFilter$createdBefore": "

The filter's end date.

", "ReadSetListItem$creationTime": "

When the read set was created.

", + "ReadSetUploadPartListFilter$createdAfter": "

Filters for read set part uploads after a specified time.

", + "ReadSetUploadPartListFilter$createdBefore": "

Filters for read set part uploads before a specified time.

", + "ReadSetUploadPartListItem$creationTime": "

The time stamp for when a direct upload was created.

", + "ReadSetUploadPartListItem$lastUpdatedTime": "

The time stamp for the most recent update to an uploaded part.

", "ReferenceFilter$createdAfter": "

The filter's start date.

", "ReferenceFilter$createdBefore": "

The filter's end date.

", "ReferenceListItem$creationTime": "

When the reference was created.

", @@ -2493,6 +2756,8 @@ "base": null, "refs": { "CreateAnnotationStoreRequest$tags": "

Tags for the store.

", + "CreateMultipartReadSetUploadRequest$tags": "

Any tags to add to the read set.

", + "CreateMultipartReadSetUploadResponse$tags": "

The tags to add to the read set.

", "CreateReferenceStoreRequest$tags": "

Tags for the store.

", "CreateRunGroupRequest$tags": "

Tags for the group.

", "CreateRunGroupResponse$tags": "

Tags for the run group.

", @@ -2508,6 +2773,7 @@ "ImportReadSetSourceItem$tags": "

The source's tags.

", "ImportReferenceSourceItem$tags": "

The source's tags.

", "ListTagsForResourceResponse$tags": "

A list of tags.

", + "MultipartReadSetUploadListItem$tags": "

Any tags you wish to add to a read set.

", "StartReadSetImportJobSourceItem$tags": "

The source's tags.

", "StartReferenceImportJobSourceItem$tags": "

The source's tags.

", "StartRunRequest$tags": "

Tags for the run.

", @@ -2563,6 +2829,12 @@ "TaskListItem$cpus": "

The task's CPU count.

" } }, + "TaskListItemGpusInteger": { + "base": null, + "refs": { + "TaskListItem$gpus": "

The number of Graphics Processing Units (GPU) specified for the task.

" + } + }, "TaskListItemMemoryInteger": { "base": null, "refs": { @@ -2674,6 +2946,12 @@ "UpdateRunGroupRequest$maxDuration": "

A maximum run time for the group in minutes.

" } }, + "UpdateRunGroupRequestMaxGpusInteger": { + "base": null, + "refs": { + "UpdateRunGroupRequest$maxGpus": "

The maximum GPUs that can be used by a run group.

" + } + }, "UpdateRunGroupRequestMaxRunsInteger": { "base": null, "refs": { @@ -2710,6 +2988,33 @@ "refs": { } }, + "UploadId": { + "base": null, + "refs": { + "AbortMultipartReadSetUploadRequest$uploadId": "

The ID for the multipart upload.

", + "CompleteMultipartReadSetUploadRequest$uploadId": "

The ID for the multipart upload.

", + "CreateMultipartReadSetUploadResponse$uploadId": "

The ID for the initiated multipart upload.

", + "ListReadSetUploadPartsRequest$uploadId": "

The ID for the initiated multipart upload.

", + "MultipartReadSetUploadListItem$uploadId": "

The ID for the initiated multipart upload.

", + "UploadReadSetPartRequest$uploadId": "

The ID for the initiated multipart upload.

" + } + }, + "UploadReadSetPartRequest": { + "base": null, + "refs": { + } + }, + "UploadReadSetPartRequestPartNumberInteger": { + "base": null, + "refs": { + "UploadReadSetPartRequest$partNumber": "

The number of the part being uploaded.

" + } + }, + "UploadReadSetPartResponse": { + "base": null, + "refs": { + } + }, "ValidationException": { "base": "

The input fails to satisfy the constraints specified by an AWS service.

", "refs": { @@ -2860,6 +3165,25 @@ "GetWorkflowResponse$main": "

The path of the main definition file for the workflow.

" } }, + "WorkflowMetadata": { + "base": null, + "refs": { + "GetWorkflowResponse$metadata": "

Gets metadata for the workflow.

", + "WorkflowListItem$metadata": "

Any metadata available for workflow. The information listed may vary depending on the workflow, and there may also be no metadata to return.

" + } + }, + "WorkflowMetadataKey": { + "base": null, + "refs": { + "WorkflowMetadata$key": null + } + }, + "WorkflowMetadataValue": { + "base": null, + "refs": { + "WorkflowMetadata$value": null + } + }, "WorkflowName": { "base": null, "refs": { diff --git a/models/apis/omics/2022-11-28/endpoint-tests-1.json b/models/apis/omics/2022-11-28/endpoint-tests-1.json index de9c6b65001..4770b5497f2 100644 --- a/models/apis/omics/2022-11-28/endpoint-tests-1.json +++ b/models/apis/omics/2022-11-28/endpoint-tests-1.json @@ -8,9 +8,9 @@ } }, "params": { - "UseDualStack": true, + "UseFIPS": true, "Region": "us-gov-east-1", - "UseFIPS": true + "UseDualStack": true } }, { @@ -21,9 +21,9 @@ } }, "params": { - "UseDualStack": false, + "UseFIPS": true, "Region": "us-gov-east-1", - "UseFIPS": true + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { - "UseDualStack": true, + "UseFIPS": false, "Region": "us-gov-east-1", - "UseFIPS": false + "UseDualStack": true } }, { @@ -47,9 +47,9 @@ } }, "params": { - "UseDualStack": false, + "UseFIPS": false, "Region": "us-gov-east-1", - "UseFIPS": false + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseDualStack": true, + "UseFIPS": true, "Region": "cn-north-1", - "UseFIPS": true + "UseDualStack": true } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": false, + "UseFIPS": true, "Region": "cn-north-1", - "UseFIPS": true + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": true, + "UseFIPS": false, "Region": "cn-north-1", - "UseFIPS": false + "UseDualStack": true } }, { @@ -99,9 +99,9 @@ } }, "params": { - "UseDualStack": false, + "UseFIPS": false, "Region": "cn-north-1", - "UseFIPS": false + "UseDualStack": false } }, { @@ -110,9 +110,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "UseFIPS": true, "Region": "us-iso-east-1", - "UseFIPS": true + "UseDualStack": true } }, { @@ -123,9 
+123,9 @@ } }, "params": { - "UseDualStack": false, + "UseFIPS": true, "Region": "us-iso-east-1", - "UseFIPS": true + "UseDualStack": false } }, { @@ -134,9 +134,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "UseFIPS": false, "Region": "us-iso-east-1", - "UseFIPS": false + "UseDualStack": true } }, { @@ -147,9 +147,9 @@ } }, "params": { - "UseDualStack": false, + "UseFIPS": false, "Region": "us-iso-east-1", - "UseFIPS": false + "UseDualStack": false } }, { @@ -160,9 +160,9 @@ } }, "params": { - "UseDualStack": true, + "UseFIPS": true, "Region": "us-east-1", - "UseFIPS": true + "UseDualStack": true } }, { @@ -173,9 +173,9 @@ } }, "params": { - "UseDualStack": false, + "UseFIPS": true, "Region": "us-east-1", - "UseFIPS": true + "UseDualStack": false } }, { @@ -186,9 +186,9 @@ } }, "params": { - "UseDualStack": true, + "UseFIPS": false, "Region": "us-east-1", - "UseFIPS": false + "UseDualStack": true } }, { @@ -199,9 +199,9 @@ } }, "params": { - "UseDualStack": false, + "UseFIPS": false, "Region": "us-east-1", - "UseFIPS": false + "UseDualStack": false } }, { @@ -210,9 +210,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "UseFIPS": true, "Region": "us-isob-east-1", - "UseFIPS": true + "UseDualStack": true } }, { @@ -223,9 +223,9 @@ } }, "params": { - "UseDualStack": false, + "UseFIPS": true, "Region": "us-isob-east-1", - "UseFIPS": true + "UseDualStack": false } }, { @@ -234,9 +234,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "UseFIPS": false, "Region": "us-isob-east-1", - "UseFIPS": false + "UseDualStack": true } }, { @@ -247,9 +247,9 @@ } }, "params": { - "UseDualStack": false, + "UseFIPS": false, "Region": "us-isob-east-1", - "UseFIPS": false + "UseDualStack": false } }, { @@ -260,9 +260,9 @@ } }, "params": { - 
"UseDualStack": false, - "Region": "us-east-1", "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -272,9 +272,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "Region": "us-east-1", "UseFIPS": true, + "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -284,9 +284,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "Region": "us-east-1", "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": true, "Endpoint": "https://example.com" } } diff --git a/models/apis/omics/2022-11-28/paginators-1.json b/models/apis/omics/2022-11-28/paginators-1.json index ea92fb1c539..596e4bd7c24 100644 --- a/models/apis/omics/2022-11-28/paginators-1.json +++ b/models/apis/omics/2022-11-28/paginators-1.json @@ -12,6 +12,12 @@ "limit_key": "maxResults", "result_key": "annotationStores" }, + "ListMultipartReadSetUploads": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "uploads" + }, "ListReadSetActivationJobs": { "input_token": "nextToken", "output_token": "nextToken", @@ -30,6 +36,12 @@ "limit_key": "maxResults", "result_key": "importJobs" }, + "ListReadSetUploadParts": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "parts" + }, "ListReadSets": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/models/apis/omics/2022-11-28/smoke.json b/models/apis/omics/2022-11-28/smoke.json new file mode 100644 index 00000000000..a9756813e4a --- /dev/null +++ b/models/apis/omics/2022-11-28/smoke.json @@ -0,0 +1,6 @@ +{ + "version": 1, + "defaultRegion": "us-west-2", + "testCases": [ + ] +} diff --git a/models/apis/opensearch/2021-01-01/api-2.json b/models/apis/opensearch/2021-01-01/api-2.json index 
de3550928b8..e1615676390 100644 --- a/models/apis/opensearch/2021-01-01/api-2.json +++ b/models/apis/opensearch/2021-01-01/api-2.json @@ -1823,7 +1823,8 @@ }, "DescribePackagesFilterValues":{ "type":"list", - "member":{"shape":"DescribePackagesFilterValue"} + "member":{"shape":"DescribePackagesFilterValue"}, + "min":1 }, "DescribePackagesRequest":{ "type":"structure", diff --git a/models/apis/route53resolver/2018-04-01/endpoint-rule-set-1.json b/models/apis/route53resolver/2018-04-01/endpoint-rule-set-1.json index 55379714836..500b0fc14fc 100644 --- a/models/apis/route53resolver/2018-04-01/endpoint-rule-set-1.json +++ b/models/apis/route53resolver/2018-04-01/endpoint-rule-set-1.json @@ -242,6 +242,44 @@ "conditions": [], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-gov-east-1" + ] + } + ], + "endpoint": { + "url": "https://route53resolver.us-gov-east-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-gov-west-1" + ] + } + ], + "endpoint": { + "url": "https://route53resolver.us-gov-west-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [], "endpoint": { diff --git a/models/apis/route53resolver/2018-04-01/endpoint-tests-1.json b/models/apis/route53resolver/2018-04-01/endpoint-tests-1.json index 13ea33851e7..d8c59410ea6 100644 --- a/models/apis/route53resolver/2018-04-01/endpoint-tests-1.json +++ b/models/apis/route53resolver/2018-04-01/endpoint-tests-1.json @@ -9,8 +9,8 @@ }, "params": { "Region": "af-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -22,8 +22,8 @@ }, "params": { "Region": "ap-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "Region": "ap-northeast-1", - 
"UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -48,8 +48,8 @@ }, "params": { "Region": "ap-northeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,8 +61,8 @@ }, "params": { "Region": "ap-northeast-3", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -74,8 +74,8 @@ }, "params": { "Region": "ap-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -100,8 +100,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -113,8 +113,8 @@ }, "params": { "Region": "ap-southeast-3", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -126,8 +126,8 @@ }, "params": { "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -139,8 +139,8 @@ }, "params": { "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -152,8 +152,8 @@ }, "params": { "Region": "eu-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -165,8 +165,8 @@ }, "params": { "Region": "eu-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -178,8 +178,8 @@ }, "params": { "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -191,8 +191,8 @@ }, "params": { "Region": "eu-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -204,8 +204,8 @@ }, "params": { "Region": 
"eu-west-3", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -217,8 +217,8 @@ }, "params": { "Region": "me-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -230,8 +230,8 @@ }, "params": { "Region": "sa-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -243,8 +243,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -256,8 +256,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -269,8 +269,8 @@ }, "params": { "Region": "us-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -282,8 +282,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -295,8 +295,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -308,8 +308,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -321,8 +321,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -334,8 +334,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -347,8 +347,8 @@ }, "params": { "Region": "cn-northwest-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -360,8 +360,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -373,8 +373,8 @@ }, "params": { "Region": "cn-north-1", 
- "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -386,8 +386,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -399,8 +399,21 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53resolver.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { @@ -412,34 +425,34 @@ }, "params": { "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://route53resolver-fips.us-gov-east-1.api.aws" + "url": "https://route53resolver.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://route53resolver-fips.us-gov-east-1.amazonaws.com" + "url": "https://route53resolver-fips.us-gov-east-1.api.aws" } }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -451,8 +464,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -464,8 +477,19 @@ }, "params": 
{ "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -477,8 +501,30 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -490,8 +536,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -503,8 +560,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -516,8 +573,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -529,8 +586,8 @@ } }, "params": { - "UseDualStack": false, 
"UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -541,8 +598,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -553,10 +610,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/models/apis/support/2013-04-15/api-2.json b/models/apis/support/2013-04-15/api-2.json index b1ed0ded044..c3ca8d35877 100644 --- a/models/apis/support/2013-04-15/api-2.json +++ b/models/apis/support/2013-04-15/api-2.json @@ -98,6 +98,19 @@ {"shape":"CaseIdNotFound"} ] }, + "DescribeCreateCaseOptions":{ + "name":"DescribeCreateCaseOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCreateCaseOptionsRequest"}, + "output":{"shape":"DescribeCreateCaseOptionsResponse"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"} + ] + }, "DescribeServices":{ "name":"DescribeServices", "http":{ @@ -122,6 +135,19 @@ {"shape":"InternalServerError"} ] }, + "DescribeSupportedLanguages":{ + "name":"DescribeSupportedLanguages", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSupportedLanguagesRequest"}, + "output":{"shape":"DescribeSupportedLanguagesResponse"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"} + ] + }, "DescribeTrustedAdvisorCheckRefreshStatuses":{ "name":"DescribeTrustedAdvisorCheckRefreshStatuses", "http":{ @@ -131,7 +157,8 @@ "input":{"shape":"DescribeTrustedAdvisorCheckRefreshStatusesRequest"}, "output":{"shape":"DescribeTrustedAdvisorCheckRefreshStatusesResponse"}, "errors":[ - {"shape":"InternalServerError"} + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"} ] }, 
"DescribeTrustedAdvisorCheckResult":{ @@ -143,7 +170,8 @@ "input":{"shape":"DescribeTrustedAdvisorCheckResultRequest"}, "output":{"shape":"DescribeTrustedAdvisorCheckResultResponse"}, "errors":[ - {"shape":"InternalServerError"} + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"} ] }, "DescribeTrustedAdvisorCheckSummaries":{ @@ -155,7 +183,8 @@ "input":{"shape":"DescribeTrustedAdvisorCheckSummariesRequest"}, "output":{"shape":"DescribeTrustedAdvisorCheckSummariesResponse"}, "errors":[ - {"shape":"InternalServerError"} + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"} ] }, "DescribeTrustedAdvisorChecks":{ @@ -167,7 +196,8 @@ "input":{"shape":"DescribeTrustedAdvisorChecksRequest"}, "output":{"shape":"DescribeTrustedAdvisorChecksResponse"}, "errors":[ - {"shape":"InternalServerError"} + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"} ] }, "RefreshTrustedAdvisorCheck":{ @@ -288,6 +318,7 @@ "type":"list", "member":{"shape":"Attachment"} }, + "AvailabilityErrorMessage":{"type":"string"}, "BeforeTime":{"type":"string"}, "Boolean":{"type":"boolean"}, "CaseCreationLimitExceeded":{ @@ -353,11 +384,12 @@ "max":10, "min":0 }, + "Code":{"type":"string"}, "Communication":{ "type":"structure", "members":{ "caseId":{"shape":"CaseId"}, - "body":{"shape":"CommunicationBody"}, + "body":{"shape":"ValidatedCommunicationBody"}, "submittedBy":{"shape":"SubmittedBy"}, "timeCreated":{"shape":"TimeCreated"}, "attachmentSet":{"shape":"AttachmentSet"} @@ -372,6 +404,20 @@ "type":"list", "member":{"shape":"Communication"} }, + "CommunicationTypeOptions":{ + "type":"structure", + "members":{ + "type":{"shape":"Type"}, + "supportedHours":{"shape":"SupportedHoursList"}, + "datesWithoutSupport":{"shape":"DatesWithoutSupportList"} + } + }, + "CommunicationTypeOptionsList":{ + "type":"list", + "member":{"shape":"CommunicationTypeOptions"}, + "max":100, + "min":1 + }, "CreateCaseRequest":{ "type":"structure", "required":[ @@ -397,6 +443,17 @@ } 
}, "Data":{"type":"blob"}, + "DateInterval":{ + "type":"structure", + "members":{ + "startDateTime":{"shape":"ValidatedDateTime"}, + "endDateTime":{"shape":"ValidatedDateTime"} + } + }, + "DatesWithoutSupportList":{ + "type":"list", + "member":{"shape":"DateInterval"} + }, "DescribeAttachmentLimitExceeded":{ "type":"structure", "members":{ @@ -456,6 +513,28 @@ "nextToken":{"shape":"NextToken"} } }, + "DescribeCreateCaseOptionsRequest":{ + "type":"structure", + "required":[ + "issueType", + "serviceCode", + "language", + "categoryCode" + ], + "members":{ + "issueType":{"shape":"IssueType"}, + "serviceCode":{"shape":"ServiceCode"}, + "language":{"shape":"Language"}, + "categoryCode":{"shape":"CategoryCode"} + } + }, + "DescribeCreateCaseOptionsResponse":{ + "type":"structure", + "members":{ + "languageAvailability":{"shape":"ValidatedLanguageAvailability"}, + "communicationTypes":{"shape":"CommunicationTypeOptionsList"} + } + }, "DescribeServicesRequest":{ "type":"structure", "members":{ @@ -481,6 +560,25 @@ "severityLevels":{"shape":"SeverityLevelsList"} } }, + "DescribeSupportedLanguagesRequest":{ + "type":"structure", + "required":[ + "issueType", + "serviceCode", + "categoryCode" + ], + "members":{ + "issueType":{"shape":"ValidatedIssueTypeString"}, + "serviceCode":{"shape":"ValidatedServiceCode"}, + "categoryCode":{"shape":"ValidatedCategoryCode"} + } + }, + "DescribeSupportedLanguagesResponse":{ + "type":"structure", + "members":{ + "supportedLanguages":{"shape":"SupportedLanguagesList"} + } + }, "DescribeTrustedAdvisorCheckRefreshStatusesRequest":{ "type":"structure", "required":["checkIds"], @@ -537,8 +635,10 @@ "checks":{"shape":"TrustedAdvisorCheckList"} } }, + "Display":{"type":"string"}, "DisplayId":{"type":"string"}, "Double":{"type":"double"}, + "EndTime":{"type":"string"}, "ErrorMessage":{"type":"string"}, "ExpiryTime":{"type":"string"}, "FileName":{"type":"string"}, @@ -630,6 +730,7 @@ "type":"list", "member":{"shape":"SeverityLevel"} }, + 
"StartTime":{"type":"string"}, "Status":{"type":"string"}, "String":{"type":"string"}, "StringList":{ @@ -638,6 +739,39 @@ }, "Subject":{"type":"string"}, "SubmittedBy":{"type":"string"}, + "SupportedHour":{ + "type":"structure", + "members":{ + "startTime":{"shape":"StartTime"}, + "endTime":{"shape":"EndTime"} + } + }, + "SupportedHoursList":{ + "type":"list", + "member":{"shape":"SupportedHour"} + }, + "SupportedLanguage":{ + "type":"structure", + "members":{ + "code":{"shape":"Code"}, + "language":{"shape":"Language"}, + "display":{"shape":"Display"} + } + }, + "SupportedLanguagesList":{ + "type":"list", + "member":{"shape":"SupportedLanguage"}, + "max":100, + "min":0 + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"AvailabilityErrorMessage"} + }, + "exception":true, + "synthetic":true + }, "TimeCreated":{"type":"string"}, "TrustedAdvisorCategorySpecificSummary":{ "type":"structure", @@ -768,6 +902,37 @@ "resourcesIgnored":{"shape":"Long"}, "resourcesSuppressed":{"shape":"Long"} } + }, + "Type":{"type":"string"}, + "ValidatedCategoryCode":{ + "type":"string", + "max":100, + "min":0 + }, + "ValidatedCommunicationBody":{ + "type":"string", + "max":8000, + "min":1 + }, + "ValidatedDateTime":{ + "type":"string", + "max":30, + "min":8 + }, + "ValidatedIssueTypeString":{ + "type":"string", + "max":22, + "min":9 + }, + "ValidatedLanguageAvailability":{ + "type":"string", + "max":100, + "min":0 + }, + "ValidatedServiceCode":{ + "type":"string", + "max":100, + "min":0 } } } diff --git a/models/apis/support/2013-04-15/docs-2.json b/models/apis/support/2013-04-15/docs-2.json index 934c89599db..4240140631d 100644 --- a/models/apis/support/2013-04-15/docs-2.json +++ b/models/apis/support/2013-04-15/docs-2.json @@ -8,8 +8,10 @@ "DescribeAttachment": "

Returns the attachment that has the specified ID. Attachments can include screenshots, error logs, or other files that describe your issue. Attachment IDs are generated by the case management system when you add an attachment to a case or case communication. Attachment IDs are returned in the AttachmentDetails objects that are returned by the DescribeCommunications operation.

", "DescribeCases": "

Returns a list of cases that you specify by passing one or more case IDs. You can use the afterTime and beforeTime parameters to filter the cases by date. You can set values for the includeResolvedCases and includeCommunications parameters to specify how much information to return.

The response returns the following in JSON format:

Case data is available for 12 months after creation. If a case was created more than 12 months ago, a request might return an error.

", "DescribeCommunications": "

Returns communications and attachments for one or more support cases. Use the afterTime and beforeTime parameters to filter by date. You can use the caseId parameter to restrict the results to a specific case.

Case data is available for 12 months after creation. If a case was created more than 12 months ago, a request for data might cause an error.

You can use the maxResults and nextToken parameters to control the pagination of the results. Set maxResults to the number of cases that you want to display on each page, and use nextToken to specify the resumption of pagination.

", + "DescribeCreateCaseOptions": "

Returns a list of CreateCaseOption types along with the corresponding supported hours and language availability. You can specify the language categoryCode, issueType and serviceCode used to retrieve the CreateCaseOptions.

", "DescribeServices": "

Returns the current list of Amazon Web Services services and a list of service categories for each service. You then use service names and categories in your CreateCase requests. Each Amazon Web Services service has its own set of categories.

The service codes and category codes correspond to the values that appear in the Service and Category lists on the Amazon Web Services Support Center Create Case page. The values in those fields don't necessarily match the service codes and categories returned by the DescribeServices operation. Always use the service codes and categories that the DescribeServices operation returns, so that you have the most recent set of service and category codes.

", "DescribeSeverityLevels": "

Returns the list of severity levels that you can assign to a support case. The severity level for a case is also a field in the CaseDetails data type that you include for a CreateCase request.

", + "DescribeSupportedLanguages": "

Returns a list of supported languages for a specified categoryCode, issueType and serviceCode. The returned supported languages will include a ISO 639-1 code for the language, and the language display name.

", "DescribeTrustedAdvisorCheckRefreshStatuses": "

Returns the refresh status of the Trusted Advisor checks that have the specified check IDs. You can get the check IDs by calling the DescribeTrustedAdvisorChecks operation.

Some checks are refreshed automatically, and you can't return their refresh statuses by using the DescribeTrustedAdvisorCheckRefreshStatuses operation. If you call this operation for these checks, you might see an InvalidParameterValue error.

To call the Trusted Advisor operations in the Amazon Web Services Support API, you must use the US East (N. Virginia) endpoint. Currently, the US West (Oregon) and Europe (Ireland) endpoints don't support the Trusted Advisor operations. For more information, see About the Amazon Web Services Support API in the Amazon Web Services Support User Guide.

", "DescribeTrustedAdvisorCheckResult": "

Returns the results of the Trusted Advisor check that has the specified check ID. You can get the check IDs by calling the DescribeTrustedAdvisorChecks operation.

The response contains a TrustedAdvisorCheckResult object, which contains these three objects:

In addition, the response contains these fields:

To call the Trusted Advisor operations in the Amazon Web Services Support API, you must use the US East (N. Virginia) endpoint. Currently, the US West (Oregon) and Europe (Ireland) endpoints don't support the Trusted Advisor operations. For more information, see About the Amazon Web Services Support API in the Amazon Web Services Support User Guide.

", "DescribeTrustedAdvisorCheckSummaries": "

Returns the results for the Trusted Advisor check summaries for the check IDs that you specified. You can get the check IDs by calling the DescribeTrustedAdvisorChecks operation.

The response contains an array of TrustedAdvisorCheckSummary objects.

To call the Trusted Advisor operations in the Amazon Web Services Support API, you must use the US East (N. Virginia) endpoint. Currently, the US West (Oregon) and Europe (Ireland) endpoints don't support the Trusted Advisor operations. For more information, see About the Amazon Web Services Support API in the Amazon Web Services Support User Guide.

", @@ -111,6 +113,12 @@ "AddAttachmentsToSetRequest$attachments": "

One or more attachments to add to the set. You can add up to three attachments per set. The size limit is 5 MB per attachment.

In the Attachment object, use the data parameter to specify the contents of the attachment file. In the previous request syntax, the value for data appear as blob, which is represented as a base64-encoded string. The value for fileName is the name of the attachment, such as troubleshoot-screenshot.png.

" } }, + "AvailabilityErrorMessage": { + "base": null, + "refs": { + "ThrottlingException$message": null + } + }, "BeforeTime": { "base": null, "refs": { @@ -131,7 +139,7 @@ } }, "CaseDetails": { - "base": "

A JSON-formatted object that contains the metadata for a support case. It is contained in the response from a DescribeCases request. CaseDetails contains the following fields:

", + "base": "

A JSON-formatted object that contains the metadata for a support case. It is contained in the response from a DescribeCases request. CaseDetails contains the following fields:

", "refs": { "CaseList$member": null } @@ -183,7 +191,8 @@ "refs": { "CaseDetails$categoryCode": "

The category of problem for the support case.

", "Category$code": "

The category code for the support case.

", - "CreateCaseRequest$categoryCode": "

The category of problem for the support case. You also use the DescribeServices operation to get the category code for a service. Each Amazon Web Services service defines its own set of category codes.

" + "CreateCaseRequest$categoryCode": "

The category of problem for the support case. You also use the DescribeServices operation to get the category code for a service. Each Amazon Web Services service defines its own set of category codes.

", + "DescribeCreateCaseOptionsRequest$categoryCode": "

The category of problem for the support case. You also use the DescribeServices operation to get the category code for a service. Each Amazon Web Services service defines its own set of category codes.

" } }, "CategoryList": { @@ -212,6 +221,12 @@ "CreateCaseRequest$ccEmailAddresses": "

A list of email addresses that Amazon Web Services Support copies on case correspondence. Amazon Web Services Support identifies the account that creates the case when you specify your Amazon Web Services credentials in an HTTP POST method or use the Amazon Web Services SDKs.

" } }, + "Code": { + "base": null, + "refs": { + "SupportedLanguage$code": "

2-digit ISO 639-1 code, e.g. en.

" + } + }, "Communication": { "base": "

A communication associated with a support case. The communication consists of the case ID, the message body, attachment information, the submitter of the communication, and the date and time of the communication.

", "refs": { @@ -222,7 +237,6 @@ "base": null, "refs": { "AddCommunicationToCaseRequest$communicationBody": "

The body of an email communication to add to the support case.

", - "Communication$body": "

The text of the communication between the customer and Amazon Web Services Support.

", "CreateCaseRequest$communicationBody": "

The communication body text that describes the issue. This text appears in the Description field on the Amazon Web Services Support Center Create Case page.

" } }, @@ -233,6 +247,18 @@ "RecentCaseCommunications$communications": "

The five most recent communications associated with the case.

" } }, + "CommunicationTypeOptions": { + "base": "

A JSON-formatted object that contains the CommunicationTypeOptions for creating a case for a certain communication channel. It is contained in the response from a DescribeCreateCaseOptions request. CommunicationTypeOptions contains the following fields:

", + "refs": { + "CommunicationTypeOptionsList$member": null + } + }, + "CommunicationTypeOptionsList": { + "base": null, + "refs": { + "DescribeCreateCaseOptionsResponse$communicationTypes": "

A JSON-formatted array that contains the available communication type options, along with the available support timeframes for the given inputs.

" + } + }, "CreateCaseRequest": { "base": null, "refs": { @@ -249,6 +275,18 @@ "Attachment$data": "

The content of the attachment file.

" } }, + "DateInterval": { + "base": "

Date and time (UTC) format in RFC 3339 : 'yyyy-MM-dd'T'HH:mm:ss.SSSZZ'.

", + "refs": { + "DatesWithoutSupportList$member": null + } + }, + "DatesWithoutSupportList": { + "base": null, + "refs": { + "CommunicationTypeOptions$datesWithoutSupport": "

A JSON-formatted list containing date and time ranges for periods without support.

" + } + }, "DescribeAttachmentLimitExceeded": { "base": "

The limit for the number of DescribeAttachment requests in a short period of time has been exceeded.

", "refs": { @@ -284,6 +322,16 @@ "refs": { } }, + "DescribeCreateCaseOptionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeCreateCaseOptionsResponse": { + "base": null, + "refs": { + } + }, "DescribeServicesRequest": { "base": null, "refs": { @@ -304,6 +352,16 @@ "refs": { } }, + "DescribeSupportedLanguagesRequest": { + "base": null, + "refs": { + } + }, + "DescribeSupportedLanguagesResponse": { + "base": null, + "refs": { + } + }, "DescribeTrustedAdvisorCheckRefreshStatusesRequest": { "base": null, "refs": { @@ -344,6 +402,12 @@ "refs": { } }, + "Display": { + "base": null, + "refs": { + "SupportedLanguage$display": "

Language display value, e.g. ENGLISH.

" + } + }, "DisplayId": { "base": null, "refs": { @@ -358,6 +422,12 @@ "TrustedAdvisorCostOptimizingSummary$estimatedPercentMonthlySavings": "

The estimated percentage of savings that might be realized if the recommended operations are taken.

" } }, + "EndTime": { + "base": null, + "refs": { + "SupportedHour$endTime": "

End Time. RFC 3339 format 'HH:mm:ss.SSS'.

" + } + }, "ErrorMessage": { "base": null, "refs": { @@ -405,17 +475,20 @@ "IssueType": { "base": null, "refs": { - "CreateCaseRequest$issueType": "

The type of issue for the case. You can specify customer-service or technical. If you don't specify a value, the default is technical.

" + "CreateCaseRequest$issueType": "

The type of issue for the case. You can specify customer-service or technical. If you don't specify a value, the default is technical.

", + "DescribeCreateCaseOptionsRequest$issueType": "

The type of issue for the case. You can specify customer-service or technical. If you don't specify a value, the default is technical.

" } }, "Language": { "base": null, "refs": { - "CaseDetails$language": "

The language in which Amazon Web Services Support handles the case. Amazon Web Services Support currently supports English (\"en\") and Japanese (\"ja\"). You must specify the ISO 639-1 code for the language parameter if you want support in that language.

", - "CreateCaseRequest$language": "

The language in which Amazon Web Services Support handles the case. Amazon Web Services Support currently supports English (\"en\") and Japanese (\"ja\"). You must specify the ISO 639-1 code for the language parameter if you want support in that language.

", - "DescribeCasesRequest$language": "

The language in which Amazon Web Services Support handles the case. Amazon Web Services Support currently supports English (\"en\") and Japanese (\"ja\"). You must specify the ISO 639-1 code for the language parameter if you want support in that language.

", - "DescribeServicesRequest$language": "

The language in which Amazon Web Services Support handles the case. Amazon Web Services Support currently supports English (\"en\") and Japanese (\"ja\"). You must specify the ISO 639-1 code for the language parameter if you want support in that language.

", - "DescribeSeverityLevelsRequest$language": "

The language in which Amazon Web Services Support handles the case. Amazon Web Services Support currently supports English (\"en\") and Japanese (\"ja\"). You must specify the ISO 639-1 code for the language parameter if you want support in that language.

" + "CaseDetails$language": "

The language in which Amazon Web Services Support handles the case. Amazon Web Services Support currently supports Chinese (\"zh\"), English (\"en\"), Japanese (\"ja\") and Korean (\"ko\"). You must specify the ISO 639-1 code for the language parameter if you want support in that language.

", + "CreateCaseRequest$language": "

The language in which Amazon Web Services Support handles the case. Amazon Web Services Support currently supports Chinese (\"zh\"), English (\"en\"), Japanese (\"ja\") and Korean (\"ko\"). You must specify the ISO 639-1 code for the language parameter if you want support in that language.

", + "DescribeCasesRequest$language": "

The language in which Amazon Web Services Support handles the case. Amazon Web Services Support currently supports Chinese (\"zh\"), English (\"en\"), Japanese (\"ja\") and Korean (\"ko\"). You must specify the ISO 639-1 code for the language parameter if you want support in that language.

", + "DescribeCreateCaseOptionsRequest$language": "

The language in which Amazon Web Services Support handles the case. Amazon Web Services Support currently supports Chinese (\"zh\"), English (\"en\"), Japanese (\"ja\") and Korean (\"ko\"). You must specify the ISO 639-1 code for the language parameter if you want support in that language.

", + "DescribeServicesRequest$language": "

The language in which Amazon Web Services Support handles the case. Amazon Web Services Support currently supports Chinese (\"zh\"), English (\"en\"), Japanese (\"ja\") and Korean (\"ko\"). You must specify the ISO 639-1 code for the language parameter if you want support in that language.

", + "DescribeSeverityLevelsRequest$language": "

The language in which Amazon Web Services Support handles the case. Amazon Web Services Support currently supports Chinese (\"zh\"), English (\"en\"), Japanese (\"ja\") and Korean (\"ko\"). You must specify the ISO 639-1 code for the language parameter if you want support in that language.

", + "SupportedLanguage$language": "

Full language description, e.g. ENGLISH.

" } }, "Long": { @@ -488,6 +561,7 @@ "refs": { "CaseDetails$serviceCode": "

The code for the Amazon Web Services service. You can get a list of codes and the corresponding service names by calling DescribeServices.

", "CreateCaseRequest$serviceCode": "

The code for the Amazon Web Services service. You can use the DescribeServices operation to get the possible serviceCode values.

", + "DescribeCreateCaseOptionsRequest$serviceCode": "

The code for the Amazon Web Services service. You can use the DescribeServices operation to get the possible serviceCode values.

", "Service$code": "

The code for an Amazon Web Services service returned by the DescribeServices response. The name element contains the corresponding friendly name.

", "ServiceCodeList$member": null } @@ -541,6 +615,12 @@ "DescribeSeverityLevelsResponse$severityLevels": "

The available severity levels for the support case. Available severity levels are defined by your service level agreement with Amazon Web Services.

" } }, + "StartTime": { + "base": null, + "refs": { + "SupportedHour$startTime": "

Start Time. RFC 3339 format 'HH:mm:ss.SSS'.

" + } + }, "Status": { "base": null, "refs": { @@ -592,7 +672,36 @@ "base": null, "refs": { "CaseDetails$submittedBy": "

The email address of the account that submitted the case.

", - "Communication$submittedBy": "

The identity of the account that submitted, or responded to, the support case. Customer entries include the role or IAM user as well as the email address. For example, \"AdminRole (Role) <janedoe@example.com>. Entries from the Amazon Web Services Support team display \"Amazon Web Services,\" and don't show an email address.

" + "Communication$submittedBy": "

The identity of the account that submitted, or responded to, the support case. Customer entries include the IAM role as well as the email address (for example, \"AdminRole (Role) <janedoe@example.com>\"). Entries from the Amazon Web Services Support team display \"Amazon Web Services,\" and don't show an email address.

" + } + }, + "SupportedHour": { + "base": "

Time range object with startTime and endTime range in RFC 3339 format. 'HH:mm:ss.SSS'.

", + "refs": { + "SupportedHoursList$member": null + } + }, + "SupportedHoursList": { + "base": null, + "refs": { + "CommunicationTypeOptions$supportedHours": "

A JSON-formatted list containing time ranges when support is available.

" + } + }, + "SupportedLanguage": { + "base": "

A JSON-formatted object that contains the available ISO 639-1 language code, language name and language display value. The language code is what should be used in the CreateCase call.

", + "refs": { + "SupportedLanguagesList$member": null + } + }, + "SupportedLanguagesList": { + "base": null, + "refs": { + "DescribeSupportedLanguagesResponse$supportedLanguages": "

A JSON-formatted array that contains the available ISO 639-1 language codes.

" + } + }, + "ThrottlingException": { + "base": "

You have exceeded the maximum allowed TPS (Transactions Per Second) for the operations.

", + "refs": { } }, "TimeCreated": { @@ -676,6 +785,49 @@ "TrustedAdvisorCheckResult$resourcesSummary": null, "TrustedAdvisorCheckSummary$resourcesSummary": null } + }, + "Type": { + "base": null, + "refs": { + "CommunicationTypeOptions$type": "

A string value indicating the communication type. At the moment the type value can assume one of 3 values: chat, web and call.

" + } + }, + "ValidatedCategoryCode": { + "base": null, + "refs": { + "DescribeSupportedLanguagesRequest$categoryCode": "

The category of problem for the support case. You also use the DescribeServices operation to get the category code for a service. Each Amazon Web Services service defines its own set of category codes.

" + } + }, + "ValidatedCommunicationBody": { + "base": null, + "refs": { + "Communication$body": "

The text of the communication between the customer and Amazon Web Services Support.

" + } + }, + "ValidatedDateTime": { + "base": null, + "refs": { + "DateInterval$startDateTime": "

A JSON object containing the start date and time (UTC). Date and time format is RFC 3339 : 'yyyy-MM-dd'T'HH:mm:ss.SSSZZ'.

", + "DateInterval$endDateTime": "

End Date Time (UTC). RFC 3339 format : 'yyyy-MM-dd'T'HH:mm:ss.SSSZZ'.

" + } + }, + "ValidatedIssueTypeString": { + "base": null, + "refs": { + "DescribeSupportedLanguagesRequest$issueType": "

The type of issue for the case. You can specify customer-service or technical.

" + } + }, + "ValidatedLanguageAvailability": { + "base": null, + "refs": { + "DescribeCreateCaseOptionsResponse$languageAvailability": "

Language availability can be any of the following:

" + } + }, + "ValidatedServiceCode": { + "base": null, + "refs": { + "DescribeSupportedLanguagesRequest$serviceCode": "

The code for the Amazon Web Services service. You can use the DescribeServices operation to get the possible serviceCode values.

" + } } } } diff --git a/models/apis/support/2013-04-15/endpoint-rule-set-1.json b/models/apis/support/2013-04-15/endpoint-rule-set-1.json index b4c1ecb7491..3898b7fffc1 100644 --- a/models/apis/support/2013-04-15/endpoint-rule-set-1.json +++ b/models/apis/support/2013-04-15/endpoint-rule-set-1.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,64 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "isSet", "argv": [ { - "fn": "getAttr", - 
"argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws" + "ref": "Region" + } ] } ], @@ -128,22 +111,13 @@ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ { - "ref": "UseDualStack" - }, - true - ] + "ref": "Region" + } + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -151,1146 +125,479 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] - } + }, + "aws" ] }, { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseFIPS" + }, + false ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://support-fips.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseDualStack" + }, + false ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://support-fips.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" + "endpoint": { + "url": "https://support.us-east-1.amazonaws.com", + 
"properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "support", + "signingRegion": "us-east-1" + } + ] }, - true - ] - } - ], - "type": "tree", - "rules": [ + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] - } + }, + "aws-cn" ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://support.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://support.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "support" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" }, - "name" - ] - }, - "aws-cn" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseFIPS" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://support.cn-north-1.amazonaws.com.cn", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "support", + "signingRegion": "cn-north-1" + } + ] }, - true - ] - } - ], - "type": "tree", - "rules": [ + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": 
[ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] - } + }, + "aws-us-gov" ] }, { "fn": "booleanEquals", "argv": [ - true, + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://support.us-gov-west-1.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "support", + "signingRegion": "us-gov-west-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] - } + }, + "aws-iso" ] - } - ], - "type": "tree", - "rules": [ + }, { - "conditions": [], - "endpoint": { - "url": "https://support-fips.{Region}.api.amazonwebservices.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, { - "ref": "UseFIPS" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://support.us-iso-east-1.c2s.ic.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "support", + "signingRegion": "us-iso-east-1" + } + ] }, - true - ] - } - ], - "type": "tree", - "rules": [ + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] + }, + "aws-iso-b" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + 
}, + false + ] + } + ], + "endpoint": { + "url": "https://support.us-isob-east-1.sc2s.sgov.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "support", + "signingRegion": "us-isob-east-1" } ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://support-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://support-fips.{Region}.amazonaws.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": 
"tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://support-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] }, - true + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ + true, { - "ref": "PartitionResult" - }, - "supportsDualStack" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://support.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } ] } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } - ], + ] + }, + { + "conditions": [], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://support.{Region}.api.amazonwebservices.com.cn", + "url": "https://support.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://support.cn-north-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-north-1", - "signingName": "support" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - 
"ref": "PartitionResult" - }, - "name" - ] - }, - "aws-us-gov" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://support-fips.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://support-fips.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } 
- ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://support.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://support.us-gov-west-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-gov-west-1", - "signingName": "support" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws-iso" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://support-fips.{Region}.c2s.ic.gov", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://support.us-iso-east-1.c2s.ic.gov", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-iso-east-1", - "signingName": "support" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws-iso-b" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": 
"booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://support-fips.{Region}.sc2s.sgov.gov", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://support.us-isob-east-1.sc2s.sgov.gov", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-isob-east-1", - "signingName": "support" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://support-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" 
- }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://support-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://support.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-global" - ] - } - ], - "endpoint": { - "url": "https://support.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "support" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-cn-global" - ] - } - ], - "endpoint": { - "url": "https://support.cn-north-1.amazonaws.com.cn", - 
"properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-north-1", - "signingName": "support" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-us-gov-global" - ] - } - ], - "endpoint": { - "url": "https://support.us-gov-west-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-gov-west-1", - "signingName": "support" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-iso-global" - ] - } - ], - "endpoint": { - "url": "https://support.us-iso-east-1.c2s.ic.gov", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-iso-east-1", - "signingName": "support" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-iso-b-global" - ] - } - ], - "endpoint": { - "url": "https://support.us-isob-east-1.sc2s.sgov.gov", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-isob-east-1", - "signingName": "support" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://support.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/models/apis/support/2013-04-15/endpoint-tests-1.json b/models/apis/support/2013-04-15/endpoint-tests-1.json index 03d6016ad73..6bd6b8fa747 100644 --- a/models/apis/support/2013-04-15/endpoint-tests-1.json +++ b/models/apis/support/2013-04-15/endpoint-tests-1.json @@ -17,9 +17,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "aws-global", "UseFIPS": false, - "Region": "aws-global" + "UseDualStack": false } }, { 
@@ -30,9 +30,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -43,9 +43,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -56,9 +56,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -78,9 +78,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -100,9 +100,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "aws-cn-global", "UseFIPS": false, - "Region": "aws-cn-global" + "UseDualStack": false } }, { @@ -113,9 +113,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -126,9 +126,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -139,9 +139,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -161,9 +161,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -183,9 +183,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "aws-us-gov-global", "UseFIPS": false, - "Region": "aws-us-gov-global" + "UseDualStack": false } }, { @@ -196,9 +196,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -209,9 +209,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -222,9 +222,9 @@ } }, "params": { - "UseDualStack": 
true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -244,9 +244,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -266,9 +266,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "aws-iso-global", "UseFIPS": false, - "Region": "aws-iso-global" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -279,9 +290,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -301,9 +323,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -323,9 +345,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "aws-iso-b-global", "UseFIPS": false, - "Region": "aws-iso-b-global" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -336,9 +369,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": 
false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -358,22 +402,35 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { "UseFIPS": false, - "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -383,9 +440,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -395,11 +452,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index f3e84a7d000..ce59619357a 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ 
-18016,6 +18016,12 @@ "cn-northwest-1" : { } } }, + "emr-serverless" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "es" : { "endpoints" : { "cn-north-1" : { }, diff --git a/service/connect/api.go b/service/connect/api.go index 0beeb59378e..0cdab1aa930 100644 --- a/service/connect/api.go +++ b/service/connect/api.go @@ -8273,7 +8273,7 @@ func (c *Connect) GetMetricDataV2Request(input *GetMetricDataV2Input) (req *requ // the previous version of this API. It has new metrics, offers filtering at // a metric level, and offers the ability to filter and group data by channels, // queues, routing profiles, agents, and agent hierarchy levels. It can retrieve -// historical data for the last 14 days, in 24-hour intervals. +// historical data for the last 35 days, in 24-hour intervals. // // For a description of the historical metrics that are supported by GetMetricDataV2 // and GetMetricData, see Historical metrics definitions (https://docs.aws.amazon.com/connect/latest/adminguide/historical-metrics-definitions.html) @@ -18070,6 +18070,12 @@ func (c *Connect) UpdatePhoneNumberRequest(input *UpdatePhoneNumberInput) (req * // or traffic distribution group to another Amazon Connect instance or traffic // distribution group in the same Amazon Web Services Region. // +// After using this API, you must verify that the phone number is attached to +// the correct flow in the target instance or traffic distribution group. You +// need to do this because the API switches only the phone number to a new instance +// or traffic distribution group. It doesn't migrate the flow configuration +// of the phone number, too. +// // You can call DescribePhoneNumber (https://docs.aws.amazon.com/connect/latest/APIReference/API_DescribePhoneNumber.html) // API to verify the status of a previous UpdatePhoneNumber (https://docs.aws.amazon.com/connect/latest/APIReference/API_UpdatePhoneNumber.html) // operation. 
@@ -35312,8 +35318,8 @@ type GetMetricDataV2Input struct { // The timestamp, in UNIX Epoch time format, at which to start the reporting // interval for the retrieval of historical metrics data. The time must be before // the end time timestamp. The time range between the start and end time must - // be less than 24 hours. The start time cannot be earlier than 14 days before - // the time of the request. Historical metrics are available for 14 days. + // be less than 24 hours. The start time cannot be earlier than 35 days before + // the time of the request. Historical metrics are available for 35 days. // // StartTime is a required field StartTime *time.Time `type:"timestamp" required:"true"` @@ -42714,6 +42720,8 @@ type MetricV2 struct { MetricFilters []*MetricFilterV2 `type:"list"` // The name of the metric. + // + // This parameter is required. The following Required = No is incorrect. Name *string `type:"string"` // Contains information about the threshold for service level metrics. diff --git a/service/elasticache/api.go b/service/elasticache/api.go index a3e5e0ce8db..067e71f5145 100644 --- a/service/elasticache/api.go +++ b/service/elasticache/api.go @@ -8140,7 +8140,7 @@ type CacheCluster struct { // The network type associated with the cluster, either ipv4 | ipv6. IPv6 is // supported for workloads using Redis engine version 6.2 onward or Memcached - // engine version 1.6.6 on all instances built on the Nitro system (https://aws.amazon.com/ec2/nitro/). + // engine version 1.6.6 on all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). IpDiscovery *string `type:"string" enum:"IpDiscovery"` // Returns the destination, format and type of the logs. @@ -8148,7 +8148,7 @@ type CacheCluster struct { // Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads // using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on - // all instances built on the Nitro system (https://aws.amazon.com/ec2/nitro/). 
+ // all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). NetworkType *string `type:"string" enum:"NetworkType"` // Describes a notification topic and its status. Notification topics are used @@ -8463,7 +8463,7 @@ type CacheEngineVersion struct { // The name of the cache parameter group family associated with this cache engine. // // Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | - // redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x + // redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis7 CacheParameterGroupFamily *string `type:"string"` // The name of the cache engine. @@ -8930,7 +8930,7 @@ type CacheParameterGroup struct { // is compatible with. // // Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | - // redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | + // redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis7 CacheParameterGroupFamily *string `type:"string"` // The name of the cache parameter group. @@ -9218,7 +9218,7 @@ type CacheSubnetGroup struct { // Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis // engine version 6.2 onward or Memcached engine version 1.6.6 on all instances - // built on the Nitro system (https://aws.amazon.com/ec2/nitro/). + // built on the Nitro system (http://aws.amazon.com/ec2/nitro/). SupportedNetworkTypes []*string `type:"list" enum:"NetworkType"` // The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet @@ -9774,7 +9774,7 @@ type CreateCacheClusterInput struct { // The network type you choose when modifying a cluster, either ipv4 | ipv6. // IPv6 is supported for workloads using Redis engine version 6.2 onward or // Memcached engine version 1.6.6 on all instances built on the Nitro system - // (https://aws.amazon.com/ec2/nitro/). + // (http://aws.amazon.com/ec2/nitro/). IpDiscovery *string `type:"string" enum:"IpDiscovery"` // Specifies the destination, format and type of the logs. 
@@ -9782,7 +9782,7 @@ type CreateCacheClusterInput struct { // Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads // using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on - // all instances built on the Nitro system (https://aws.amazon.com/ec2/nitro/). + // all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). NetworkType *string `type:"string" enum:"NetworkType"` // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service @@ -9904,9 +9904,6 @@ type CreateCacheClusterInput struct { Tags []*Tag `locationNameList:"Tag" type:"list"` // A flag that enables in-transit encryption when set to true. - // - // Only available when creating a cache cluster in an Amazon VPC using Memcached - // version 1.6.12 or later. TransitEncryptionEnabled *bool `type:"boolean"` } @@ -10160,7 +10157,7 @@ type CreateCacheParameterGroupInput struct { // can be used with. // // Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | - // redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x + // redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis7 // // CacheParameterGroupFamily is a required field CacheParameterGroupFamily *string `type:"string" required:"true"` @@ -10753,6 +10750,14 @@ type CreateReplicationGroupInput struct { // see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html). CacheSubnetGroupName *string `type:"string"` + // Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you + // must first set the cluster mode to Compatible. Compatible mode allows your + // Redis clients to connect using both cluster mode enabled and cluster mode + // disabled. After you migrate all Redis clients to use cluster mode enabled, + // you can then complete cluster mode configuration and set the cluster mode + // to Enabled. + ClusterMode *string `type:"string" enum:"ClusterMode"` + // Enables data tiering. 
Data tiering is only supported for replication groups // using the r6gd node type. This parameter must be set to true when using r6gd // nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html). @@ -10780,7 +10785,7 @@ type CreateReplicationGroupInput struct { // The network type you choose when creating a replication group, either ipv4 // | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward // or Memcached engine version 1.6.6 on all instances built on the Nitro system - // (https://aws.amazon.com/ec2/nitro/). + // (http://aws.amazon.com/ec2/nitro/). IpDiscovery *string `type:"string" enum:"IpDiscovery"` // The ID of the KMS key used to encrypt the disk in the cluster. @@ -10795,7 +10800,7 @@ type CreateReplicationGroupInput struct { // Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads // using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on - // all instances built on the Nitro system (https://aws.amazon.com/ec2/nitro/). + // all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). NetworkType *string `type:"string" enum:"NetworkType"` // A list of node group (shard) configuration options. Each node group (shard) @@ -10985,8 +10990,10 @@ type CreateReplicationGroupInput struct { // connections only. // // Setting TransitEncryptionMode to required is a two-step process that requires - // you to first set the TransitEncryptionMode to preferred first, after that - // you can set TransitEncryptionMode to required. + // you to first set the TransitEncryptionMode to preferred, after that you can + // set TransitEncryptionMode to required. + // + // This process will not trigger the replacement of the replication group. TransitEncryptionMode *string `type:"string" enum:"TransitEncryptionMode"` // The user group to associate with the replication group. 
@@ -11088,6 +11095,12 @@ func (s *CreateReplicationGroupInput) SetCacheSubnetGroupName(v string) *CreateR return s } +// SetClusterMode sets the ClusterMode field's value. +func (s *CreateReplicationGroupInput) SetClusterMode(v string) *CreateReplicationGroupInput { + s.ClusterMode = &v + return s +} + // SetDataTieringEnabled sets the DataTieringEnabled field's value. func (s *CreateReplicationGroupInput) SetDataTieringEnabled(v bool) *CreateReplicationGroupInput { s.DataTieringEnabled = &v @@ -15484,7 +15497,7 @@ type EngineDefaults struct { // default parameters apply. // // Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | - // redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.0 | redis6.x + // redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.0 | redis6.x | redis7 CacheParameterGroupFamily *string `type:"string"` // Provides an identifier to allow retrieval of paginated results. @@ -16784,7 +16797,7 @@ type ModifyCacheClusterInput struct { // The network type you choose when modifying a cluster, either ipv4 | ipv6. // IPv6 is supported for workloads using Redis engine version 6.2 onward or // Memcached engine version 1.6.6 on all instances built on the Nitro system - // (https://aws.amazon.com/ec2/nitro/). + // (http://aws.amazon.com/ec2/nitro/). IpDiscovery *string `type:"string" enum:"IpDiscovery"` // Specifies the destination, format and type of the logs. @@ -17501,6 +17514,14 @@ type ModifyReplicationGroupInput struct { // not be Default. CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"` + // Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you + // must first set the cluster mode to Compatible. Compatible mode allows your + // Redis clients to connect using both cluster mode enabled and cluster mode + // disabled. 
After you migrate all Redis clients to use cluster mode enabled, + // you can then complete cluster mode configuration and set the cluster mode + // to Enabled. + ClusterMode *string `type:"string" enum:"ClusterMode"` + // The upgraded version of the cache engine to be run on the clusters in the // replication group. // @@ -17514,7 +17535,7 @@ type ModifyReplicationGroupInput struct { // The network type you choose when modifying a cluster, either ipv4 | ipv6. // IPv6 is supported for workloads using Redis engine version 6.2 onward or // Memcached engine version 1.6.6 on all instances built on the Nitro system - // (https://aws.amazon.com/ec2/nitro/). + // (http://aws.amazon.com/ec2/nitro/). IpDiscovery *string `type:"string" enum:"IpDiscovery"` // Specifies the destination, format and type of the logs. @@ -17625,8 +17646,8 @@ type ModifyReplicationGroupInput struct { // to required to allow encrypted connections only. // // Setting TransitEncryptionMode to required is a two-step process that requires - // you to first set the TransitEncryptionMode to preferred first, after that - // you can set TransitEncryptionMode to required. + // you to first set the TransitEncryptionMode to preferred, after that you can + // set TransitEncryptionMode to required. TransitEncryptionMode *string `type:"string" enum:"TransitEncryptionMode"` // The ID of the user group you are associating with the replication group. @@ -17716,6 +17737,12 @@ func (s *ModifyReplicationGroupInput) SetCacheSecurityGroupNames(v []*string) *M return s } +// SetClusterMode sets the ClusterMode field's value. +func (s *ModifyReplicationGroupInput) SetClusterMode(v string) *ModifyReplicationGroupInput { + s.ClusterMode = &v + return s +} + // SetEngineVersion sets the EngineVersion field's value. 
func (s *ModifyReplicationGroupInput) SetEngineVersion(v string) *ModifyReplicationGroupInput { s.EngineVersion = &v @@ -19868,6 +19895,14 @@ type ReplicationGroup struct { // Valid values: true | false ClusterEnabled *bool `type:"boolean"` + // Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you + // must first set the cluster mode to Compatible. Compatible mode allows your + // Redis clients to connect using both cluster mode enabled and cluster mode + // disabled. After you migrate all Redis clients to use cluster mode enabled, + // you can then complete cluster mode configuration and set the cluster mode + // to Enabled. + ClusterMode *string `type:"string" enum:"ClusterMode"` + // The configuration endpoint for this replication group. Use the configuration // endpoint to connect to this replication group. ConfigurationEndpoint *Endpoint `type:"structure"` @@ -19887,7 +19922,7 @@ type ReplicationGroup struct { // The network type you choose when modifying a cluster, either ipv4 | ipv6. // IPv6 is supported for workloads using Redis engine version 6.2 onward or // Memcached engine version 1.6.6 on all instances built on the Nitro system - // (https://aws.amazon.com/ec2/nitro/). + // (http://aws.amazon.com/ec2/nitro/). IpDiscovery *string `type:"string" enum:"IpDiscovery"` // The ID of the KMS key used to encrypt the disk in the cluster. @@ -19908,7 +19943,7 @@ type ReplicationGroup struct { // Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads // using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on - // all instances built on the Nitro system (https://aws.amazon.com/ec2/nitro/). + // all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). NetworkType *string `type:"string" enum:"NetworkType"` // A list of node groups in this replication group. 
For Redis (cluster mode @@ -20036,6 +20071,12 @@ func (s *ReplicationGroup) SetClusterEnabled(v bool) *ReplicationGroup { return s } +// SetClusterMode sets the ClusterMode field's value. +func (s *ReplicationGroup) SetClusterMode(v string) *ReplicationGroup { + s.ClusterMode = &v + return s +} + // SetConfigurationEndpoint sets the ConfigurationEndpoint field's value. func (s *ReplicationGroup) SetConfigurationEndpoint(v *Endpoint) *ReplicationGroup { s.ConfigurationEndpoint = v @@ -20179,6 +20220,14 @@ type ReplicationGroupPendingModifiedValues struct { // Indicates the status of automatic failover for this Redis replication group. AutomaticFailoverStatus *string `type:"string" enum:"PendingAutomaticFailoverStatus"` + // Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you + // must first set the cluster mode to Compatible. Compatible mode allows your + // Redis clients to connect using both cluster mode enabled and cluster mode + // disabled. After you migrate all Redis clients to use cluster mode enabled, + // you can then complete cluster mode configuration and set the cluster mode + // to Enabled. + ClusterMode *string `type:"string" enum:"ClusterMode"` + // The log delivery configurations being modified LogDeliveryConfigurations []*PendingLogDeliveryConfiguration `locationName:"PendingLogDeliveryConfiguration" type:"list"` @@ -20230,6 +20279,12 @@ func (s *ReplicationGroupPendingModifiedValues) SetAutomaticFailoverStatus(v str return s } +// SetClusterMode sets the ClusterMode field's value. +func (s *ReplicationGroupPendingModifiedValues) SetClusterMode(v string) *ReplicationGroupPendingModifiedValues { + s.ClusterMode = &v + return s +} + // SetLogDeliveryConfigurations sets the LogDeliveryConfigurations field's value. 
func (s *ReplicationGroupPendingModifiedValues) SetLogDeliveryConfigurations(v []*PendingLogDeliveryConfiguration) *ReplicationGroupPendingModifiedValues { s.LogDeliveryConfigurations = v @@ -21563,7 +21618,7 @@ type Subnet struct { // Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis // engine version 6.2 onward or Memcached engine version 1.6.6 on all instances - // built on the Nitro system (https://aws.amazon.com/ec2/nitro/). + // built on the Nitro system (http://aws.amazon.com/ec2/nitro/). SupportedNetworkTypes []*string `type:"list" enum:"NetworkType"` } @@ -22500,6 +22555,26 @@ func ChangeType_Values() []string { } } +const ( + // ClusterModeEnabled is a ClusterMode enum value + ClusterModeEnabled = "enabled" + + // ClusterModeDisabled is a ClusterMode enum value + ClusterModeDisabled = "disabled" + + // ClusterModeCompatible is a ClusterMode enum value + ClusterModeCompatible = "compatible" +) + +// ClusterMode_Values returns all elements of the ClusterMode enum +func ClusterMode_Values() []string { + return []string{ + ClusterModeEnabled, + ClusterModeDisabled, + ClusterModeCompatible, + } +} + const ( // DataTieringStatusEnabled is a DataTieringStatus enum value DataTieringStatusEnabled = "enabled" diff --git a/service/elasticsearchservice/api.go b/service/elasticsearchservice/api.go index ad5a5e15ed2..d50c91a3f65 100644 --- a/service/elasticsearchservice/api.go +++ b/service/elasticsearchservice/api.go @@ -9121,8 +9121,8 @@ type DescribePackagesFilter struct { // Any field from PackageDetails. Name *string `type:"string" enum:"DescribePackagesFilterName"` - // A list of values for the specified field. - Value []*string `type:"list"` + // A non-empty list of values for the specified field. + Value []*string `min:"1" type:"list"` } // String returns the string representation. 
@@ -9143,6 +9143,19 @@ func (s DescribePackagesFilter) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribePackagesFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribePackagesFilter"} + if s.Value != nil && len(s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetName sets the Name field's value. func (s *DescribePackagesFilter) SetName(v string) *DescribePackagesFilter { s.Name = &v @@ -9188,6 +9201,26 @@ func (s DescribePackagesInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribePackagesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribePackagesInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetFilters sets the Filters field's value. func (s *DescribePackagesInput) SetFilters(v []*DescribePackagesFilter) *DescribePackagesInput { s.Filters = v diff --git a/service/health/api.go b/service/health/api.go index 227591f3a04..8de6db1adaa 100644 --- a/service/health/api.go +++ b/service/health/api.go @@ -210,9 +210,9 @@ func (c *Health) DescribeAffectedEntitiesRequest(input *DescribeAffectedEntities // Returns a list of entities that have been affected by the specified events, // based on the specified filter criteria. Entities can refer to individual // customer resources, groups of customer resources, or any other construct, -// depending on the Amazon Web Services service. 
Events that have impact beyond -// that of the affected entities, or where the extent of impact is unknown, -// include at least one entity indicating this. +// depending on the Amazon Web Service. Events that have impact beyond that +// of the affected entities, or where the extent of impact is unknown, include +// at least one entity indicating this. // // At least one event ARN is required. // @@ -365,7 +365,7 @@ func (c *Health) DescribeAffectedEntitiesForOrganizationRequest(input *DescribeA // for one or more accounts in your organization in Organizations, based on // the filter criteria. Entities can refer to individual customer resources, // groups of customer resources, or any other construct, depending on the Amazon -// Web Services service. +// Web Service. // // At least one event Amazon Resource Name (ARN) and account ID are required. // @@ -938,8 +938,8 @@ func (c *Health) DescribeEventTypesRequest(input *DescribeEventTypesInput) (req // // Returns the event types that meet the specified filter criteria. You can // use this API operation to find information about the Health event, such as -// the category, Amazon Web Services service, and event code. The metadata for -// each event appears in the EventType (https://docs.aws.amazon.com/health/latest/APIReference/API_EventType.html) +// the category, Amazon Web Service, and event code. The metadata for each event +// appears in the EventType (https://docs.aws.amazon.com/health/latest/APIReference/API_EventType.html) // object. // // If you don't specify a filter criteria, the API operation returns all event @@ -1403,9 +1403,8 @@ func (c *Health) DescribeHealthServiceStatusForOrganizationRequest(input *Descri // DescribeHealthServiceStatusForOrganization API operation for AWS Health APIs and Notifications. // // This operation provides status information on enabling or disabling Health -// to work with your organization. 
To call this operation, you must sign in -// as an IAM user, assume an IAM role, or sign in as the root user (not recommended) -// in the organization's management account. +// to work with your organization. To call this operation, you must use the +// organization's management account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1480,9 +1479,8 @@ func (c *Health) DisableHealthServiceAccessForOrganizationRequest(input *Disable // DisableHealthServiceAccessForOrganization API operation for AWS Health APIs and Notifications. // // Disables Health from working with Organizations. To call this operation, -// you must sign in as an Identity and Access Management (IAM) user, assume -// an IAM role, or sign in as the root user (not recommended) in the organization's -// management account. For more information, see Aggregating Health events (https://docs.aws.amazon.com/health/latest/ug/aggregate-events.html) +// you must sign in to the organization's management account. For more information, +// see Aggregating Health events (https://docs.aws.amazon.com/health/latest/ug/aggregate-events.html) // in the Health User Guide. // // This operation doesn't remove the service-linked role from the management @@ -1946,8 +1944,8 @@ type DescribeAffectedAccountsForOrganizationOutput struct { // A JSON set of elements of the affected accounts. AffectedAccounts []*string `locationName:"affectedAccounts" type:"list"` - // This parameter specifies if the Health event is a public Amazon Web Services - // service event or an account-specific event. + // This parameter specifies if the Health event is a public Amazon Web Service + // event or an account-specific event. // // * If the eventScopeCode value is PUBLIC, then the affectedAccounts value // is always empty. 
@@ -2731,6 +2729,9 @@ type DescribeEventTypesInput struct { Locale *string `locationName:"locale" min:"2" type:"string"` // The maximum number of items to return in one batch, between 10 and 100, inclusive. + // + // If you don't specify the maxResults parameter, this operation returns a maximum + // of 30 items by default. MaxResults *int64 `locationName:"maxResults" min:"10" type:"integer"` // If the results of a search are large, only a portion of the results are returned, @@ -3453,8 +3454,8 @@ type Event struct { // The date and time that the event ended. EndTime *time.Time `locationName:"endTime" type:"timestamp"` - // This parameter specifies if the Health event is a public Amazon Web Services - // service event or an account-specific event. + // This parameter specifies if the Health event is a public Amazon Web Service + // event or an account-specific event. // // * If the eventScopeCode value is PUBLIC, then the affectedAccounts value // is always empty. @@ -3484,8 +3485,7 @@ type Event struct { // The Amazon Web Services Region name of the event. Region *string `locationName:"region" min:"2" type:"string"` - // The Amazon Web Services service that is affected by the event. For example, - // EC2, RDS. + // The Amazon Web Service that is affected by the event. For example, EC2, RDS. Service *string `locationName:"service" min:"2" type:"string"` // The date and time that the event began. @@ -3872,8 +3872,7 @@ type EventFilter struct { // A list of Amazon Web Services Regions. Regions []*string `locationName:"regions" min:"1" type:"list"` - // The Amazon Web Services services associated with the event. For example, - // EC2, RDS. + // The Amazon Web Services associated with the event. For example, EC2, RDS. Services []*string `locationName:"services" min:"1" type:"list"` // A list of dates and times that the event began. @@ -4049,8 +4048,7 @@ type EventType struct { // ; for example, AWS_EC2_SYSTEM_MAINTENANCE_EVENT. 
Code *string `locationName:"code" min:"3" type:"string"` - // The Amazon Web Services service that is affected by the event. For example, - // EC2, RDS. + // The Amazon Web Service that is affected by the event. For example, EC2, RDS. Service *string `locationName:"service" min:"2" type:"string"` } @@ -4103,8 +4101,7 @@ type EventTypeFilter struct { // A list of event type codes. EventTypeCodes []*string `locationName:"eventTypeCodes" min:"1" type:"list"` - // The Amazon Web Services services associated with the event. For example, - // EC2, RDS. + // The Amazon Web Services associated with the event. For example, EC2, RDS. Services []*string `locationName:"services" min:"1" type:"list"` } @@ -4237,8 +4234,13 @@ type OrganizationAffectedEntitiesErrorItem struct { // entities. AwsAccountId *string `locationName:"awsAccountId" type:"string"` - // The unique identifier for the event type. The format is AWS_SERVICE_DESCRIPTION. - // For example, AWS_EC2_SYSTEM_MAINTENANCE_EVENT. + // A message that describes the error. Follow the error message and retry your + // request. + // + // For example, the InvalidAccountInputError error message appears if you call + // the DescribeAffectedEntitiesForOrganization operation and specify the AccountSpecific + // value for the EventScopeCode parameter, but don't specify an Amazon Web Services + // account. ErrorMessage *string `locationName:"errorMessage" type:"string"` // The name of the error. @@ -4312,8 +4314,8 @@ type OrganizationEvent struct { // The date and time that the event ended. EndTime *time.Time `locationName:"endTime" type:"timestamp"` - // This parameter specifies if the Health event is a public Amazon Web Services - // service event or an account-specific event. + // This parameter specifies if the Health event is a public Amazon Web Service + // event or an account-specific event. // // * If the eventScopeCode value is PUBLIC, then the affectedAccounts value // is always empty. 
@@ -4343,8 +4345,7 @@ type OrganizationEvent struct { // The Amazon Web Services Region name of the event. Region *string `locationName:"region" min:"2" type:"string"` - // The Amazon Web Services service that is affected by the event, such as EC2 - // and RDS. + // The Amazon Web Service that is affected by the event, such as EC2 and RDS. Service *string `locationName:"service" min:"2" type:"string"` // The date and time that the event began. @@ -4644,8 +4645,7 @@ type OrganizationEventFilter struct { // A list of Amazon Web Services Regions. Regions []*string `locationName:"regions" min:"1" type:"list"` - // The Amazon Web Services services associated with the event. For example, - // EC2, RDS. + // The Amazon Web Services associated with the event. For example, EC2, RDS. Services []*string `locationName:"services" min:"1" type:"list"` // A range of dates and times that is used by the EventFilter (https://docs.aws.amazon.com/health/latest/APIReference/API_EventFilter.html) diff --git a/service/health/doc.go b/service/health/doc.go index af8ce5291e9..b97b1bc59f1 100644 --- a/service/health/doc.go +++ b/service/health/doc.go @@ -3,24 +3,31 @@ // Package health provides the client and types for making API // requests to AWS Health APIs and Notifications. // -// The Health API provides programmatic access to the Health information that -// appears in the Personal Health Dashboard (https://phd.aws.amazon.com/phd/home#/). -// You can use the API operations to get information about events that might -// affect your Amazon Web Services services and resources. -// -// - You must have a Business, Enterprise On-Ramp, or Enterprise Support -// plan from Amazon Web Services Support (http://aws.amazon.com/premiumsupport/) -// to use the Health API. If you call the Health API from an Amazon Web Services -// account that doesn't have a Business, Enterprise On-Ramp, or Enterprise -// Support plan, you receive a SubscriptionRequiredException error. 
-// -// - You can use the Health endpoint health.us-east-1.amazonaws.com (HTTPS) -// to call the Health API operations. Health supports a multi-Region application -// architecture and has two regional endpoints in an active-passive configuration. -// You can use the high availability endpoint example to determine which -// Amazon Web Services Region is active, so that you can get the latest information -// from the API. For more information, see Accessing the Health API (https://docs.aws.amazon.com/health/latest/ug/health-api.html) -// in the Health User Guide. +// The Health API provides access to the Health information that appears in +// the Health Dashboard (https://health.aws.amazon.com/health/home). You can +// use the API operations to get information about events that might affect +// your Amazon Web Services and resources. +// +// You must have a Business, Enterprise On-Ramp, or Enterprise Support plan +// from Amazon Web Services Support (http://aws.amazon.com/premiumsupport/) +// to use the Health API. If you call the Health API from an Amazon Web Services +// account that doesn't have a Business, Enterprise On-Ramp, or Enterprise Support +// plan, you receive a SubscriptionRequiredException error. +// +// For API access, you need an access key ID and a secret access key. Use temporary +// credentials instead of long-term access keys when possible. Temporary credentials +// include an access key ID, a secret access key, and a security token that +// indicates when the credentials expire. For more information, see Best practices +// for managing Amazon Web Services access keys (https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html) +// in the Amazon Web Services General Reference. +// +// You can use the Health endpoint health.us-east-1.amazonaws.com (HTTPS) to +// call the Health API operations. Health supports a multi-Region application +// architecture and has two regional endpoints in an active-passive configuration. 
+// You can use the high availability endpoint example to determine which Amazon +// Web Services Region is active, so that you can get the latest information +// from the API. For more information, see Accessing the Health API (https://docs.aws.amazon.com/health/latest/ug/health-api.html) +// in the Health User Guide. // // For authentication of requests, Health uses the Signature Version 4 Signing // Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). diff --git a/service/ivsrealtime/api.go b/service/ivsrealtime/api.go index 7277d998a4b..2ac21d0e7ca 100644 --- a/service/ivsrealtime/api.go +++ b/service/ivsrealtime/api.go @@ -360,6 +360,88 @@ func (c *IVSRealTime) DisconnectParticipantWithContext(ctx aws.Context, input *D return out, req.Send() } +const opGetParticipant = "GetParticipant" + +// GetParticipantRequest generates a "aws/request.Request" representing the +// client's request for the GetParticipant operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetParticipant for more information on using the GetParticipant +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetParticipantRequest method. 
+// req, resp := client.GetParticipantRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/GetParticipant +func (c *IVSRealTime) GetParticipantRequest(input *GetParticipantInput) (req *request.Request, output *GetParticipantOutput) { + op := &request.Operation{ + Name: opGetParticipant, + HTTPMethod: "POST", + HTTPPath: "/GetParticipant", + } + + if input == nil { + input = &GetParticipantInput{} + } + + output = &GetParticipantOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetParticipant API operation for Amazon Interactive Video Service RealTime. +// +// Gets information about the specified participant token. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Interactive Video Service RealTime's +// API operation GetParticipant for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// +// - ValidationException +// +// - AccessDeniedException +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/GetParticipant +func (c *IVSRealTime) GetParticipant(input *GetParticipantInput) (*GetParticipantOutput, error) { + req, out := c.GetParticipantRequest(input) + return out, req.Send() +} + +// GetParticipantWithContext is the same as GetParticipant with the addition of +// the ability to pass a context and additional request options. +// +// See GetParticipant for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IVSRealTime) GetParticipantWithContext(ctx aws.Context, input *GetParticipantInput, opts ...request.Option) (*GetParticipantOutput, error) { + req, out := c.GetParticipantRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetStage = "GetStage" // GetStageRequest generates a "aws/request.Request" representing the @@ -442,36 +524,118 @@ func (c *IVSRealTime) GetStageWithContext(ctx aws.Context, input *GetStageInput, return out, req.Send() } -const opListStages = "ListStages" +const opGetStageSession = "GetStageSession" -// ListStagesRequest generates a "aws/request.Request" representing the -// client's request for the ListStages operation. The "output" return +// GetStageSessionRequest generates a "aws/request.Request" representing the +// client's request for the GetStageSession operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListStages for more information on using the ListStages +// See GetStageSession for more information on using the GetStageSession // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the ListStagesRequest method. -// req, resp := client.ListStagesRequest(params) +// // Example sending a request using the GetStageSessionRequest method. 
+// req, resp := client.GetStageSessionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/ListStages -func (c *IVSRealTime) ListStagesRequest(input *ListStagesInput) (req *request.Request, output *ListStagesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/GetStageSession +func (c *IVSRealTime) GetStageSessionRequest(input *GetStageSessionInput) (req *request.Request, output *GetStageSessionOutput) { op := &request.Operation{ - Name: opListStages, + Name: opGetStageSession, HTTPMethod: "POST", - HTTPPath: "/ListStages", + HTTPPath: "/GetStageSession", + } + + if input == nil { + input = &GetStageSessionInput{} + } + + output = &GetStageSessionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetStageSession API operation for Amazon Interactive Video Service RealTime. +// +// Gets information for the specified stage session. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Interactive Video Service RealTime's +// API operation GetStageSession for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// +// - ValidationException +// +// - AccessDeniedException +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/GetStageSession +func (c *IVSRealTime) GetStageSession(input *GetStageSessionInput) (*GetStageSessionOutput, error) { + req, out := c.GetStageSessionRequest(input) + return out, req.Send() +} + +// GetStageSessionWithContext is the same as GetStageSession with the addition of +// the ability to pass a context and additional request options. +// +// See GetStageSession for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IVSRealTime) GetStageSessionWithContext(ctx aws.Context, input *GetStageSessionInput, opts ...request.Option) (*GetStageSessionOutput, error) { + req, out := c.GetStageSessionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListParticipantEvents = "ListParticipantEvents" + +// ListParticipantEventsRequest generates a "aws/request.Request" representing the +// client's request for the ListParticipantEvents operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListParticipantEvents for more information on using the ListParticipantEvents +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListParticipantEventsRequest method. 
+// req, resp := client.ListParticipantEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/ListParticipantEvents +func (c *IVSRealTime) ListParticipantEventsRequest(input *ListParticipantEventsInput) (req *request.Request, output *ListParticipantEventsOutput) { + op := &request.Operation{ + Name: opListParticipantEvents, + HTTPMethod: "POST", + HTTPPath: "/ListParticipantEvents", Paginator: &request.Paginator{ InputTokens: []string{"nextToken"}, OutputTokens: []string{"nextToken"}, @@ -481,25 +645,25 @@ func (c *IVSRealTime) ListStagesRequest(input *ListStagesInput) (req *request.Re } if input == nil { - input = &ListStagesInput{} + input = &ListParticipantEventsInput{} } - output = &ListStagesOutput{} + output = &ListParticipantEventsOutput{} req = c.newRequest(op, input, output) return } -// ListStages API operation for Amazon Interactive Video Service RealTime. +// ListParticipantEvents API operation for Amazon Interactive Video Service RealTime. // -// Gets summary information about all stages in your account, in the AWS region -// where the API request is processed. +// Lists events for a specified participant that occurred during a specified +// stage session. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Interactive Video Service RealTime's -// API operation ListStages for usage and error information. +// API operation ListParticipantEvents for usage and error information. 
// // Returned Error Types: // @@ -507,66 +671,64 @@ func (c *IVSRealTime) ListStagesRequest(input *ListStagesInput) (req *request.Re // // - AccessDeniedException // -// - ConflictException -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/ListStages -func (c *IVSRealTime) ListStages(input *ListStagesInput) (*ListStagesOutput, error) { - req, out := c.ListStagesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/ListParticipantEvents +func (c *IVSRealTime) ListParticipantEvents(input *ListParticipantEventsInput) (*ListParticipantEventsOutput, error) { + req, out := c.ListParticipantEventsRequest(input) return out, req.Send() } -// ListStagesWithContext is the same as ListStages with the addition of +// ListParticipantEventsWithContext is the same as ListParticipantEvents with the addition of // the ability to pass a context and additional request options. // -// See ListStages for details on how to use this API operation. +// See ListParticipantEvents for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *IVSRealTime) ListStagesWithContext(ctx aws.Context, input *ListStagesInput, opts ...request.Option) (*ListStagesOutput, error) { - req, out := c.ListStagesRequest(input) +func (c *IVSRealTime) ListParticipantEventsWithContext(ctx aws.Context, input *ListParticipantEventsInput, opts ...request.Option) (*ListParticipantEventsOutput, error) { + req, out := c.ListParticipantEventsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -// ListStagesPages iterates over the pages of a ListStages operation, +// ListParticipantEventsPages iterates over the pages of a ListParticipantEvents operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListStages method for more information on how to use this operation. +// See ListParticipantEvents method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListStages operation. +// // Example iterating over at most 3 pages of a ListParticipantEvents operation. // pageNum := 0 -// err := client.ListStagesPages(params, -// func(page *ivsrealtime.ListStagesOutput, lastPage bool) bool { +// err := client.ListParticipantEventsPages(params, +// func(page *ivsrealtime.ListParticipantEventsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) -func (c *IVSRealTime) ListStagesPages(input *ListStagesInput, fn func(*ListStagesOutput, bool) bool) error { - return c.ListStagesPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *IVSRealTime) ListParticipantEventsPages(input *ListParticipantEventsInput, fn func(*ListParticipantEventsOutput, bool) bool) error { + return c.ListParticipantEventsPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListStagesPagesWithContext same as ListStagesPages except +// ListParticipantEventsPagesWithContext same as ListParticipantEventsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *IVSRealTime) ListStagesPagesWithContext(ctx aws.Context, input *ListStagesInput, fn func(*ListStagesOutput, bool) bool, opts ...request.Option) error { +func (c *IVSRealTime) ListParticipantEventsPagesWithContext(ctx aws.Context, input *ListParticipantEventsInput, fn func(*ListParticipantEventsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListStagesInput + var inCpy *ListParticipantEventsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListStagesRequest(inCpy) + req, _ := c.ListParticipantEventsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -574,7 +736,7 @@ func (c *IVSRealTime) ListStagesPagesWithContext(ctx aws.Context, input *ListSta } for p.Next() { - if !fn(p.Page().(*ListStagesOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListParticipantEventsOutput), !p.HasNextPage()) { break } } @@ -582,305 +744,471 @@ func (c *IVSRealTime) ListStagesPagesWithContext(ctx aws.Context, input *ListSta return p.Err() } -const opListTagsForResource = "ListTagsForResource" +const opListParticipants = "ListParticipants" -// ListTagsForResourceRequest generates a "aws/request.Request" representing the -// client's request for the ListTagsForResource operation. The "output" return +// ListParticipantsRequest generates a "aws/request.Request" representing the +// client's request for the ListParticipants operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTagsForResource for more information on using the ListTagsForResource +// See ListParticipants for more information on using the ListParticipants // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the ListTagsForResourceRequest method. -// req, resp := client.ListTagsForResourceRequest(params) +// // Example sending a request using the ListParticipantsRequest method. +// req, resp := client.ListParticipantsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/ListTagsForResource -func (c *IVSRealTime) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/ListParticipants +func (c *IVSRealTime) ListParticipantsRequest(input *ListParticipantsInput) (req *request.Request, output *ListParticipantsOutput) { op := &request.Operation{ - Name: opListTagsForResource, - HTTPMethod: "GET", - HTTPPath: "/tags/{resourceArn}", + Name: opListParticipants, + HTTPMethod: "POST", + HTTPPath: "/ListParticipants", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { - input = &ListTagsForResourceInput{} + input = &ListParticipantsInput{} } - output = &ListTagsForResourceOutput{} + output = &ListParticipantsOutput{} req = c.newRequest(op, input, output) return } -// ListTagsForResource API operation for Amazon Interactive Video Service RealTime. +// ListParticipants API operation for Amazon Interactive Video Service RealTime. // -// Gets information about AWS tags for the specified ARN. +// Lists all participants in a specified stage session. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Interactive Video Service RealTime's -// API operation ListTagsForResource for usage and error information. +// API operation ListParticipants for usage and error information. // // Returned Error Types: // -// - ResourceNotFoundException -// // - ValidationException // -// - InternalServerException +// - AccessDeniedException // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/ListTagsForResource -func (c *IVSRealTime) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/ListParticipants +func (c *IVSRealTime) ListParticipants(input *ListParticipantsInput) (*ListParticipantsOutput, error) { + req, out := c.ListParticipantsRequest(input) return out, req.Send() } -// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// ListParticipantsWithContext is the same as ListParticipants with the addition of // the ability to pass a context and additional request options. // -// See ListTagsForResource for details on how to use this API operation. +// See ListParticipants for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *IVSRealTime) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +func (c *IVSRealTime) ListParticipantsWithContext(ctx aws.Context, input *ListParticipantsInput, opts ...request.Option) (*ListParticipantsOutput, error) { + req, out := c.ListParticipantsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTagResource = "TagResource" +// ListParticipantsPages iterates over the pages of a ListParticipants operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListParticipants method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListParticipants operation. +// pageNum := 0 +// err := client.ListParticipantsPages(params, +// func(page *ivsrealtime.ListParticipantsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *IVSRealTime) ListParticipantsPages(input *ListParticipantsInput, fn func(*ListParticipantsOutput, bool) bool) error { + return c.ListParticipantsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return +// ListParticipantsPagesWithContext same as ListParticipantsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *IVSRealTime) ListParticipantsPagesWithContext(ctx aws.Context, input *ListParticipantsInput, fn func(*ListParticipantsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListParticipantsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListParticipantsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListParticipantsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListStageSessions = "ListStageSessions" + +// ListStageSessionsRequest generates a "aws/request.Request" representing the +// client's request for the ListStageSessions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See TagResource for more information on using the TagResource +// See ListStageSessions for more information on using the ListStageSessions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the TagResourceRequest method. -// req, resp := client.TagResourceRequest(params) +// // Example sending a request using the ListStageSessionsRequest method. 
+// req, resp := client.ListStageSessionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/TagResource -func (c *IVSRealTime) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/ListStageSessions +func (c *IVSRealTime) ListStageSessionsRequest(input *ListStageSessionsInput) (req *request.Request, output *ListStageSessionsOutput) { op := &request.Operation{ - Name: opTagResource, + Name: opListStageSessions, HTTPMethod: "POST", - HTTPPath: "/tags/{resourceArn}", + HTTPPath: "/ListStageSessions", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { - input = &TagResourceInput{} + input = &ListStageSessionsInput{} } - output = &TagResourceOutput{} + output = &ListStageSessionsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// TagResource API operation for Amazon Interactive Video Service RealTime. +// ListStageSessions API operation for Amazon Interactive Video Service RealTime. // -// Adds or updates tags for the AWS resource with the specified ARN. +// Gets all sessions for a specified stage. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Interactive Video Service RealTime's -// API operation TagResource for usage and error information. +// API operation ListStageSessions for usage and error information. 
// // Returned Error Types: // -// - ResourceNotFoundException -// // - ValidationException // -// - InternalServerException +// - AccessDeniedException // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/TagResource -func (c *IVSRealTime) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/ListStageSessions +func (c *IVSRealTime) ListStageSessions(input *ListStageSessionsInput) (*ListStageSessionsOutput, error) { + req, out := c.ListStageSessionsRequest(input) return out, req.Send() } -// TagResourceWithContext is the same as TagResource with the addition of +// ListStageSessionsWithContext is the same as ListStageSessions with the addition of // the ability to pass a context and additional request options. // -// See TagResource for details on how to use this API operation. +// See ListStageSessions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *IVSRealTime) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +func (c *IVSRealTime) ListStageSessionsWithContext(ctx aws.Context, input *ListStageSessionsInput, opts ...request.Option) (*ListStageSessionsOutput, error) { + req, out := c.ListStageSessionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUntagResource = "UntagResource" +// ListStageSessionsPages iterates over the pages of a ListStageSessions operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See ListStageSessions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListStageSessions operation. +// pageNum := 0 +// err := client.ListStageSessionsPages(params, +// func(page *ivsrealtime.ListStageSessionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *IVSRealTime) ListStageSessionsPages(input *ListStageSessionsInput, fn func(*ListStageSessionsOutput, bool) bool) error { + return c.ListStageSessionsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// UntagResourceRequest generates a "aws/request.Request" representing the -// client's request for the UntagResource operation. The "output" return +// ListStageSessionsPagesWithContext same as ListStageSessionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IVSRealTime) ListStageSessionsPagesWithContext(ctx aws.Context, input *ListStageSessionsInput, fn func(*ListStageSessionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListStageSessionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListStageSessionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListStageSessionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListStages = "ListStages" + +// ListStagesRequest generates a "aws/request.Request" representing the +// client's request for the ListStages operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UntagResource for more information on using the UntagResource +// See ListStages for more information on using the ListStages // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the UntagResourceRequest method. -// req, resp := client.UntagResourceRequest(params) +// // Example sending a request using the ListStagesRequest method. 
+// req, resp := client.ListStagesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/UntagResource -func (c *IVSRealTime) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/ListStages +func (c *IVSRealTime) ListStagesRequest(input *ListStagesInput) (req *request.Request, output *ListStagesOutput) { op := &request.Operation{ - Name: opUntagResource, - HTTPMethod: "DELETE", - HTTPPath: "/tags/{resourceArn}", + Name: opListStages, + HTTPMethod: "POST", + HTTPPath: "/ListStages", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { - input = &UntagResourceInput{} + input = &ListStagesInput{} } - output = &UntagResourceOutput{} + output = &ListStagesOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UntagResource API operation for Amazon Interactive Video Service RealTime. +// ListStages API operation for Amazon Interactive Video Service RealTime. // -// Removes tags from the resource with the specified ARN. +// Gets summary information about all stages in your account, in the AWS region +// where the API request is processed. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Interactive Video Service RealTime's -// API operation UntagResource for usage and error information. +// API operation ListStages for usage and error information. 
// // Returned Error Types: // -// - ResourceNotFoundException -// // - ValidationException // -// - InternalServerException +// - AccessDeniedException // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/UntagResource -func (c *IVSRealTime) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +// - ConflictException +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/ListStages +func (c *IVSRealTime) ListStages(input *ListStagesInput) (*ListStagesOutput, error) { + req, out := c.ListStagesRequest(input) return out, req.Send() } -// UntagResourceWithContext is the same as UntagResource with the addition of +// ListStagesWithContext is the same as ListStages with the addition of // the ability to pass a context and additional request options. // -// See UntagResource for details on how to use this API operation. +// See ListStages for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *IVSRealTime) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +func (c *IVSRealTime) ListStagesWithContext(ctx aws.Context, input *ListStagesInput, opts ...request.Option) (*ListStagesOutput, error) { + req, out := c.ListStagesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateStage = "UpdateStage" +// ListStagesPages iterates over the pages of a ListStages operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
+// +// See ListStages method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListStages operation. +// pageNum := 0 +// err := client.ListStagesPages(params, +// func(page *ivsrealtime.ListStagesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *IVSRealTime) ListStagesPages(input *ListStagesInput, fn func(*ListStagesOutput, bool) bool) error { + return c.ListStagesPagesWithContext(aws.BackgroundContext(), input, fn) +} -// UpdateStageRequest generates a "aws/request.Request" representing the -// client's request for the UpdateStage operation. The "output" return +// ListStagesPagesWithContext same as ListStagesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IVSRealTime) ListStagesPagesWithContext(ctx aws.Context, input *ListStagesInput, fn func(*ListStagesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListStagesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListStagesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListStagesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. 
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateStage for more information on using the UpdateStage +// See ListTagsForResource for more information on using the ListTagsForResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the UpdateStageRequest method. -// req, resp := client.UpdateStageRequest(params) +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/UpdateStage -func (c *IVSRealTime) UpdateStageRequest(input *UpdateStageInput) (req *request.Request, output *UpdateStageOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/ListTagsForResource +func (c *IVSRealTime) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { op := &request.Operation{ - Name: opUpdateStage, - HTTPMethod: "POST", - HTTPPath: "/UpdateStage", + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/tags/{resourceArn}", } if input == nil { - input = &UpdateStageInput{} + input = &ListTagsForResourceInput{} } - output = &UpdateStageOutput{} + output = &ListTagsForResourceOutput{} req = c.newRequest(op, input, output) return } -// UpdateStage API operation for Amazon Interactive Video Service RealTime. +// ListTagsForResource API operation for Amazon Interactive Video Service RealTime. 
// -// Updates a stage’s configuration. +// Gets information about AWS tags for the specified ARN. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Interactive Video Service RealTime's -// API operation UpdateStage for usage and error information. +// API operation ListTagsForResource for usage and error information. // // Returned Error Types: // @@ -888,47 +1216,295 @@ func (c *IVSRealTime) UpdateStageRequest(input *UpdateStageInput) (req *request. // // - ValidationException // -// - AccessDeniedException -// -// - ServiceQuotaExceededException -// -// - PendingVerification +// - InternalServerException // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/UpdateStage -func (c *IVSRealTime) UpdateStage(input *UpdateStageInput) (*UpdateStageOutput, error) { - req, out := c.UpdateStageRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/ListTagsForResource +func (c *IVSRealTime) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) return out, req.Send() } -// UpdateStageWithContext is the same as UpdateStage with the addition of +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of // the ability to pass a context and additional request options. // -// See UpdateStage for details on how to use this API operation. +// See ListTagsForResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *IVSRealTime) UpdateStageWithContext(ctx aws.Context, input *UpdateStageInput, opts ...request.Option) (*UpdateStageOutput, error) { - req, out := c.UpdateStageRequest(input) +func (c *IVSRealTime) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -type AccessDeniedException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` +const opTagResource = "TagResource" - // User does not have sufficient access to perform this action. - ExceptionMessage *string `locationName:"exceptionMessage" type:"string"` +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the TagResourceRequest method. 
+// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/TagResource +func (c *IVSRealTime) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/tags/{resourceArn}", + } - Message_ *string `locationName:"message" type:"string"` + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return } -// String returns the string representation. +// TagResource API operation for Amazon Interactive Video Service RealTime. // -// API parameter values that are decorated as "sensitive" in the API will not +// Adds or updates tags for the AWS resource with the specified ARN. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Interactive Video Service RealTime's +// API operation TagResource for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// +// - ValidationException +// +// - InternalServerException +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/TagResource +func (c *IVSRealTime) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IVSRealTime) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UntagResourceRequest method. 
+// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/UntagResource +func (c *IVSRealTime) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon Interactive Video Service RealTime. +// +// Removes tags from the resource with the specified ARN. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Interactive Video Service RealTime's +// API operation UntagResource for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// +// - ValidationException +// +// - InternalServerException +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/UntagResource +func (c *IVSRealTime) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IVSRealTime) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateStage = "UpdateStage" + +// UpdateStageRequest generates a "aws/request.Request" representing the +// client's request for the UpdateStage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateStage for more information on using the UpdateStage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateStageRequest method. +// req, resp := client.UpdateStageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/UpdateStage +func (c *IVSRealTime) UpdateStageRequest(input *UpdateStageInput) (req *request.Request, output *UpdateStageOutput) { + op := &request.Operation{ + Name: opUpdateStage, + HTTPMethod: "POST", + HTTPPath: "/UpdateStage", + } + + if input == nil { + input = &UpdateStageInput{} + } + + output = &UpdateStageOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateStage API operation for Amazon Interactive Video Service RealTime. +// +// Updates a stage’s configuration. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Interactive Video Service RealTime's +// API operation UpdateStage for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// +// - ValidationException +// +// - AccessDeniedException +// +// - ServiceQuotaExceededException +// +// - PendingVerification +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ivs-realtime-2020-07-14/UpdateStage +func (c *IVSRealTime) UpdateStage(input *UpdateStageInput) (*UpdateStageOutput, error) { + req, out := c.UpdateStageRequest(input) + return out, req.Send() +} + +// UpdateStageWithContext is the same as UpdateStage with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateStage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IVSRealTime) UpdateStageWithContext(ctx aws.Context, input *UpdateStageInput, opts ...request.Option) (*UpdateStageOutput, error) { + req, out := c.UpdateStageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +type AccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // User does not have sufficient access to perform this action. + ExceptionMessage *string `locationName:"exceptionMessage" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". func (s AccessDeniedException) String() string { @@ -982,14 +1558,504 @@ func (s *AccessDeniedException) RequestID() string { return s.RespMetadata.RequestID } -type ConflictException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` +type ConflictException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Updating or deleting a resource can cause an inconsistent state. + ExceptionMessage *string `locationName:"exceptionMessage" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ConflictException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ConflictException) GoString() string { + return s.String() +} + +func newErrorConflictException(v protocol.ResponseMetadata) error { + return &ConflictException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ConflictException) Code() string { + return "ConflictException" +} + +// Message returns the exception's message. +func (s *ConflictException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *ConflictException) OrigErr() error { + return nil +} + +func (s *ConflictException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID +} + +type CreateParticipantTokenInput struct { + _ struct{} `type:"structure"` + + // Application-provided attributes to encode into the token and attach to a + // stage. Map keys and values can contain UTF-8 encoded text. The maximum length + // of this field is 1 KB total. This field is exposed to all stage participants + // and should not be used for personally identifying, confidential, or sensitive + // information. + Attributes map[string]*string `locationName:"attributes" type:"map"` + + // Set of capabilities that the user is allowed to perform in the stage. Default: + // PUBLISH, SUBSCRIBE. + Capabilities []*string `locationName:"capabilities" type:"list" enum:"ParticipantTokenCapability"` + + // Duration (in minutes), after which the token expires. Default: 720 (12 hours). + Duration *int64 `locationName:"duration" min:"1" type:"integer"` + + // ARN of the stage to which this token is scoped. + // + // StageArn is a required field + StageArn *string `locationName:"stageArn" min:"1" type:"string" required:"true"` + + // Name that can be specified to help identify the token. This can be any UTF-8 + // encoded text. This field is exposed to all stage participants and should + // not be used for personally identifying, confidential, or sensitive information. + UserId *string `locationName:"userId" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateParticipantTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateParticipantTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateParticipantTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateParticipantTokenInput"} + if s.Duration != nil && *s.Duration < 1 { + invalidParams.Add(request.NewErrParamMinValue("Duration", 1)) + } + if s.StageArn == nil { + invalidParams.Add(request.NewErrParamRequired("StageArn")) + } + if s.StageArn != nil && len(*s.StageArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StageArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributes sets the Attributes field's value. +func (s *CreateParticipantTokenInput) SetAttributes(v map[string]*string) *CreateParticipantTokenInput { + s.Attributes = v + return s +} + +// SetCapabilities sets the Capabilities field's value. +func (s *CreateParticipantTokenInput) SetCapabilities(v []*string) *CreateParticipantTokenInput { + s.Capabilities = v + return s +} + +// SetDuration sets the Duration field's value. +func (s *CreateParticipantTokenInput) SetDuration(v int64) *CreateParticipantTokenInput { + s.Duration = &v + return s +} + +// SetStageArn sets the StageArn field's value. 
+func (s *CreateParticipantTokenInput) SetStageArn(v string) *CreateParticipantTokenInput { + s.StageArn = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *CreateParticipantTokenInput) SetUserId(v string) *CreateParticipantTokenInput { + s.UserId = &v + return s +} + +type CreateParticipantTokenOutput struct { + _ struct{} `type:"structure"` + + // The participant token that was created. + ParticipantToken *ParticipantToken `locationName:"participantToken" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateParticipantTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateParticipantTokenOutput) GoString() string { + return s.String() +} + +// SetParticipantToken sets the ParticipantToken field's value. +func (s *CreateParticipantTokenOutput) SetParticipantToken(v *ParticipantToken) *CreateParticipantTokenOutput { + s.ParticipantToken = v + return s +} + +type CreateStageInput struct { + _ struct{} `type:"structure"` + + // Optional name that can be specified for the stage being created. + Name *string `locationName:"name" type:"string"` + + // Array of participant token configuration objects to attach to the new stage. + ParticipantTokenConfigurations []*ParticipantTokenConfiguration `locationName:"participantTokenConfigurations" type:"list"` + + // Tags attached to the resource. Array of maps, each of the form string:string + // (key:value). 
See Tagging AWS Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + // for details, including restrictions that apply to tags and "Tag naming limits + // and requirements"; Amazon IVS has no constraints on tags beyond what is documented + // there. + Tags map[string]*string `locationName:"tags" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateStageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateStageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateStageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateStageInput"} + if s.ParticipantTokenConfigurations != nil { + for i, v := range s.ParticipantTokenConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ParticipantTokenConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *CreateStageInput) SetName(v string) *CreateStageInput { + s.Name = &v + return s +} + +// SetParticipantTokenConfigurations sets the ParticipantTokenConfigurations field's value. 
+func (s *CreateStageInput) SetParticipantTokenConfigurations(v []*ParticipantTokenConfiguration) *CreateStageInput { + s.ParticipantTokenConfigurations = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateStageInput) SetTags(v map[string]*string) *CreateStageInput { + s.Tags = v + return s +} + +type CreateStageOutput struct { + _ struct{} `type:"structure"` + + // Participant tokens attached to the stage. These correspond to the participants + // in the request. + ParticipantTokens []*ParticipantToken `locationName:"participantTokens" type:"list"` + + // The stage that was created. + Stage *Stage `locationName:"stage" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateStageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateStageOutput) GoString() string { + return s.String() +} + +// SetParticipantTokens sets the ParticipantTokens field's value. +func (s *CreateStageOutput) SetParticipantTokens(v []*ParticipantToken) *CreateStageOutput { + s.ParticipantTokens = v + return s +} + +// SetStage sets the Stage field's value. +func (s *CreateStageOutput) SetStage(v *Stage) *CreateStageOutput { + s.Stage = v + return s +} + +type DeleteStageInput struct { + _ struct{} `type:"structure"` + + // ARN of the stage to be deleted. + // + // Arn is a required field + Arn *string `locationName:"arn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteStageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteStageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteStageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteStageInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *DeleteStageInput) SetArn(v string) *DeleteStageInput { + s.Arn = &v + return s +} + +type DeleteStageOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteStageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DeleteStageOutput) GoString() string { + return s.String() +} + +type DisconnectParticipantInput struct { + _ struct{} `type:"structure"` + + // Identifier of the participant to be disconnected. This is assigned by IVS + // and returned by CreateParticipantToken. + // + // ParticipantId is a required field + ParticipantId *string `locationName:"participantId" type:"string" required:"true"` + + // Description of why this participant is being disconnected. + Reason *string `locationName:"reason" type:"string"` + + // ARN of the stage to which the participant is attached. + // + // StageArn is a required field + StageArn *string `locationName:"stageArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisconnectParticipantInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisconnectParticipantInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DisconnectParticipantInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisconnectParticipantInput"} + if s.ParticipantId == nil { + invalidParams.Add(request.NewErrParamRequired("ParticipantId")) + } + if s.StageArn == nil { + invalidParams.Add(request.NewErrParamRequired("StageArn")) + } + if s.StageArn != nil && len(*s.StageArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StageArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetParticipantId sets the ParticipantId field's value. +func (s *DisconnectParticipantInput) SetParticipantId(v string) *DisconnectParticipantInput { + s.ParticipantId = &v + return s +} + +// SetReason sets the Reason field's value. +func (s *DisconnectParticipantInput) SetReason(v string) *DisconnectParticipantInput { + s.Reason = &v + return s +} + +// SetStageArn sets the StageArn field's value. +func (s *DisconnectParticipantInput) SetStageArn(v string) *DisconnectParticipantInput { + s.StageArn = &v + return s +} + +type DisconnectParticipantOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisconnectParticipantOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisconnectParticipantOutput) GoString() string { + return s.String() +} + +// An occurrence during a stage session. 
+type Event struct { + _ struct{} `type:"structure"` + + // If the event is an error event, the error code is provided to give insight + // into the specific error that occurred. If the event is not an error event, + // this field is null. INSUFFICIENT_CAPABILITIES indicates that the participant + // tried to take an action that the participant’s token is not allowed to + // do. For more information about participant capabilities, see the capabilities + // field in CreateParticipantToken. + ErrorCode *string `locationName:"errorCode" type:"string" enum:"EventErrorCode"` + + // ISO 8601 timestamp (returned as a string) for when the event occurred. + EventTime *time.Time `locationName:"eventTime" type:"timestamp" timestampFormat:"iso8601"` - // Updating or deleting a resource can cause an inconsistent state. - ExceptionMessage *string `locationName:"exceptionMessage" type:"string"` + // The name of the event. + Name *string `locationName:"name" type:"string" enum:"EventName"` - Message_ *string `locationName:"message" type:"string"` + // Unique identifier for the participant who triggered the event. This is assigned + // by IVS. + ParticipantId *string `locationName:"participantId" type:"string"` + + // Unique identifier for the remote participant. For a subscribe event, this + // is the publisher. For a publish or join event, this is null. This is assigned + // by IVS. + RemoteParticipantId *string `locationName:"remoteParticipantId" type:"string"` } // String returns the string representation. @@ -997,7 +2063,7 @@ type ConflictException struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
-func (s ConflictException) String() string { +func (s Event) String() string { return awsutil.Prettify(s) } @@ -1006,74 +2072,58 @@ func (s ConflictException) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ConflictException) GoString() string { +func (s Event) GoString() string { return s.String() } -func newErrorConflictException(v protocol.ResponseMetadata) error { - return &ConflictException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *ConflictException) Code() string { - return "ConflictException" -} - -// Message returns the exception's message. -func (s *ConflictException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetErrorCode sets the ErrorCode field's value. +func (s *Event) SetErrorCode(v string) *Event { + s.ErrorCode = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ConflictException) OrigErr() error { - return nil +// SetEventTime sets the EventTime field's value. +func (s *Event) SetEventTime(v time.Time) *Event { + s.EventTime = &v + return s } -func (s *ConflictException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// SetName sets the Name field's value. +func (s *Event) SetName(v string) *Event { + s.Name = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s *ConflictException) StatusCode() int { - return s.RespMetadata.StatusCode +// SetParticipantId sets the ParticipantId field's value. +func (s *Event) SetParticipantId(v string) *Event { + s.ParticipantId = &v + return s } -// RequestID returns the service's response RequestID for request. 
-func (s *ConflictException) RequestID() string { - return s.RespMetadata.RequestID +// SetRemoteParticipantId sets the RemoteParticipantId field's value. +func (s *Event) SetRemoteParticipantId(v string) *Event { + s.RemoteParticipantId = &v + return s } -type CreateParticipantTokenInput struct { +type GetParticipantInput struct { _ struct{} `type:"structure"` - // Application-provided attributes to encode into the token and attach to a - // stage. Map keys and values can contain UTF-8 encoded text. The maximum length - // of this field is 1 KB total. This field is exposed to all stage participants - // and should not be used for personally identifying, confidential, or sensitive - // information. - Attributes map[string]*string `locationName:"attributes" type:"map"` - - // Set of capabilities that the user is allowed to perform in the stage. Default: - // PUBLISH, SUBSCRIBE. - Capabilities []*string `locationName:"capabilities" type:"list" enum:"ParticipantTokenCapability"` + // Unique identifier for the participant. This is assigned by IVS and returned + // by CreateParticipantToken. + // + // ParticipantId is a required field + ParticipantId *string `locationName:"participantId" type:"string" required:"true"` - // Duration (in minutes), after which the token expires. Default: 60 (1 hour). - Duration *int64 `locationName:"duration" min:"1" type:"integer"` + // ID of a session within the stage. + // + // SessionId is a required field + SessionId *string `locationName:"sessionId" min:"16" type:"string" required:"true"` - // ARN of the stage to which this token is scoped. + // Stage ARN. // // StageArn is a required field StageArn *string `locationName:"stageArn" min:"1" type:"string" required:"true"` - - // Name that can be specified to help identify the token. This can be any UTF-8 - // encoded text. This field is exposed to all stage participants and should - // not be used for personally identifying, confidential, or sensitive information. 
- UserId *string `locationName:"userId" type:"string"` } // String returns the string representation. @@ -1081,7 +2131,7 @@ type CreateParticipantTokenInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateParticipantTokenInput) String() string { +func (s GetParticipantInput) String() string { return awsutil.Prettify(s) } @@ -1090,15 +2140,21 @@ func (s CreateParticipantTokenInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateParticipantTokenInput) GoString() string { +func (s GetParticipantInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateParticipantTokenInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateParticipantTokenInput"} - if s.Duration != nil && *s.Duration < 1 { - invalidParams.Add(request.NewErrParamMinValue("Duration", 1)) +func (s *GetParticipantInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetParticipantInput"} + if s.ParticipantId == nil { + invalidParams.Add(request.NewErrParamRequired("ParticipantId")) + } + if s.SessionId == nil { + invalidParams.Add(request.NewErrParamRequired("SessionId")) + } + if s.SessionId != nil && len(*s.SessionId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("SessionId", 16)) } if s.StageArn == nil { invalidParams.Add(request.NewErrParamRequired("StageArn")) @@ -1113,41 +2169,62 @@ func (s *CreateParticipantTokenInput) Validate() error { return nil } -// SetAttributes sets the Attributes field's value. 
-func (s *CreateParticipantTokenInput) SetAttributes(v map[string]*string) *CreateParticipantTokenInput { - s.Attributes = v - return s -} - -// SetCapabilities sets the Capabilities field's value. -func (s *CreateParticipantTokenInput) SetCapabilities(v []*string) *CreateParticipantTokenInput { - s.Capabilities = v +// SetParticipantId sets the ParticipantId field's value. +func (s *GetParticipantInput) SetParticipantId(v string) *GetParticipantInput { + s.ParticipantId = &v return s } -// SetDuration sets the Duration field's value. -func (s *CreateParticipantTokenInput) SetDuration(v int64) *CreateParticipantTokenInput { - s.Duration = &v +// SetSessionId sets the SessionId field's value. +func (s *GetParticipantInput) SetSessionId(v string) *GetParticipantInput { + s.SessionId = &v return s } // SetStageArn sets the StageArn field's value. -func (s *CreateParticipantTokenInput) SetStageArn(v string) *CreateParticipantTokenInput { +func (s *GetParticipantInput) SetStageArn(v string) *GetParticipantInput { s.StageArn = &v return s } -// SetUserId sets the UserId field's value. -func (s *CreateParticipantTokenInput) SetUserId(v string) *CreateParticipantTokenInput { - s.UserId = &v +type GetParticipantOutput struct { + _ struct{} `type:"structure"` + + // The participant that is returned. + Participant *Participant `locationName:"participant" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetParticipantOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s GetParticipantOutput) GoString() string { + return s.String() +} + +// SetParticipant sets the Participant field's value. +func (s *GetParticipantOutput) SetParticipant(v *Participant) *GetParticipantOutput { + s.Participant = v return s } -type CreateParticipantTokenOutput struct { +type GetStageInput struct { _ struct{} `type:"structure"` - // The participant token that was created. - ParticipantToken *ParticipantToken `locationName:"participantToken" type:"structure"` + // ARN of the stage for which the information is to be retrieved. + // + // Arn is a required field + Arn *string `locationName:"arn" min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -1155,7 +2232,7 @@ type CreateParticipantTokenOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateParticipantTokenOutput) String() string { +func (s GetStageInput) String() string { return awsutil.Prettify(s) } @@ -1164,31 +2241,75 @@ func (s CreateParticipantTokenOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateParticipantTokenOutput) GoString() string { +func (s GetStageInput) GoString() string { return s.String() } -// SetParticipantToken sets the ParticipantToken field's value. -func (s *CreateParticipantTokenOutput) SetParticipantToken(v *ParticipantToken) *CreateParticipantTokenOutput { - s.ParticipantToken = v +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetStageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetStageInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *GetStageInput) SetArn(v string) *GetStageInput { + s.Arn = &v return s } -type CreateStageInput struct { +type GetStageOutput struct { _ struct{} `type:"structure"` - // Optional name that can be specified for the stage being created. - Name *string `locationName:"name" type:"string"` + // The stage that is returned. + Stage *Stage `locationName:"stage" type:"structure"` +} - // Array of participant token configuration objects to attach to the new stage. - ParticipantTokenConfigurations []*ParticipantTokenConfiguration `locationName:"participantTokenConfigurations" type:"list"` +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetStageOutput) String() string { + return awsutil.Prettify(s) +} - // Tags attached to the resource. Array of maps, each of the form string:string - // (key:value). See Tagging AWS Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) - // for details, including restrictions that apply to tags and "Tag naming limits - // and requirements"; Amazon IVS has no constraints on tags beyond what is documented - // there. - Tags map[string]*string `locationName:"tags" type:"map"` +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetStageOutput) GoString() string { + return s.String() +} + +// SetStage sets the Stage field's value. +func (s *GetStageOutput) SetStage(v *Stage) *GetStageOutput { + s.Stage = v + return s +} + +type GetStageSessionInput struct { + _ struct{} `type:"structure"` + + // ID of a session within the stage. + // + // SessionId is a required field + SessionId *string `locationName:"sessionId" min:"16" type:"string" required:"true"` + + // ARN of the stage for which the information is to be retrieved. + // + // StageArn is a required field + StageArn *string `locationName:"stageArn" min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -1196,7 +2317,7 @@ type CreateStageInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateStageInput) String() string { +func (s GetStageSessionInput) String() string { return awsutil.Prettify(s) } @@ -1205,22 +2326,24 @@ func (s CreateStageInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateStageInput) GoString() string { +func (s GetStageSessionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateStageInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateStageInput"} - if s.ParticipantTokenConfigurations != nil { - for i, v := range s.ParticipantTokenConfigurations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ParticipantTokenConfigurations", i), err.(request.ErrInvalidParams)) - } - } +func (s *GetStageSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetStageSessionInput"} + if s.SessionId == nil { + invalidParams.Add(request.NewErrParamRequired("SessionId")) + } + if s.SessionId != nil && len(*s.SessionId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("SessionId", 16)) + } + if s.StageArn == nil { + invalidParams.Add(request.NewErrParamRequired("StageArn")) + } + if s.StageArn != nil && len(*s.StageArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StageArn", 1)) } if invalidParams.Len() > 0 { @@ -1229,33 +2352,57 @@ func (s *CreateStageInput) Validate() error { return nil } -// SetName sets the Name field's value. -func (s *CreateStageInput) SetName(v string) *CreateStageInput { - s.Name = &v +// SetSessionId sets the SessionId field's value. +func (s *GetStageSessionInput) SetSessionId(v string) *GetStageSessionInput { + s.SessionId = &v return s } -// SetParticipantTokenConfigurations sets the ParticipantTokenConfigurations field's value. -func (s *CreateStageInput) SetParticipantTokenConfigurations(v []*ParticipantTokenConfiguration) *CreateStageInput { - s.ParticipantTokenConfigurations = v +// SetStageArn sets the StageArn field's value. +func (s *GetStageSessionInput) SetStageArn(v string) *GetStageSessionInput { + s.StageArn = &v return s } -// SetTags sets the Tags field's value. 
-func (s *CreateStageInput) SetTags(v map[string]*string) *CreateStageInput { - s.Tags = v - return s +type GetStageSessionOutput struct { + _ struct{} `type:"structure"` + + // The stage session that is returned. + StageSession *StageSession `locationName:"stageSession" type:"structure"` } -type CreateStageOutput struct { - _ struct{} `type:"structure"` +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetStageSessionOutput) String() string { + return awsutil.Prettify(s) +} - // Participant tokens attached to the stage. These correspond to the participants - // in the request. - ParticipantTokens []*ParticipantToken `locationName:"participantTokens" type:"list"` +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetStageSessionOutput) GoString() string { + return s.String() +} + +// SetStageSession sets the StageSession field's value. +func (s *GetStageSessionOutput) SetStageSession(v *StageSession) *GetStageSessionOutput { + s.StageSession = v + return s +} + +type InternalServerException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // The stage that was created. - Stage *Stage `locationName:"stage" type:"structure"` + // Unexpected error during processing of request. + ExceptionMessage *string `locationName:"exceptionMessage" type:"string"` + + Message_ *string `locationName:"message" type:"string"` } // String returns the string representation. 
@@ -1263,7 +2410,7 @@ type CreateStageOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateStageOutput) String() string { +func (s InternalServerException) String() string { return awsutil.Prettify(s) } @@ -1272,29 +2419,73 @@ func (s CreateStageOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateStageOutput) GoString() string { +func (s InternalServerException) GoString() string { return s.String() } -// SetParticipantTokens sets the ParticipantTokens field's value. -func (s *CreateStageOutput) SetParticipantTokens(v []*ParticipantToken) *CreateStageOutput { - s.ParticipantTokens = v - return s +func newErrorInternalServerException(v protocol.ResponseMetadata) error { + return &InternalServerException{ + RespMetadata: v, + } } -// SetStage sets the Stage field's value. -func (s *CreateStageOutput) SetStage(v *Stage) *CreateStageOutput { - s.Stage = v - return s +// Code returns the exception type name. +func (s *InternalServerException) Code() string { + return "InternalServerException" } -type DeleteStageInput struct { +// Message returns the exception's message. +func (s *InternalServerException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InternalServerException) OrigErr() error { + return nil +} + +func (s *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *InternalServerException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InternalServerException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ListParticipantEventsInput struct { _ struct{} `type:"structure"` - // ARN of the stage to be deleted. + // Maximum number of results to return. Default: 50. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The first participant to retrieve. This is used for pagination; see the nextToken + // response field. + NextToken *string `locationName:"nextToken" type:"string"` + + // Unique identifier for this participant. This is assigned by IVS and returned + // by CreateParticipantToken. // - // Arn is a required field - Arn *string `locationName:"arn" min:"1" type:"string" required:"true"` + // ParticipantId is a required field + ParticipantId *string `locationName:"participantId" type:"string" required:"true"` + + // ID of a session within the stage. + // + // SessionId is a required field + SessionId *string `locationName:"sessionId" min:"16" type:"string" required:"true"` + + // Stage ARN. + // + // StageArn is a required field + StageArn *string `locationName:"stageArn" min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -1302,7 +2493,7 @@ type DeleteStageInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DeleteStageInput) String() string { +func (s ListParticipantEventsInput) String() string { return awsutil.Prettify(s) } @@ -1311,18 +2502,30 @@ func (s DeleteStageInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. 
The member name will be present, but the // value will be replaced with "sensitive". -func (s DeleteStageInput) GoString() string { +func (s ListParticipantEventsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteStageInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteStageInput"} - if s.Arn == nil { - invalidParams.Add(request.NewErrParamRequired("Arn")) +func (s *ListParticipantEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListParticipantEventsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.Arn != nil && len(*s.Arn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + if s.ParticipantId == nil { + invalidParams.Add(request.NewErrParamRequired("ParticipantId")) + } + if s.SessionId == nil { + invalidParams.Add(request.NewErrParamRequired("SessionId")) + } + if s.SessionId != nil && len(*s.SessionId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("SessionId", 16)) + } + if s.StageArn == nil { + invalidParams.Add(request.NewErrParamRequired("StageArn")) + } + if s.StageArn != nil && len(*s.StageArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StageArn", 1)) } if invalidParams.Len() > 0 { @@ -1331,14 +2534,47 @@ func (s *DeleteStageInput) Validate() error { return nil } -// SetArn sets the Arn field's value. -func (s *DeleteStageInput) SetArn(v string) *DeleteStageInput { - s.Arn = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListParticipantEventsInput) SetMaxResults(v int64) *ListParticipantEventsInput { + s.MaxResults = &v return s } -type DeleteStageOutput struct { +// SetNextToken sets the NextToken field's value. 
+func (s *ListParticipantEventsInput) SetNextToken(v string) *ListParticipantEventsInput { + s.NextToken = &v + return s +} + +// SetParticipantId sets the ParticipantId field's value. +func (s *ListParticipantEventsInput) SetParticipantId(v string) *ListParticipantEventsInput { + s.ParticipantId = &v + return s +} + +// SetSessionId sets the SessionId field's value. +func (s *ListParticipantEventsInput) SetSessionId(v string) *ListParticipantEventsInput { + s.SessionId = &v + return s +} + +// SetStageArn sets the StageArn field's value. +func (s *ListParticipantEventsInput) SetStageArn(v string) *ListParticipantEventsInput { + s.StageArn = &v + return s +} + +type ListParticipantEventsOutput struct { _ struct{} `type:"structure"` + + // List of the matching events. + // + // Events is a required field + Events []*Event `locationName:"events" type:"list" required:"true"` + + // If there are more rooms than maxResults, use nextToken in the request to + // get the next set. + NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation. @@ -1346,7 +2582,7 @@ type DeleteStageOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DeleteStageOutput) String() string { +func (s ListParticipantEventsOutput) String() string { return awsutil.Prettify(s) } @@ -1355,22 +2591,54 @@ func (s DeleteStageOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DeleteStageOutput) GoString() string { +func (s ListParticipantEventsOutput) GoString() string { return s.String() } -type DisconnectParticipantInput struct { +// SetEvents sets the Events field's value. 
+func (s *ListParticipantEventsOutput) SetEvents(v []*Event) *ListParticipantEventsOutput { + s.Events = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListParticipantEventsOutput) SetNextToken(v string) *ListParticipantEventsOutput { + s.NextToken = &v + return s +} + +type ListParticipantsInput struct { _ struct{} `type:"structure"` - // Identifier of the participant to be disconnected. This is returned by CreateParticipantToken. - // - // ParticipantId is a required field - ParticipantId *string `locationName:"participantId" type:"string" required:"true"` + // Filters the response list to only show participants who published during + // the stage session. Only one of filterByUserId, filterByPublished, or filterByState + // can be provided per request. + FilterByPublished *bool `locationName:"filterByPublished" type:"boolean"` - // Description of why this participant is being disconnected. - Reason *string `locationName:"reason" type:"string"` + // Filters the response list to only show participants in the specified state. + // Only one of filterByUserId, filterByPublished, or filterByState can be provided + // per request. + FilterByState *string `locationName:"filterByState" type:"string" enum:"ParticipantState"` - // ARN of the stage to which the participant is attached. + // Filters the response list to match the specified user ID. Only one of filterByUserId, + // filterByPublished, or filterByState can be provided per request. A userId + // is a customer-assigned name to help identify the token; this can be used + // to link a participant to a user in the customer’s own systems. + FilterByUserId *string `locationName:"filterByUserId" type:"string"` + + // Maximum number of results to return. Default: 50. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The first participant to retrieve. This is used for pagination; see the nextToken + // response field. 
+ NextToken *string `locationName:"nextToken" type:"string"` + + // ID of the session within the stage. + // + // SessionId is a required field + SessionId *string `locationName:"sessionId" min:"16" type:"string" required:"true"` + + // Stage ARN. // // StageArn is a required field StageArn *string `locationName:"stageArn" min:"1" type:"string" required:"true"` @@ -1381,7 +2649,7 @@ type DisconnectParticipantInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DisconnectParticipantInput) String() string { +func (s ListParticipantsInput) String() string { return awsutil.Prettify(s) } @@ -1390,15 +2658,21 @@ func (s DisconnectParticipantInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DisconnectParticipantInput) GoString() string { +func (s ListParticipantsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DisconnectParticipantInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DisconnectParticipantInput"} - if s.ParticipantId == nil { - invalidParams.Add(request.NewErrParamRequired("ParticipantId")) +func (s *ListParticipantsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListParticipantsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.SessionId == nil { + invalidParams.Add(request.NewErrParamRequired("SessionId")) + } + if s.SessionId != nil && len(*s.SessionId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("SessionId", 16)) } if s.StageArn == nil { invalidParams.Add(request.NewErrParamRequired("StageArn")) @@ -1413,26 +2687,59 @@ func (s *DisconnectParticipantInput) Validate() error { return nil } -// SetParticipantId sets the ParticipantId field's value. -func (s *DisconnectParticipantInput) SetParticipantId(v string) *DisconnectParticipantInput { - s.ParticipantId = &v +// SetFilterByPublished sets the FilterByPublished field's value. +func (s *ListParticipantsInput) SetFilterByPublished(v bool) *ListParticipantsInput { + s.FilterByPublished = &v return s } -// SetReason sets the Reason field's value. -func (s *DisconnectParticipantInput) SetReason(v string) *DisconnectParticipantInput { - s.Reason = &v +// SetFilterByState sets the FilterByState field's value. +func (s *ListParticipantsInput) SetFilterByState(v string) *ListParticipantsInput { + s.FilterByState = &v + return s +} + +// SetFilterByUserId sets the FilterByUserId field's value. +func (s *ListParticipantsInput) SetFilterByUserId(v string) *ListParticipantsInput { + s.FilterByUserId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListParticipantsInput) SetMaxResults(v int64) *ListParticipantsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListParticipantsInput) SetNextToken(v string) *ListParticipantsInput { + s.NextToken = &v + return s +} + +// SetSessionId sets the SessionId field's value. +func (s *ListParticipantsInput) SetSessionId(v string) *ListParticipantsInput { + s.SessionId = &v return s } // SetStageArn sets the StageArn field's value. -func (s *DisconnectParticipantInput) SetStageArn(v string) *DisconnectParticipantInput { +func (s *ListParticipantsInput) SetStageArn(v string) *ListParticipantsInput { s.StageArn = &v return s } -type DisconnectParticipantOutput struct { +type ListParticipantsOutput struct { _ struct{} `type:"structure"` + + // If there are more rooms than maxResults, use nextToken in the request to + // get the next set. + NextToken *string `locationName:"nextToken" type:"string"` + + // List of the matching participants (summary information only). + // + // Participants is a required field + Participants []*ParticipantSummary `locationName:"participants" type:"list" required:"true"` } // String returns the string representation. @@ -1440,7 +2747,7 @@ type DisconnectParticipantOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DisconnectParticipantOutput) String() string { +func (s ListParticipantsOutput) String() string { return awsutil.Prettify(s) } @@ -1449,17 +2756,36 @@ func (s DisconnectParticipantOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DisconnectParticipantOutput) GoString() string { +func (s ListParticipantsOutput) GoString() string { return s.String() } -type GetStageInput struct { +// SetNextToken sets the NextToken field's value. 
+func (s *ListParticipantsOutput) SetNextToken(v string) *ListParticipantsOutput { + s.NextToken = &v + return s +} + +// SetParticipants sets the Participants field's value. +func (s *ListParticipantsOutput) SetParticipants(v []*ParticipantSummary) *ListParticipantsOutput { + s.Participants = v + return s +} + +type ListStageSessionsInput struct { _ struct{} `type:"structure"` - // ARN of the stage for which the information is to be retrieved. + // Maximum number of results to return. Default: 50. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The first stage to retrieve. This is used for pagination; see the nextToken + // response field. + NextToken *string `locationName:"nextToken" type:"string"` + + // Stage ARN. // - // Arn is a required field - Arn *string `locationName:"arn" min:"1" type:"string" required:"true"` + // StageArn is a required field + StageArn *string `locationName:"stageArn" min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -1467,7 +2793,7 @@ type GetStageInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetStageInput) String() string { +func (s ListStageSessionsInput) String() string { return awsutil.Prettify(s) } @@ -1476,18 +2802,21 @@ func (s GetStageInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetStageInput) GoString() string { +func (s ListStageSessionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetStageInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetStageInput"} - if s.Arn == nil { - invalidParams.Add(request.NewErrParamRequired("Arn")) +func (s *ListStageSessionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListStageSessionsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.Arn != nil && len(*s.Arn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + if s.StageArn == nil { + invalidParams.Add(request.NewErrParamRequired("StageArn")) + } + if s.StageArn != nil && len(*s.StageArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StageArn", 1)) } if invalidParams.Len() > 0 { @@ -1496,51 +2825,35 @@ func (s *GetStageInput) Validate() error { return nil } -// SetArn sets the Arn field's value. -func (s *GetStageInput) SetArn(v string) *GetStageInput { - s.Arn = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListStageSessionsInput) SetMaxResults(v int64) *ListStageSessionsInput { + s.MaxResults = &v return s } -type GetStageOutput struct { - _ struct{} `type:"structure"` - - // Object specifying a stage. - Stage *Stage `locationName:"stage" type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetStageOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetStageOutput) GoString() string { - return s.String() +// SetNextToken sets the NextToken field's value. 
+func (s *ListStageSessionsInput) SetNextToken(v string) *ListStageSessionsInput { + s.NextToken = &v + return s } -// SetStage sets the Stage field's value. -func (s *GetStageOutput) SetStage(v *Stage) *GetStageOutput { - s.Stage = v +// SetStageArn sets the StageArn field's value. +func (s *ListStageSessionsInput) SetStageArn(v string) *ListStageSessionsInput { + s.StageArn = &v return s } -type InternalServerException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` +type ListStageSessionsOutput struct { + _ struct{} `type:"structure"` - // Unexpected error during processing of request. - ExceptionMessage *string `locationName:"exceptionMessage" type:"string"` + // If there are more rooms than maxResults, use nextToken in the request to + // get the next set. + NextToken *string `locationName:"nextToken" type:"string"` - Message_ *string `locationName:"message" type:"string"` + // List of matching stage sessions. + // + // StageSessions is a required field + StageSessions []*StageSessionSummary `locationName:"stageSessions" type:"list" required:"true"` } // String returns the string representation. @@ -1548,7 +2861,7 @@ type InternalServerException struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s InternalServerException) String() string { +func (s ListStageSessionsOutput) String() string { return awsutil.Prettify(s) } @@ -1557,46 +2870,20 @@ func (s InternalServerException) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
-func (s InternalServerException) GoString() string { +func (s ListStageSessionsOutput) GoString() string { return s.String() } -func newErrorInternalServerException(v protocol.ResponseMetadata) error { - return &InternalServerException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InternalServerException) Code() string { - return "InternalServerException" -} - -// Message returns the exception's message. -func (s *InternalServerException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InternalServerException) OrigErr() error { - return nil -} - -func (s *InternalServerException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InternalServerException) StatusCode() int { - return s.RespMetadata.StatusCode +// SetNextToken sets the NextToken field's value. +func (s *ListStageSessionsOutput) SetNextToken(v string) *ListStageSessionsOutput { + s.NextToken = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s *InternalServerException) RequestID() string { - return s.RespMetadata.RequestID +// SetStageSessions sets the StageSessions field's value. +func (s *ListStageSessionsOutput) SetStageSessions(v []*StageSessionSummary) *ListStageSessionsOutput { + s.StageSessions = v + return s } type ListStagesInput struct { @@ -1779,6 +3066,163 @@ func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForRe return s } +// Object describing a participant that has joined a stage. +type Participant struct { + _ struct{} `type:"structure"` + + // Application-provided attributes to encode into the token and attach to a + // stage. Map keys and values can contain UTF-8 encoded text. The maximum length + // of this field is 1 KB total. 
This field is exposed to all stage participants + // and should not be used for personally identifying, confidential, or sensitive + // information. + Attributes map[string]*string `locationName:"attributes" type:"map"` + + // ISO 8601 timestamp (returned as a string) when the participant first joined + // the stage session. + FirstJoinTime *time.Time `locationName:"firstJoinTime" type:"timestamp" timestampFormat:"iso8601"` + + // Unique identifier for this participant, assigned by IVS. + ParticipantId *string `locationName:"participantId" type:"string"` + + // Whether the participant ever published to the stage session. + Published *bool `locationName:"published" type:"boolean"` + + // Whether the participant is connected to or disconnected from the stage. + State *string `locationName:"state" type:"string" enum:"ParticipantState"` + + // Customer-assigned name to help identify the token; this can be used to link + // a participant to a user in the customer’s own systems. This can be any + // UTF-8 encoded text. This field is exposed to all stage participants and should + // not be used for personally identifying, confidential, or sensitive information. + UserId *string `locationName:"userId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Participant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Participant) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. 
+func (s *Participant) SetAttributes(v map[string]*string) *Participant { + s.Attributes = v + return s +} + +// SetFirstJoinTime sets the FirstJoinTime field's value. +func (s *Participant) SetFirstJoinTime(v time.Time) *Participant { + s.FirstJoinTime = &v + return s +} + +// SetParticipantId sets the ParticipantId field's value. +func (s *Participant) SetParticipantId(v string) *Participant { + s.ParticipantId = &v + return s +} + +// SetPublished sets the Published field's value. +func (s *Participant) SetPublished(v bool) *Participant { + s.Published = &v + return s +} + +// SetState sets the State field's value. +func (s *Participant) SetState(v string) *Participant { + s.State = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *Participant) SetUserId(v string) *Participant { + s.UserId = &v + return s +} + +// Summary object describing a participant that has joined a stage. +type ParticipantSummary struct { + _ struct{} `type:"structure"` + + // ISO 8601 timestamp (returned as a string) when the participant first joined + // the stage session. + FirstJoinTime *time.Time `locationName:"firstJoinTime" type:"timestamp" timestampFormat:"iso8601"` + + // Unique identifier for this participant, assigned by IVS. + ParticipantId *string `locationName:"participantId" type:"string"` + + // Whether the participant ever published to the stage session. + Published *bool `locationName:"published" type:"boolean"` + + // Whether the participant is connected to or disconnected from the stage. + State *string `locationName:"state" type:"string" enum:"ParticipantState"` + + // Customer-assigned name to help identify the token; this can be used to link + // a participant to a user in the customer’s own systems. This can be any + // UTF-8 encoded text. This field is exposed to all stage participants and should + // not be used for personally identifying, confidential, or sensitive information. 
+ UserId *string `locationName:"userId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ParticipantSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ParticipantSummary) GoString() string { + return s.String() +} + +// SetFirstJoinTime sets the FirstJoinTime field's value. +func (s *ParticipantSummary) SetFirstJoinTime(v time.Time) *ParticipantSummary { + s.FirstJoinTime = &v + return s +} + +// SetParticipantId sets the ParticipantId field's value. +func (s *ParticipantSummary) SetParticipantId(v string) *ParticipantSummary { + s.ParticipantId = &v + return s +} + +// SetPublished sets the Published field's value. +func (s *ParticipantSummary) SetPublished(v bool) *ParticipantSummary { + s.Published = &v + return s +} + +// SetState sets the State field's value. +func (s *ParticipantSummary) SetState(v string) *ParticipantSummary { + s.State = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *ParticipantSummary) SetUserId(v string) *ParticipantSummary { + s.UserId = &v + return s +} + // Object specifying a participant token in a stage. type ParticipantToken struct { _ struct{} `type:"structure"` @@ -1792,7 +3236,7 @@ type ParticipantToken struct { Capabilities []*string `locationName:"capabilities" type:"list" enum:"ParticipantTokenCapability"` // Duration (in minutes), after which the participant token expires. Default: - // 60 (1 hour). + // 720 (12 hours). 
Duration *int64 `locationName:"duration" min:"1" type:"integer"` // ISO 8601 timestamp (returned as a string) for when this token expires. @@ -1808,9 +3252,10 @@ type ParticipantToken struct { // String and GoString methods. Token *string `locationName:"token" type:"string" sensitive:"true"` - // Name to help identify the token. This can be any UTF-8 encoded text. This - // field is exposed to all stage participants and should not be used for personally - // identifying, confidential, or sensitive information. + // Customer-assigned name to help identify the token; this can be used to link + // a participant to a user in the customer’s own systems. This can be any + // UTF-8 encoded text. This field is exposed to all stage participants and should + // not be used for personally identifying, confidential, or sensitive information. UserId *string `locationName:"userId" type:"string"` } @@ -1889,13 +3334,13 @@ type ParticipantTokenConfiguration struct { Capabilities []*string `locationName:"capabilities" type:"list" enum:"ParticipantTokenCapability"` // Duration (in minutes), after which the corresponding participant token expires. - // Default: 60 (1 hour). + // Default: 720 (12 hours). Duration *int64 `locationName:"duration" min:"1" type:"integer"` - // Name that can be specified to help identify the corresponding participant - // token. This can be any UTF-8 encoded text. This field is exposed to all stage - // participants and should not be used for personally identifying, confidential, - // or sensitive information. + // Customer-assigned name to help identify the token; this can be used to link + // a participant to a user in the customer’s own systems. This can be any + // UTF-8 encoded text. This field is exposed to all stage participants and should + // not be used for personally identifying, confidential, or sensitive information. 
UserId *string `locationName:"userId" type:"string"` } @@ -2217,6 +3662,111 @@ func (s *Stage) SetTags(v map[string]*string) *Stage { return s } +// A stage session begins when the first participant joins a stage and ends +// after the last participant leaves the stage. A stage session helps with debugging +// stages by grouping events and participants into shorter periods of time (i.e., +// a session), which is helpful when stages are used over long periods of time. +type StageSession struct { + _ struct{} `type:"structure"` + + // ISO 8601 timestamp (returned as a string) when the stage session ended. This + // is null if the stage is active. + EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"iso8601"` + + // ID of the session within the stage. + SessionId *string `locationName:"sessionId" min:"16" type:"string"` + + // ISO 8601 timestamp (returned as a string) when this stage session began. + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StageSession) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StageSession) GoString() string { + return s.String() +} + +// SetEndTime sets the EndTime field's value. +func (s *StageSession) SetEndTime(v time.Time) *StageSession { + s.EndTime = &v + return s +} + +// SetSessionId sets the SessionId field's value. 
+func (s *StageSession) SetSessionId(v string) *StageSession { + s.SessionId = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *StageSession) SetStartTime(v time.Time) *StageSession { + s.StartTime = &v + return s +} + +// Summary information about a stage session. +type StageSessionSummary struct { + _ struct{} `type:"structure"` + + // ISO 8601 timestamp (returned as a string) when the stage session ended. This + // is null if the stage is active. + EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"iso8601"` + + // ID of the session within the stage. + SessionId *string `locationName:"sessionId" min:"16" type:"string"` + + // ISO 8601 timestamp (returned as a string) when this stage session began. + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StageSessionSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StageSessionSummary) GoString() string { + return s.String() +} + +// SetEndTime sets the EndTime field's value. +func (s *StageSessionSummary) SetEndTime(v time.Time) *StageSessionSummary { + s.EndTime = &v + return s +} + +// SetSessionId sets the SessionId field's value. +func (s *StageSessionSummary) SetSessionId(v string) *StageSessionSummary { + s.SessionId = &v + return s +} + +// SetStartTime sets the StartTime field's value. 
+func (s *StageSessionSummary) SetStartTime(v time.Time) *StageSessionSummary { + s.StartTime = &v + return s +} + // Summary information about a stage. type StageSummary struct { _ struct{} `type:"structure"` @@ -2616,6 +4166,78 @@ func (s *ValidationException) RequestID() string { return s.RespMetadata.RequestID } +const ( + // EventErrorCodeInsufficientCapabilities is a EventErrorCode enum value + EventErrorCodeInsufficientCapabilities = "INSUFFICIENT_CAPABILITIES" +) + +// EventErrorCode_Values returns all elements of the EventErrorCode enum +func EventErrorCode_Values() []string { + return []string{ + EventErrorCodeInsufficientCapabilities, + } +} + +const ( + // EventNameJoined is a EventName enum value + EventNameJoined = "JOINED" + + // EventNameLeft is a EventName enum value + EventNameLeft = "LEFT" + + // EventNamePublishStarted is a EventName enum value + EventNamePublishStarted = "PUBLISH_STARTED" + + // EventNamePublishStopped is a EventName enum value + EventNamePublishStopped = "PUBLISH_STOPPED" + + // EventNameSubscribeStarted is a EventName enum value + EventNameSubscribeStarted = "SUBSCRIBE_STARTED" + + // EventNameSubscribeStopped is a EventName enum value + EventNameSubscribeStopped = "SUBSCRIBE_STOPPED" + + // EventNamePublishError is a EventName enum value + EventNamePublishError = "PUBLISH_ERROR" + + // EventNameSubscribeError is a EventName enum value + EventNameSubscribeError = "SUBSCRIBE_ERROR" + + // EventNameJoinError is a EventName enum value + EventNameJoinError = "JOIN_ERROR" +) + +// EventName_Values returns all elements of the EventName enum +func EventName_Values() []string { + return []string{ + EventNameJoined, + EventNameLeft, + EventNamePublishStarted, + EventNamePublishStopped, + EventNameSubscribeStarted, + EventNameSubscribeStopped, + EventNamePublishError, + EventNameSubscribeError, + EventNameJoinError, + } +} + +const ( + // ParticipantStateConnected is a ParticipantState enum value + ParticipantStateConnected = 
"CONNECTED" + + // ParticipantStateDisconnected is a ParticipantState enum value + ParticipantStateDisconnected = "DISCONNECTED" +) + +// ParticipantState_Values returns all elements of the ParticipantState enum +func ParticipantState_Values() []string { + return []string{ + ParticipantStateConnected, + ParticipantStateDisconnected, + } +} + const ( // ParticipantTokenCapabilityPublish is a ParticipantTokenCapability enum value ParticipantTokenCapabilityPublish = "PUBLISH" diff --git a/service/ivsrealtime/doc.go b/service/ivsrealtime/doc.go index c928e7019b1..5e433ce96ec 100644 --- a/service/ivsrealtime/doc.go +++ b/service/ivsrealtime/doc.go @@ -9,8 +9,18 @@ // using a standard HTTP API and an AWS EventBridge event stream for responses. // JSON is used for both requests and responses, including errors. // -// Terminology: The IVS stage API sometimes is referred to as the IVS RealTime -// API. +// Terminology: +// +// - The IVS stage API sometimes is referred to as the IVS RealTime API. +// +// - A participant token is an authorization token used to publish/subscribe +// to a stage. +// +// - A participant object represents participants (people) in the stage and +// contains information about them. When a token is created, it includes +// a participant ID; when a participant uses that token to join a stage, +// the participant is associated with that participant ID There is a 1:1 +// mapping between participant tokens and participants. // // # Resources // @@ -51,11 +61,23 @@ // - DisconnectParticipant — Disconnects a specified participant and revokes // the participant permanently from a specified stage. // +// - GetParticipant — Gets information about the specified participant +// token. +// // - GetStage — Gets information for the specified stage. // +// - GetStageSession — Gets information for the specified stage session. +// +// - ListParticipantEvents — Lists events for a specified participant that +// occurred during a specified stage session. 
+// +// - ListParticipants — Lists all participants in a specified stage session. +// // - ListStages — Gets summary information about all stages in your account, // in the AWS region where the API request is processed. // +// - ListStageSessions — Gets all sessions for a specified stage. +// // - UpdateStage — Updates a stage’s configuration. // // Tags Endpoints diff --git a/service/ivsrealtime/ivsrealtimeiface/interface.go b/service/ivsrealtime/ivsrealtimeiface/interface.go index 92122efa0c1..ae90886c376 100644 --- a/service/ivsrealtime/ivsrealtimeiface/interface.go +++ b/service/ivsrealtime/ivsrealtimeiface/interface.go @@ -76,10 +76,39 @@ type IVSRealTimeAPI interface { DisconnectParticipantWithContext(aws.Context, *ivsrealtime.DisconnectParticipantInput, ...request.Option) (*ivsrealtime.DisconnectParticipantOutput, error) DisconnectParticipantRequest(*ivsrealtime.DisconnectParticipantInput) (*request.Request, *ivsrealtime.DisconnectParticipantOutput) + GetParticipant(*ivsrealtime.GetParticipantInput) (*ivsrealtime.GetParticipantOutput, error) + GetParticipantWithContext(aws.Context, *ivsrealtime.GetParticipantInput, ...request.Option) (*ivsrealtime.GetParticipantOutput, error) + GetParticipantRequest(*ivsrealtime.GetParticipantInput) (*request.Request, *ivsrealtime.GetParticipantOutput) + GetStage(*ivsrealtime.GetStageInput) (*ivsrealtime.GetStageOutput, error) GetStageWithContext(aws.Context, *ivsrealtime.GetStageInput, ...request.Option) (*ivsrealtime.GetStageOutput, error) GetStageRequest(*ivsrealtime.GetStageInput) (*request.Request, *ivsrealtime.GetStageOutput) + GetStageSession(*ivsrealtime.GetStageSessionInput) (*ivsrealtime.GetStageSessionOutput, error) + GetStageSessionWithContext(aws.Context, *ivsrealtime.GetStageSessionInput, ...request.Option) (*ivsrealtime.GetStageSessionOutput, error) + GetStageSessionRequest(*ivsrealtime.GetStageSessionInput) (*request.Request, *ivsrealtime.GetStageSessionOutput) + + 
ListParticipantEvents(*ivsrealtime.ListParticipantEventsInput) (*ivsrealtime.ListParticipantEventsOutput, error) + ListParticipantEventsWithContext(aws.Context, *ivsrealtime.ListParticipantEventsInput, ...request.Option) (*ivsrealtime.ListParticipantEventsOutput, error) + ListParticipantEventsRequest(*ivsrealtime.ListParticipantEventsInput) (*request.Request, *ivsrealtime.ListParticipantEventsOutput) + + ListParticipantEventsPages(*ivsrealtime.ListParticipantEventsInput, func(*ivsrealtime.ListParticipantEventsOutput, bool) bool) error + ListParticipantEventsPagesWithContext(aws.Context, *ivsrealtime.ListParticipantEventsInput, func(*ivsrealtime.ListParticipantEventsOutput, bool) bool, ...request.Option) error + + ListParticipants(*ivsrealtime.ListParticipantsInput) (*ivsrealtime.ListParticipantsOutput, error) + ListParticipantsWithContext(aws.Context, *ivsrealtime.ListParticipantsInput, ...request.Option) (*ivsrealtime.ListParticipantsOutput, error) + ListParticipantsRequest(*ivsrealtime.ListParticipantsInput) (*request.Request, *ivsrealtime.ListParticipantsOutput) + + ListParticipantsPages(*ivsrealtime.ListParticipantsInput, func(*ivsrealtime.ListParticipantsOutput, bool) bool) error + ListParticipantsPagesWithContext(aws.Context, *ivsrealtime.ListParticipantsInput, func(*ivsrealtime.ListParticipantsOutput, bool) bool, ...request.Option) error + + ListStageSessions(*ivsrealtime.ListStageSessionsInput) (*ivsrealtime.ListStageSessionsOutput, error) + ListStageSessionsWithContext(aws.Context, *ivsrealtime.ListStageSessionsInput, ...request.Option) (*ivsrealtime.ListStageSessionsOutput, error) + ListStageSessionsRequest(*ivsrealtime.ListStageSessionsInput) (*request.Request, *ivsrealtime.ListStageSessionsOutput) + + ListStageSessionsPages(*ivsrealtime.ListStageSessionsInput, func(*ivsrealtime.ListStageSessionsOutput, bool) bool) error + ListStageSessionsPagesWithContext(aws.Context, *ivsrealtime.ListStageSessionsInput, func(*ivsrealtime.ListStageSessionsOutput, bool) 
bool, ...request.Option) error + ListStages(*ivsrealtime.ListStagesInput) (*ivsrealtime.ListStagesOutput, error) ListStagesWithContext(aws.Context, *ivsrealtime.ListStagesInput, ...request.Option) (*ivsrealtime.ListStagesOutput, error) ListStagesRequest(*ivsrealtime.ListStagesInput) (*request.Request, *ivsrealtime.ListStagesOutput) diff --git a/service/omics/api.go b/service/omics/api.go index 15c2f6cea4d..2d9b55cb463 100644 --- a/service/omics/api.go +++ b/service/omics/api.go @@ -10,10 +10,115 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) +const opAbortMultipartReadSetUpload = "AbortMultipartReadSetUpload" + +// AbortMultipartReadSetUploadRequest generates a "aws/request.Request" representing the +// client's request for the AbortMultipartReadSetUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AbortMultipartReadSetUpload for more information on using the AbortMultipartReadSetUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the AbortMultipartReadSetUploadRequest method. 
+// req, resp := client.AbortMultipartReadSetUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28/AbortMultipartReadSetUpload +func (c *Omics) AbortMultipartReadSetUploadRequest(input *AbortMultipartReadSetUploadInput) (req *request.Request, output *AbortMultipartReadSetUploadOutput) { + op := &request.Operation{ + Name: opAbortMultipartReadSetUpload, + HTTPMethod: "DELETE", + HTTPPath: "/sequencestore/{sequenceStoreId}/upload/{uploadId}/abort", + } + + if input == nil { + input = &AbortMultipartReadSetUploadInput{} + } + + output = &AbortMultipartReadSetUploadOutput{} + req = c.newRequest(op, input, output) + + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// AbortMultipartReadSetUpload API operation for Amazon Omics. +// +// Stops a multipart upload. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Omics's +// API operation AbortMultipartReadSetUpload for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// An unexpected error occurred. Try the request again. +// +// - NotSupportedOperationException +// The operation is not supported by Amazon Omics, or the API does not exist. +// +// - ServiceQuotaExceededException +// The request exceeds a service quota. +// +// - ThrottlingException +// The request was denied due to request throttling. +// +// - ValidationException +// The input fails to satisfy the constraints specified by an AWS service. 
+// +// - ResourceNotFoundException +// The target resource was not found in the current Region. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// - RequestTimeoutException +// The request timed out. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28/AbortMultipartReadSetUpload +func (c *Omics) AbortMultipartReadSetUpload(input *AbortMultipartReadSetUploadInput) (*AbortMultipartReadSetUploadOutput, error) { + req, out := c.AbortMultipartReadSetUploadRequest(input) + return out, req.Send() +} + +// AbortMultipartReadSetUploadWithContext is the same as AbortMultipartReadSetUpload with the addition of +// the ability to pass a context and additional request options. +// +// See AbortMultipartReadSetUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Omics) AbortMultipartReadSetUploadWithContext(ctx aws.Context, input *AbortMultipartReadSetUploadInput, opts ...request.Option) (*AbortMultipartReadSetUploadOutput, error) { + req, out := c.AbortMultipartReadSetUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opBatchDeleteReadSet = "BatchDeleteReadSet" // BatchDeleteReadSetRequest generates a "aws/request.Request" representing the @@ -52,6 +157,7 @@ func (c *Omics) BatchDeleteReadSetRequest(input *BatchDeleteReadSetInput) (req * output = &BatchDeleteReadSetOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -148,6 +254,7 @@ func (c *Omics) CancelAnnotationImportJobRequest(input *CancelAnnotationImportJo output = &CancelAnnotationImportJobOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("analytics-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) @@ -242,6 +349,7 @@ func (c *Omics) CancelRunRequest(input *CancelRunInput) (req *request.Request, o output = &CancelRunOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("workflows-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) @@ -345,6 +453,7 @@ func (c *Omics) CancelVariantImportJobRequest(input *CancelVariantImportJobInput output = &CancelVariantImportJobOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("analytics-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) @@ -401,6 +510,109 @@ func (c *Omics) CancelVariantImportJobWithContext(ctx aws.Context, input *Cancel return out, req.Send() } +const opCompleteMultipartReadSetUpload = "CompleteMultipartReadSetUpload" 
+ +// CompleteMultipartReadSetUploadRequest generates a "aws/request.Request" representing the +// client's request for the CompleteMultipartReadSetUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CompleteMultipartReadSetUpload for more information on using the CompleteMultipartReadSetUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CompleteMultipartReadSetUploadRequest method. +// req, resp := client.CompleteMultipartReadSetUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28/CompleteMultipartReadSetUpload +func (c *Omics) CompleteMultipartReadSetUploadRequest(input *CompleteMultipartReadSetUploadInput) (req *request.Request, output *CompleteMultipartReadSetUploadOutput) { + op := &request.Operation{ + Name: opCompleteMultipartReadSetUpload, + HTTPMethod: "POST", + HTTPPath: "/sequencestore/{sequenceStoreId}/upload/{uploadId}/complete", + } + + if input == nil { + input = &CompleteMultipartReadSetUploadInput{} + } + + output = &CompleteMultipartReadSetUploadOutput{} + req = c.newRequest(op, input, output) + + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("storage-", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// CompleteMultipartReadSetUpload API operation for Amazon Omics. +// +// Concludes a multipart upload once you have uploaded all the components. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Omics's +// API operation CompleteMultipartReadSetUpload for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// An unexpected error occurred. Try the request again. +// +// - NotSupportedOperationException +// The operation is not supported by Amazon Omics, or the API does not exist. +// +// - ServiceQuotaExceededException +// The request exceeds a service quota. +// +// - ThrottlingException +// The request was denied due to request throttling. +// +// - ValidationException +// The input fails to satisfy the constraints specified by an AWS service. +// +// - ResourceNotFoundException +// The target resource was not found in the current Region. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// - RequestTimeoutException +// The request timed out. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28/CompleteMultipartReadSetUpload +func (c *Omics) CompleteMultipartReadSetUpload(input *CompleteMultipartReadSetUploadInput) (*CompleteMultipartReadSetUploadOutput, error) { + req, out := c.CompleteMultipartReadSetUploadRequest(input) + return out, req.Send() +} + +// CompleteMultipartReadSetUploadWithContext is the same as CompleteMultipartReadSetUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CompleteMultipartReadSetUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Omics) CompleteMultipartReadSetUploadWithContext(ctx aws.Context, input *CompleteMultipartReadSetUploadInput, opts ...request.Option) (*CompleteMultipartReadSetUploadOutput, error) { + req, out := c.CompleteMultipartReadSetUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateAnnotationStore = "CreateAnnotationStore" // CreateAnnotationStoreRequest generates a "aws/request.Request" representing the @@ -439,6 +651,7 @@ func (c *Omics) CreateAnnotationStoreRequest(input *CreateAnnotationStoreInput) output = &CreateAnnotationStoreOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("analytics-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -500,6 +713,109 @@ func (c *Omics) CreateAnnotationStoreWithContext(ctx aws.Context, input *CreateA return out, req.Send() } +const opCreateMultipartReadSetUpload = "CreateMultipartReadSetUpload" + +// CreateMultipartReadSetUploadRequest generates a "aws/request.Request" representing the +// client's request for the CreateMultipartReadSetUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateMultipartReadSetUpload for more information on using the CreateMultipartReadSetUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateMultipartReadSetUploadRequest method. 
+// req, resp := client.CreateMultipartReadSetUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28/CreateMultipartReadSetUpload +func (c *Omics) CreateMultipartReadSetUploadRequest(input *CreateMultipartReadSetUploadInput) (req *request.Request, output *CreateMultipartReadSetUploadOutput) { + op := &request.Operation{ + Name: opCreateMultipartReadSetUpload, + HTTPMethod: "POST", + HTTPPath: "/sequencestore/{sequenceStoreId}/upload", + } + + if input == nil { + input = &CreateMultipartReadSetUploadInput{} + } + + output = &CreateMultipartReadSetUploadOutput{} + req = c.newRequest(op, input, output) + + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// CreateMultipartReadSetUpload API operation for Amazon Omics. +// +// Begins a multipart read set upload. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Omics's +// API operation CreateMultipartReadSetUpload for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// An unexpected error occurred. Try the request again. +// +// - NotSupportedOperationException +// The operation is not supported by Amazon Omics, or the API does not exist. +// +// - ServiceQuotaExceededException +// The request exceeds a service quota. +// +// - ThrottlingException +// The request was denied due to request throttling. +// +// - ValidationException +// The input fails to satisfy the constraints specified by an AWS service. +// +// - ResourceNotFoundException +// The target resource was not found in the current Region. 
+// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// - RequestTimeoutException +// The request timed out. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28/CreateMultipartReadSetUpload +func (c *Omics) CreateMultipartReadSetUpload(input *CreateMultipartReadSetUploadInput) (*CreateMultipartReadSetUploadOutput, error) { + req, out := c.CreateMultipartReadSetUploadRequest(input) + return out, req.Send() +} + +// CreateMultipartReadSetUploadWithContext is the same as CreateMultipartReadSetUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CreateMultipartReadSetUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Omics) CreateMultipartReadSetUploadWithContext(ctx aws.Context, input *CreateMultipartReadSetUploadInput, opts ...request.Option) (*CreateMultipartReadSetUploadOutput, error) { + req, out := c.CreateMultipartReadSetUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateReferenceStore = "CreateReferenceStore" // CreateReferenceStoreRequest generates a "aws/request.Request" representing the @@ -538,6 +854,7 @@ func (c *Omics) CreateReferenceStoreRequest(input *CreateReferenceStoreInput) (r output = &CreateReferenceStoreOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -634,6 +951,7 @@ func (c *Omics) CreateRunGroupRequest(input *CreateRunGroupInput) (req *request. 
output = &CreateRunGroupOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("workflows-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -736,6 +1054,7 @@ func (c *Omics) CreateSequenceStoreRequest(input *CreateSequenceStoreInput) (req output = &CreateSequenceStoreOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -832,6 +1151,7 @@ func (c *Omics) CreateVariantStoreRequest(input *CreateVariantStoreInput) (req * output = &CreateVariantStoreOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("analytics-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -931,6 +1251,7 @@ func (c *Omics) CreateWorkflowRequest(input *CreateWorkflowInput) (req *request. 
output = &CreateWorkflowOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("workflows-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -1033,6 +1354,7 @@ func (c *Omics) DeleteAnnotationStoreRequest(input *DeleteAnnotationStoreInput) output = &DeleteAnnotationStoreOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("analytics-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -1129,6 +1451,7 @@ func (c *Omics) DeleteReferenceRequest(input *DeleteReferenceInput) (req *reques output = &DeleteReferenceOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) @@ -1229,6 +1552,7 @@ func (c *Omics) DeleteReferenceStoreRequest(input *DeleteReferenceStoreInput) (r output = &DeleteReferenceStoreOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) @@ -1329,6 +1653,7 @@ func (c *Omics) DeleteRunRequest(input *DeleteRunInput) (req *request.Request, o output = &DeleteRunOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("workflows-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) @@ -1432,6 +1757,7 @@ func (c *Omics) DeleteRunGroupRequest(input *DeleteRunGroupInput) (req *request. 
output = &DeleteRunGroupOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("workflows-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) @@ -1535,6 +1861,7 @@ func (c *Omics) DeleteSequenceStoreRequest(input *DeleteSequenceStoreInput) (req output = &DeleteSequenceStoreOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) @@ -1635,6 +1962,7 @@ func (c *Omics) DeleteVariantStoreRequest(input *DeleteVariantStoreInput) (req * output = &DeleteVariantStoreOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("analytics-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -1731,6 +2059,7 @@ func (c *Omics) DeleteWorkflowRequest(input *DeleteWorkflowInput) (req *request. 
output = &DeleteWorkflowOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("workflows-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) @@ -1834,6 +2163,7 @@ func (c *Omics) GetAnnotationImportJobRequest(input *GetAnnotationImportJobInput output = &GetAnnotationImportJobOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("analytics-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -1927,6 +2257,7 @@ func (c *Omics) GetAnnotationStoreRequest(input *GetAnnotationStoreInput) (req * output = &GetAnnotationStoreOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("analytics-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -2020,6 +2351,7 @@ func (c *Omics) GetReadSetRequest(input *GetReadSetInput) (req *request.Request, output = &GetReadSetOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -2122,6 +2454,7 @@ func (c *Omics) GetReadSetActivationJobRequest(input *GetReadSetActivationJobInp output = &GetReadSetActivationJobOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -2218,6 +2551,7 @@ func (c *Omics) GetReadSetExportJobRequest(input *GetReadSetExportJobInput) (req output = &GetReadSetExportJobOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) 
req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -2314,6 +2648,7 @@ func (c *Omics) GetReadSetImportJobRequest(input *GetReadSetImportJobInput) (req output = &GetReadSetImportJobOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -2410,6 +2745,7 @@ func (c *Omics) GetReadSetMetadataRequest(input *GetReadSetMetadataInput) (req * output = &GetReadSetMetadataOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -2506,6 +2842,7 @@ func (c *Omics) GetReferenceRequest(input *GetReferenceInput) (req *request.Requ output = &GetReferenceOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -2605,6 +2942,7 @@ func (c *Omics) GetReferenceImportJobRequest(input *GetReferenceImportJobInput) output = &GetReferenceImportJobOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -2701,6 +3039,7 @@ func (c *Omics) GetReferenceMetadataRequest(input *GetReferenceMetadataInput) (r output = &GetReferenceMetadataOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -2797,6 +3136,7 @@ func (c *Omics) GetReferenceStoreRequest(input *GetReferenceStoreInput) (req *re output = &GetReferenceStoreOutput{} req = c.newRequest(op, input, output) + 
req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -2893,6 +3233,7 @@ func (c *Omics) GetRunRequest(input *GetRunInput) (req *request.Request, output output = &GetRunOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("workflows-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -2995,6 +3336,7 @@ func (c *Omics) GetRunGroupRequest(input *GetRunGroupInput) (req *request.Reques output = &GetRunGroupOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("workflows-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -3097,6 +3439,7 @@ func (c *Omics) GetRunTaskRequest(input *GetRunTaskInput) (req *request.Request, output = &GetRunTaskOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("workflows-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -3199,6 +3542,7 @@ func (c *Omics) GetSequenceStoreRequest(input *GetSequenceStoreInput) (req *requ output = &GetSequenceStoreOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -3295,6 +3639,7 @@ func (c *Omics) GetVariantImportJobRequest(input *GetVariantImportJobInput) (req output = &GetVariantImportJobOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("analytics-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -3388,6 +3733,7 @@ func (c *Omics) GetVariantStoreRequest(input *GetVariantStoreInput) (req *reques output = &GetVariantStoreOutput{} req = 
c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("analytics-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -3481,6 +3827,7 @@ func (c *Omics) GetWorkflowRequest(input *GetWorkflowInput) (req *request.Reques output = &GetWorkflowOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("workflows-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -3589,6 +3936,7 @@ func (c *Omics) ListAnnotationImportJobsRequest(input *ListAnnotationImportJobsI output = &ListAnnotationImportJobsOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("analytics-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -3739,6 +4087,7 @@ func (c *Omics) ListAnnotationStoresRequest(input *ListAnnotationStoresInput) (r output = &ListAnnotationStoresOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("analytics-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -3845,6 +4194,166 @@ func (c *Omics) ListAnnotationStoresPagesWithContext(ctx aws.Context, input *Lis return p.Err() } +const opListMultipartReadSetUploads = "ListMultipartReadSetUploads" + +// ListMultipartReadSetUploadsRequest generates a "aws/request.Request" representing the +// client's request for the ListMultipartReadSetUploads operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListMultipartReadSetUploads for more information on using the ListMultipartReadSetUploads +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListMultipartReadSetUploadsRequest method. +// req, resp := client.ListMultipartReadSetUploadsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28/ListMultipartReadSetUploads +func (c *Omics) ListMultipartReadSetUploadsRequest(input *ListMultipartReadSetUploadsInput) (req *request.Request, output *ListMultipartReadSetUploadsOutput) { + op := &request.Operation{ + Name: opListMultipartReadSetUploads, + HTTPMethod: "POST", + HTTPPath: "/sequencestore/{sequenceStoreId}/uploads", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListMultipartReadSetUploadsInput{} + } + + output = &ListMultipartReadSetUploadsOutput{} + req = c.newRequest(op, input, output) + + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// ListMultipartReadSetUploads API operation for Amazon Omics. +// +// Lists all multipart read set uploads and their statuses. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Omics's +// API operation ListMultipartReadSetUploads for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// An unexpected error occurred. Try the request again. 
+// +// - NotSupportedOperationException +// The operation is not supported by Amazon Omics, or the API does not exist. +// +// - ServiceQuotaExceededException +// The request exceeds a service quota. +// +// - ThrottlingException +// The request was denied due to request throttling. +// +// - ValidationException +// The input fails to satisfy the constraints specified by an AWS service. +// +// - ResourceNotFoundException +// The target resource was not found in the current Region. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// - RequestTimeoutException +// The request timed out. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28/ListMultipartReadSetUploads +func (c *Omics) ListMultipartReadSetUploads(input *ListMultipartReadSetUploadsInput) (*ListMultipartReadSetUploadsOutput, error) { + req, out := c.ListMultipartReadSetUploadsRequest(input) + return out, req.Send() +} + +// ListMultipartReadSetUploadsWithContext is the same as ListMultipartReadSetUploads with the addition of +// the ability to pass a context and additional request options. +// +// See ListMultipartReadSetUploads for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Omics) ListMultipartReadSetUploadsWithContext(ctx aws.Context, input *ListMultipartReadSetUploadsInput, opts ...request.Option) (*ListMultipartReadSetUploadsOutput, error) { + req, out := c.ListMultipartReadSetUploadsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListMultipartReadSetUploadsPages iterates over the pages of a ListMultipartReadSetUploads operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See ListMultipartReadSetUploads method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMultipartReadSetUploads operation. +// pageNum := 0 +// err := client.ListMultipartReadSetUploadsPages(params, +// func(page *omics.ListMultipartReadSetUploadsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *Omics) ListMultipartReadSetUploadsPages(input *ListMultipartReadSetUploadsInput, fn func(*ListMultipartReadSetUploadsOutput, bool) bool) error { + return c.ListMultipartReadSetUploadsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListMultipartReadSetUploadsPagesWithContext same as ListMultipartReadSetUploadsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Omics) ListMultipartReadSetUploadsPagesWithContext(ctx aws.Context, input *ListMultipartReadSetUploadsInput, fn func(*ListMultipartReadSetUploadsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListMultipartReadSetUploadsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListMultipartReadSetUploadsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListMultipartReadSetUploadsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListReadSetActivationJobs = "ListReadSetActivationJobs" // ListReadSetActivationJobsRequest generates a "aws/request.Request" representing the @@ -3889,6 +4398,7 @@ func (c *Omics) ListReadSetActivationJobsRequest(input *ListReadSetActivationJob output = &ListReadSetActivationJobsOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -4042,6 +4552,7 @@ func (c *Omics) ListReadSetExportJobsRequest(input *ListReadSetExportJobsInput) output = &ListReadSetExportJobsOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -4195,6 +4706,7 @@ func (c *Omics) ListReadSetImportJobsRequest(input *ListReadSetImportJobsInput) output = &ListReadSetImportJobsOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -4304,36 +4816,36 @@ func (c *Omics) ListReadSetImportJobsPagesWithContext(ctx aws.Context, input *Li return p.Err() } -const opListReadSets = "ListReadSets" +const opListReadSetUploadParts = "ListReadSetUploadParts" -// ListReadSetsRequest generates a "aws/request.Request" representing the -// client's request for the ListReadSets operation. The "output" return +// ListReadSetUploadPartsRequest generates a "aws/request.Request" representing the +// client's request for the ListReadSetUploadParts operation. The "output" return // value will be populated with the request's response once the request completes // successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListReadSets for more information on using the ListReadSets +// See ListReadSetUploadParts for more information on using the ListReadSetUploadParts // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the ListReadSetsRequest method. -// req, resp := client.ListReadSetsRequest(params) +// // Example sending a request using the ListReadSetUploadPartsRequest method. +// req, resp := client.ListReadSetUploadPartsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28/ListReadSets -func (c *Omics) ListReadSetsRequest(input *ListReadSetsInput) (req *request.Request, output *ListReadSetsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28/ListReadSetUploadParts +func (c *Omics) ListReadSetUploadPartsRequest(input *ListReadSetUploadPartsInput) (req *request.Request, output *ListReadSetUploadPartsOutput) { op := &request.Operation{ - Name: opListReadSets, + Name: opListReadSetUploadParts, HTTPMethod: "POST", - HTTPPath: "/sequencestore/{sequenceStoreId}/readsets", + HTTPPath: "/sequencestore/{sequenceStoreId}/upload/{uploadId}/parts", Paginator: &request.Paginator{ InputTokens: []string{"nextToken"}, OutputTokens: []string{"nextToken"}, @@ -4343,32 +4855,40 @@ func (c *Omics) ListReadSetsRequest(input *ListReadSetsInput) (req *request.Requ } if input == nil { - input = &ListReadSetsInput{} + input = &ListReadSetUploadPartsInput{} } - output = &ListReadSetsOutput{} + output = &ListReadSetUploadPartsOutput{} req = c.newRequest(op, input, output) + 
req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return } -// ListReadSets API operation for Amazon Omics. +// ListReadSetUploadParts API operation for Amazon Omics. // -// Retrieves a list of read sets. +// This operation will list all parts in a requested multipart upload for a +// sequence store. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Omics's -// API operation ListReadSets for usage and error information. +// API operation ListReadSetUploadParts for usage and error information. // // Returned Error Types: // // - InternalServerException // An unexpected error occurred. Try the request again. // +// - NotSupportedOperationException +// The operation is not supported by Amazon Omics, or the API does not exist. +// +// - ServiceQuotaExceededException +// The request exceeds a service quota. +// // - ThrottlingException // The request was denied due to request throttling. // @@ -4384,64 +4904,64 @@ func (c *Omics) ListReadSetsRequest(input *ListReadSetsInput) (req *request.Requ // - RequestTimeoutException // The request timed out. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28/ListReadSets -func (c *Omics) ListReadSets(input *ListReadSetsInput) (*ListReadSetsOutput, error) { - req, out := c.ListReadSetsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28/ListReadSetUploadParts +func (c *Omics) ListReadSetUploadParts(input *ListReadSetUploadPartsInput) (*ListReadSetUploadPartsOutput, error) { + req, out := c.ListReadSetUploadPartsRequest(input) return out, req.Send() } -// ListReadSetsWithContext is the same as ListReadSets with the addition of +// ListReadSetUploadPartsWithContext is the same as ListReadSetUploadParts with the addition of // the ability to pass a context and additional request options. // -// See ListReadSets for details on how to use this API operation. +// See ListReadSetUploadParts for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Omics) ListReadSetsWithContext(ctx aws.Context, input *ListReadSetsInput, opts ...request.Option) (*ListReadSetsOutput, error) { - req, out := c.ListReadSetsRequest(input) +func (c *Omics) ListReadSetUploadPartsWithContext(ctx aws.Context, input *ListReadSetUploadPartsInput, opts ...request.Option) (*ListReadSetUploadPartsOutput, error) { + req, out := c.ListReadSetUploadPartsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListReadSetsPages iterates over the pages of a ListReadSets operation, +// ListReadSetUploadPartsPages iterates over the pages of a ListReadSetUploadParts operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. 
// -// See ListReadSets method for more information on how to use this operation. +// See ListReadSetUploadParts method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListReadSets operation. +// // Example iterating over at most 3 pages of a ListReadSetUploadParts operation. // pageNum := 0 -// err := client.ListReadSetsPages(params, -// func(page *omics.ListReadSetsOutput, lastPage bool) bool { +// err := client.ListReadSetUploadPartsPages(params, +// func(page *omics.ListReadSetUploadPartsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) -func (c *Omics) ListReadSetsPages(input *ListReadSetsInput, fn func(*ListReadSetsOutput, bool) bool) error { - return c.ListReadSetsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Omics) ListReadSetUploadPartsPages(input *ListReadSetUploadPartsInput, fn func(*ListReadSetUploadPartsOutput, bool) bool) error { + return c.ListReadSetUploadPartsPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListReadSetsPagesWithContext same as ListReadSetsPages except +// ListReadSetUploadPartsPagesWithContext same as ListReadSetUploadPartsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *Omics) ListReadSetsPagesWithContext(ctx aws.Context, input *ListReadSetsInput, fn func(*ListReadSetsOutput, bool) bool, opts ...request.Option) error { +func (c *Omics) ListReadSetUploadPartsPagesWithContext(ctx aws.Context, input *ListReadSetUploadPartsInput, fn func(*ListReadSetUploadPartsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListReadSetsInput + var inCpy *ListReadSetUploadPartsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListReadSetsRequest(inCpy) + req, _ := c.ListReadSetUploadPartsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -4449,7 +4969,7 @@ func (c *Omics) ListReadSetsPagesWithContext(ctx aws.Context, input *ListReadSet } for p.Next() { - if !fn(p.Page().(*ListReadSetsOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListReadSetUploadPartsOutput), !p.HasNextPage()) { break } } @@ -4457,36 +4977,36 @@ func (c *Omics) ListReadSetsPagesWithContext(ctx aws.Context, input *ListReadSet return p.Err() } -const opListReferenceImportJobs = "ListReferenceImportJobs" +const opListReadSets = "ListReadSets" -// ListReferenceImportJobsRequest generates a "aws/request.Request" representing the -// client's request for the ListReferenceImportJobs operation. The "output" return +// ListReadSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListReadSets operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListReferenceImportJobs for more information on using the ListReferenceImportJobs +// See ListReadSets for more information on using the ListReadSets // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the ListReferenceImportJobsRequest method. -// req, resp := client.ListReferenceImportJobsRequest(params) +// // Example sending a request using the ListReadSetsRequest method. +// req, resp := client.ListReadSetsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28/ListReferenceImportJobs -func (c *Omics) ListReferenceImportJobsRequest(input *ListReferenceImportJobsInput) (req *request.Request, output *ListReferenceImportJobsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28/ListReadSets +func (c *Omics) ListReadSetsRequest(input *ListReadSetsInput) (req *request.Request, output *ListReadSetsOutput) { op := &request.Operation{ - Name: opListReferenceImportJobs, + Name: opListReadSets, HTTPMethod: "POST", - HTTPPath: "/referencestore/{referenceStoreId}/importjobs", + HTTPPath: "/sequencestore/{sequenceStoreId}/readsets", Paginator: &request.Paginator{ InputTokens: []string{"nextToken"}, OutputTokens: []string{"nextToken"}, @@ -4496,26 +5016,27 @@ func (c *Omics) ListReferenceImportJobsRequest(input *ListReferenceImportJobsInp } if input == nil { - input = &ListReferenceImportJobsInput{} + input = &ListReadSetsInput{} } - output = &ListReferenceImportJobsOutput{} + output = &ListReadSetsOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return } -// ListReferenceImportJobs API operation for Amazon Omics. +// ListReadSets API operation for Amazon Omics. // -// Retrieves a list of reference import jobs. +// Retrieves a list of read sets. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Omics's -// API operation ListReferenceImportJobs for usage and error information. +// API operation ListReadSets for usage and error information. // // Returned Error Types: // @@ -4537,64 +5058,64 @@ func (c *Omics) ListReferenceImportJobsRequest(input *ListReferenceImportJobsInp // - RequestTimeoutException // The request timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28/ListReferenceImportJobs -func (c *Omics) ListReferenceImportJobs(input *ListReferenceImportJobsInput) (*ListReferenceImportJobsOutput, error) { - req, out := c.ListReferenceImportJobsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28/ListReadSets +func (c *Omics) ListReadSets(input *ListReadSetsInput) (*ListReadSetsOutput, error) { + req, out := c.ListReadSetsRequest(input) return out, req.Send() } -// ListReferenceImportJobsWithContext is the same as ListReferenceImportJobs with the addition of +// ListReadSetsWithContext is the same as ListReadSets with the addition of // the ability to pass a context and additional request options. // -// See ListReferenceImportJobs for details on how to use this API operation. +// See ListReadSets for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *Omics) ListReferenceImportJobsWithContext(ctx aws.Context, input *ListReferenceImportJobsInput, opts ...request.Option) (*ListReferenceImportJobsOutput, error) { - req, out := c.ListReferenceImportJobsRequest(input) +func (c *Omics) ListReadSetsWithContext(ctx aws.Context, input *ListReadSetsInput, opts ...request.Option) (*ListReadSetsOutput, error) { + req, out := c.ListReadSetsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListReferenceImportJobsPages iterates over the pages of a ListReferenceImportJobs operation, +// ListReadSetsPages iterates over the pages of a ListReadSets operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListReferenceImportJobs method for more information on how to use this operation. +// See ListReadSets method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListReferenceImportJobs operation. +// // Example iterating over at most 3 pages of a ListReadSets operation. 
// pageNum := 0 -// err := client.ListReferenceImportJobsPages(params, -// func(page *omics.ListReferenceImportJobsOutput, lastPage bool) bool { +// err := client.ListReadSetsPages(params, +// func(page *omics.ListReadSetsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) -func (c *Omics) ListReferenceImportJobsPages(input *ListReferenceImportJobsInput, fn func(*ListReferenceImportJobsOutput, bool) bool) error { - return c.ListReferenceImportJobsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Omics) ListReadSetsPages(input *ListReadSetsInput, fn func(*ListReadSetsOutput, bool) bool) error { + return c.ListReadSetsPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListReferenceImportJobsPagesWithContext same as ListReferenceImportJobsPages except +// ListReadSetsPagesWithContext same as ListReadSetsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Omics) ListReferenceImportJobsPagesWithContext(ctx aws.Context, input *ListReferenceImportJobsInput, fn func(*ListReferenceImportJobsOutput, bool) bool, opts ...request.Option) error { +func (c *Omics) ListReadSetsPagesWithContext(ctx aws.Context, input *ListReadSetsInput, fn func(*ListReadSetsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListReferenceImportJobsInput + var inCpy *ListReadSetsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListReferenceImportJobsRequest(inCpy) + req, _ := c.ListReadSetsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) 
return req, nil @@ -4602,7 +5123,7 @@ func (c *Omics) ListReferenceImportJobsPagesWithContext(ctx aws.Context, input * } for p.Next() { - if !fn(p.Page().(*ListReferenceImportJobsOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListReadSetsOutput), !p.HasNextPage()) { break } } @@ -4610,7 +5131,161 @@ func (c *Omics) ListReferenceImportJobsPagesWithContext(ctx aws.Context, input * return p.Err() } -const opListReferenceStores = "ListReferenceStores" +const opListReferenceImportJobs = "ListReferenceImportJobs" + +// ListReferenceImportJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListReferenceImportJobs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListReferenceImportJobs for more information on using the ListReferenceImportJobs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListReferenceImportJobsRequest method. 
+// req, resp := client.ListReferenceImportJobsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28/ListReferenceImportJobs +func (c *Omics) ListReferenceImportJobsRequest(input *ListReferenceImportJobsInput) (req *request.Request, output *ListReferenceImportJobsOutput) { + op := &request.Operation{ + Name: opListReferenceImportJobs, + HTTPMethod: "POST", + HTTPPath: "/referencestore/{referenceStoreId}/importjobs", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListReferenceImportJobsInput{} + } + + output = &ListReferenceImportJobsOutput{} + req = c.newRequest(op, input, output) + + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// ListReferenceImportJobs API operation for Amazon Omics. +// +// Retrieves a list of reference import jobs. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Omics's +// API operation ListReferenceImportJobs for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// An unexpected error occurred. Try the request again. +// +// - ThrottlingException +// The request was denied due to request throttling. +// +// - ValidationException +// The input fails to satisfy the constraints specified by an AWS service. +// +// - ResourceNotFoundException +// The target resource was not found in the current Region. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. 
+// +// - RequestTimeoutException +// The request timed out. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28/ListReferenceImportJobs +func (c *Omics) ListReferenceImportJobs(input *ListReferenceImportJobsInput) (*ListReferenceImportJobsOutput, error) { + req, out := c.ListReferenceImportJobsRequest(input) + return out, req.Send() +} + +// ListReferenceImportJobsWithContext is the same as ListReferenceImportJobs with the addition of +// the ability to pass a context and additional request options. +// +// See ListReferenceImportJobs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Omics) ListReferenceImportJobsWithContext(ctx aws.Context, input *ListReferenceImportJobsInput, opts ...request.Option) (*ListReferenceImportJobsOutput, error) { + req, out := c.ListReferenceImportJobsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListReferenceImportJobsPages iterates over the pages of a ListReferenceImportJobs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListReferenceImportJobs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListReferenceImportJobs operation. 
+// pageNum := 0 +// err := client.ListReferenceImportJobsPages(params, +// func(page *omics.ListReferenceImportJobsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *Omics) ListReferenceImportJobsPages(input *ListReferenceImportJobsInput, fn func(*ListReferenceImportJobsOutput, bool) bool) error { + return c.ListReferenceImportJobsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListReferenceImportJobsPagesWithContext same as ListReferenceImportJobsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Omics) ListReferenceImportJobsPagesWithContext(ctx aws.Context, input *ListReferenceImportJobsInput, fn func(*ListReferenceImportJobsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListReferenceImportJobsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListReferenceImportJobsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListReferenceImportJobsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListReferenceStores = "ListReferenceStores" // ListReferenceStoresRequest generates a "aws/request.Request" representing the // client's request for the ListReferenceStores operation. 
The "output" return @@ -4654,6 +5329,7 @@ func (c *Omics) ListReferenceStoresRequest(input *ListReferenceStoresInput) (req output = &ListReferenceStoresOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -4804,6 +5480,7 @@ func (c *Omics) ListReferencesRequest(input *ListReferencesInput) (req *request. output = &ListReferencesOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -4957,6 +5634,7 @@ func (c *Omics) ListRunGroupsRequest(input *ListRunGroupsInput) (req *request.Re output = &ListRunGroupsOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("workflows-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -5116,6 +5794,7 @@ func (c *Omics) ListRunTasksRequest(input *ListRunTasksInput) (req *request.Requ output = &ListRunTasksOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("workflows-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -5275,6 +5954,7 @@ func (c *Omics) ListRunsRequest(input *ListRunsInput) (req *request.Request, out output = &ListRunsOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("workflows-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -5434,6 +6114,7 @@ func (c *Omics) ListSequenceStoresRequest(input *ListSequenceStoresInput) (req * output = &ListSequenceStoresOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) 
req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -5578,6 +6259,7 @@ func (c *Omics) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req output = &ListTagsForResourceOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("tags-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -5686,6 +6368,7 @@ func (c *Omics) ListVariantImportJobsRequest(input *ListVariantImportJobsInput) output = &ListVariantImportJobsOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("analytics-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -5836,6 +6519,7 @@ func (c *Omics) ListVariantStoresRequest(input *ListVariantStoresInput) (req *re output = &ListVariantStoresOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("analytics-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -5986,6 +6670,7 @@ func (c *Omics) ListWorkflowsRequest(input *ListWorkflowsInput) (req *request.Re output = &ListWorkflowsOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("workflows-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -6139,6 +6824,7 @@ func (c *Omics) StartAnnotationImportJobRequest(input *StartAnnotationImportJobI output = &StartAnnotationImportJobOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("analytics-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -6235,6 +6921,7 @@ func (c *Omics) StartReadSetActivationJobRequest(input *StartReadSetActivationJo output = &StartReadSetActivationJobOutput{} req = c.newRequest(op, input, output) + 
req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -6335,6 +7022,7 @@ func (c *Omics) StartReadSetExportJobRequest(input *StartReadSetExportJobInput) output = &StartReadSetExportJobOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -6434,6 +7122,7 @@ func (c *Omics) StartReadSetImportJobRequest(input *StartReadSetImportJobInput) output = &StartReadSetImportJobOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -6533,6 +7222,7 @@ func (c *Omics) StartReferenceImportJobRequest(input *StartReferenceImportJobInp output = &StartReferenceImportJobOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("control-storage-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -6632,6 +7322,7 @@ func (c *Omics) StartRunRequest(input *StartRunInput) (req *request.Request, out output = &StartRunOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("workflows-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -6734,6 +7425,7 @@ func (c *Omics) StartVariantImportJobRequest(input *StartVariantImportJobInput) output = &StartVariantImportJobOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("analytics-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -6830,6 +7522,7 @@ func (c *Omics) TagResourceRequest(input *TagResourceInput) (req *request.Reques 
output = &TagResourceOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("tags-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) @@ -6933,6 +7626,7 @@ func (c *Omics) UntagResourceRequest(input *UntagResourceInput) (req *request.Re output = &UntagResourceOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("tags-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) @@ -7036,6 +7730,7 @@ func (c *Omics) UpdateAnnotationStoreRequest(input *UpdateAnnotationStoreInput) output = &UpdateAnnotationStoreOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("analytics-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -7129,6 +7824,7 @@ func (c *Omics) UpdateRunGroupRequest(input *UpdateRunGroupInput) (req *request. output = &UpdateRunGroupOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("workflows-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) @@ -7232,6 +7928,7 @@ func (c *Omics) UpdateVariantStoreRequest(input *UpdateVariantStoreInput) (req * output = &UpdateVariantStoreOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("analytics-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -7325,6 +8022,7 @@ func (c *Omics) UpdateWorkflowRequest(input *UpdateWorkflowInput) (req *request. 
output = &UpdateWorkflowOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("workflows-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) @@ -7390,72 +8088,267 @@ func (c *Omics) UpdateWorkflowWithContext(ctx aws.Context, input *UpdateWorkflow return out, req.Send() } -// You do not have sufficient access to perform this action. -type AccessDeniedException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} +const opUploadReadSetPart = "UploadReadSetPart" -// String returns the string representation. +// UploadReadSetPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadReadSetPart operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AccessDeniedException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s AccessDeniedException) GoString() string { - return s.String() -} - -func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { - return &AccessDeniedException{ - RespMetadata: v, +// See UploadReadSetPart for more information on using the UploadReadSetPart +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UploadReadSetPartRequest method. +// req, resp := client.UploadReadSetPartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28/UploadReadSetPart +func (c *Omics) UploadReadSetPartRequest(input *UploadReadSetPartInput) (req *request.Request, output *UploadReadSetPartOutput) { + op := &request.Operation{ + Name: opUploadReadSetPart, + HTTPMethod: "PUT", + HTTPPath: "/sequencestore/{sequenceStoreId}/upload/{uploadId}/part", } -} - -// Code returns the exception type name. -func (s *AccessDeniedException) Code() string { - return "AccessDeniedException" -} -// Message returns the exception's message. -func (s *AccessDeniedException) Message() string { - if s.Message_ != nil { - return *s.Message_ + if input == nil { + input = &UploadReadSetPartInput{} } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *AccessDeniedException) OrigErr() error { - return nil -} - -func (s *AccessDeniedException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *AccessDeniedException) StatusCode() int { - return s.RespMetadata.StatusCode -} -// RequestID returns the service's response RequestID for request. 
-func (s *AccessDeniedException) RequestID() string { - return s.RespMetadata.RequestID + output = &UploadReadSetPartOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Sign.Remove(v4.SignRequestHandler) + handler := v4.BuildNamedHandler("v4.CustomSignerHandler", v4.WithUnsignedPayload) + req.Handlers.Sign.PushFrontNamed(handler) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("storage-", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return } -// A read set activation job filter. -type ActivateReadSetFilter struct { +// UploadReadSetPart API operation for Amazon Omics. +// +// This operation uploads a specific part of a read set. If you upload a new +// part using a previously used part number, the previously uploaded part will +// be overwritten. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Omics's +// API operation UploadReadSetPart for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// An unexpected error occurred. Try the request again. +// +// - NotSupportedOperationException +// The operation is not supported by Amazon Omics, or the API does not exist. +// +// - ServiceQuotaExceededException +// The request exceeds a service quota. +// +// - ThrottlingException +// The request was denied due to request throttling. +// +// - ValidationException +// The input fails to satisfy the constraints specified by an AWS service. +// +// - ResourceNotFoundException +// The target resource was not found in the current Region. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// - RequestTimeoutException +// The request timed out. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28/UploadReadSetPart +func (c *Omics) UploadReadSetPart(input *UploadReadSetPartInput) (*UploadReadSetPartOutput, error) { + req, out := c.UploadReadSetPartRequest(input) + return out, req.Send() +} + +// UploadReadSetPartWithContext is the same as UploadReadSetPart with the addition of +// the ability to pass a context and additional request options. +// +// See UploadReadSetPart for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Omics) UploadReadSetPartWithContext(ctx aws.Context, input *UploadReadSetPartInput, opts ...request.Option) (*UploadReadSetPartOutput, error) { + req, out := c.UploadReadSetPartRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +type AbortMultipartReadSetUploadInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The sequence store ID for the store involved in the multipart upload. + // + // SequenceStoreId is a required field + SequenceStoreId *string `location:"uri" locationName:"sequenceStoreId" min:"10" type:"string" required:"true"` + + // The ID for the multipart upload. + // + // UploadId is a required field + UploadId *string `location:"uri" locationName:"uploadId" min:"10" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AbortMultipartReadSetUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AbortMultipartReadSetUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AbortMultipartReadSetUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AbortMultipartReadSetUploadInput"} + if s.SequenceStoreId == nil { + invalidParams.Add(request.NewErrParamRequired("SequenceStoreId")) + } + if s.SequenceStoreId != nil && len(*s.SequenceStoreId) < 10 { + invalidParams.Add(request.NewErrParamMinLen("SequenceStoreId", 10)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + if s.UploadId != nil && len(*s.UploadId) < 10 { + invalidParams.Add(request.NewErrParamMinLen("UploadId", 10)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSequenceStoreId sets the SequenceStoreId field's value. +func (s *AbortMultipartReadSetUploadInput) SetSequenceStoreId(v string) *AbortMultipartReadSetUploadInput { + s.SequenceStoreId = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *AbortMultipartReadSetUploadInput) SetUploadId(v string) *AbortMultipartReadSetUploadInput { + s.UploadId = &v + return s +} + +type AbortMultipartReadSetUploadOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AbortMultipartReadSetUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AbortMultipartReadSetUploadOutput) GoString() string { + return s.String() +} + +// You do not have sufficient access to perform this action. +type AccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessDeniedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessDeniedException) GoString() string { + return s.String() +} + +func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { + return &AccessDeniedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AccessDeniedException) Code() string { + return "AccessDeniedException" +} + +// Message returns the exception's message. +func (s *AccessDeniedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AccessDeniedException) OrigErr() error { + return nil +} + +func (s *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// A read set activation job filter. +type ActivateReadSetFilter struct { _ struct{} `type:"structure"` // The filter's start date. @@ -7730,6 +8623,9 @@ func (s *AnnotationImportItemSource) SetSource(v string) *AnnotationImportItemSo type AnnotationImportJobItem struct { _ struct{} `type:"structure"` + // The annotation schema generated by the parsed annotation data. + AnnotationFields map[string]*string `locationName:"annotationFields" type:"map"` + // When the job completed. CompletionTime *time.Time `locationName:"completionTime" type:"timestamp" timestampFormat:"iso8601"` @@ -7785,6 +8681,12 @@ func (s AnnotationImportJobItem) GoString() string { return s.String() } +// SetAnnotationFields sets the AnnotationFields field's value. +func (s *AnnotationImportJobItem) SetAnnotationFields(v map[string]*string) *AnnotationImportJobItem { + s.AnnotationFields = v + return s +} + // SetCompletionTime sets the CompletionTime field's value. func (s *AnnotationImportJobItem) SetCompletionTime(v time.Time) *AnnotationImportJobItem { s.CompletionTime = &v @@ -8298,12 +9200,23 @@ func (s CancelVariantImportJobOutput) GoString() string { return s.String() } -// The request cannot be applied to the target resource in its current state. -type ConflictException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` +type CompleteMultipartReadSetUploadInput struct { + _ struct{} `type:"structure"` - Message_ *string `locationName:"message" type:"string"` + // The individual uploads or parts of a multipart upload. 
+ // + // Parts is a required field + Parts []*CompleteReadSetUploadPartListItem `locationName:"parts" type:"list" required:"true"` + + // The sequence store ID for the store involved in the multipart upload. + // + // SequenceStoreId is a required field + SequenceStoreId *string `location:"uri" locationName:"sequenceStoreId" min:"10" type:"string" required:"true"` + + // The ID for the multipart upload. + // + // UploadId is a required field + UploadId *string `location:"uri" locationName:"uploadId" min:"10" type:"string" required:"true"` } // String returns the string representation. @@ -8311,7 +9224,7 @@ type ConflictException struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ConflictException) String() string { +func (s CompleteMultipartReadSetUploadInput) String() string { return awsutil.Prettify(s) } @@ -8320,72 +9233,493 @@ func (s ConflictException) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ConflictException) GoString() string { +func (s CompleteMultipartReadSetUploadInput) GoString() string { return s.String() } -func newErrorConflictException(v protocol.ResponseMetadata) error { - return &ConflictException{ - RespMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CompleteMultipartReadSetUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartReadSetUploadInput"} + if s.Parts == nil { + invalidParams.Add(request.NewErrParamRequired("Parts")) + } + if s.SequenceStoreId == nil { + invalidParams.Add(request.NewErrParamRequired("SequenceStoreId")) + } + if s.SequenceStoreId != nil && len(*s.SequenceStoreId) < 10 { + invalidParams.Add(request.NewErrParamMinLen("SequenceStoreId", 10)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + if s.UploadId != nil && len(*s.UploadId) < 10 { + invalidParams.Add(request.NewErrParamMinLen("UploadId", 10)) + } + if s.Parts != nil { + for i, v := range s.Parts { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Parts", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams } + return nil } -// Code returns the exception type name. -func (s *ConflictException) Code() string { - return "ConflictException" +// SetParts sets the Parts field's value. +func (s *CompleteMultipartReadSetUploadInput) SetParts(v []*CompleteReadSetUploadPartListItem) *CompleteMultipartReadSetUploadInput { + s.Parts = v + return s } -// Message returns the exception's message. -func (s *ConflictException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetSequenceStoreId sets the SequenceStoreId field's value. +func (s *CompleteMultipartReadSetUploadInput) SetSequenceStoreId(v string) *CompleteMultipartReadSetUploadInput { + s.SequenceStoreId = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ConflictException) OrigErr() error { - return nil +// SetUploadId sets the UploadId field's value. 
+func (s *CompleteMultipartReadSetUploadInput) SetUploadId(v string) *CompleteMultipartReadSetUploadInput { + s.UploadId = &v + return s } -func (s *ConflictException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +type CompleteMultipartReadSetUploadOutput struct { + _ struct{} `type:"structure"` + + // The read set ID created for an uploaded read set. + // + // ReadSetId is a required field + ReadSetId *string `locationName:"readSetId" min:"10" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompleteMultipartReadSetUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompleteMultipartReadSetUploadOutput) GoString() string { + return s.String() +} + +// SetReadSetId sets the ReadSetId field's value. +func (s *CompleteMultipartReadSetUploadOutput) SetReadSetId(v string) *CompleteMultipartReadSetUploadOutput { + s.ReadSetId = &v + return s +} + +// Part of the response to the CompleteReadSetUpload API, including metadata. +type CompleteReadSetUploadPartListItem struct { + _ struct{} `type:"structure"` + + // A unique identifier used to confirm that parts are being added to the correct + // upload. + // + // Checksum is a required field + Checksum *string `locationName:"checksum" type:"string" required:"true"` + + // A number identifying the part in a read set upload. 
+ // + // PartNumber is a required field + PartNumber *int64 `locationName:"partNumber" min:"1" type:"integer" required:"true"` + + // The source file of the part being uploaded. + // + // PartSource is a required field + PartSource *string `locationName:"partSource" type:"string" required:"true" enum:"ReadSetPartSource"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompleteReadSetUploadPartListItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompleteReadSetUploadPartListItem) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CompleteReadSetUploadPartListItem) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteReadSetUploadPartListItem"} + if s.Checksum == nil { + invalidParams.Add(request.NewErrParamRequired("Checksum")) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.PartNumber != nil && *s.PartNumber < 1 { + invalidParams.Add(request.NewErrParamMinValue("PartNumber", 1)) + } + if s.PartSource == nil { + invalidParams.Add(request.NewErrParamRequired("PartSource")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChecksum sets the Checksum field's value. +func (s *CompleteReadSetUploadPartListItem) SetChecksum(v string) *CompleteReadSetUploadPartListItem { + s.Checksum = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. 
+func (s *CompleteReadSetUploadPartListItem) SetPartNumber(v int64) *CompleteReadSetUploadPartListItem { + s.PartNumber = &v + return s +} + +// SetPartSource sets the PartSource field's value. +func (s *CompleteReadSetUploadPartListItem) SetPartSource(v string) *CompleteReadSetUploadPartListItem { + s.PartSource = &v + return s +} + +// The request cannot be applied to the target resource in its current state. +type ConflictException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ConflictException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ConflictException) GoString() string { + return s.String() +} + +func newErrorConflictException(v protocol.ResponseMetadata) error { + return &ConflictException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ConflictException) Code() string { + return "ConflictException" +} + +// Message returns the exception's message. +func (s *ConflictException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ConflictException) OrigErr() error { + return nil +} + +func (s *ConflictException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID +} + +type CreateAnnotationStoreInput struct { + _ struct{} `type:"structure"` + + // A description for the store. + Description *string `locationName:"description" type:"string"` + + // A name for the store. + Name *string `locationName:"name" type:"string"` + + // The genome reference for the store's annotations. + Reference *ReferenceItem `locationName:"reference" type:"structure"` + + // Server-side encryption (SSE) settings for the store. + SseConfig *SseConfig `locationName:"sseConfig" type:"structure"` + + // The annotation file format of the store. + // + // StoreFormat is a required field + StoreFormat *string `locationName:"storeFormat" type:"string" required:"true" enum:"StoreFormat"` + + // File parsing options for the annotation store. + StoreOptions *StoreOptions `locationName:"storeOptions" type:"structure"` + + // Tags for the store. + Tags map[string]*string `locationName:"tags" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateAnnotationStoreInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateAnnotationStoreInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateAnnotationStoreInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAnnotationStoreInput"} + if s.StoreFormat == nil { + invalidParams.Add(request.NewErrParamRequired("StoreFormat")) + } + if s.Reference != nil { + if err := s.Reference.Validate(); err != nil { + invalidParams.AddNested("Reference", err.(request.ErrInvalidParams)) + } + } + if s.SseConfig != nil { + if err := s.SseConfig.Validate(); err != nil { + invalidParams.AddNested("SseConfig", err.(request.ErrInvalidParams)) + } + } + if s.StoreOptions != nil { + if err := s.StoreOptions.Validate(); err != nil { + invalidParams.AddNested("StoreOptions", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *CreateAnnotationStoreInput) SetDescription(v string) *CreateAnnotationStoreInput { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateAnnotationStoreInput) SetName(v string) *CreateAnnotationStoreInput { + s.Name = &v + return s +} + +// SetReference sets the Reference field's value. +func (s *CreateAnnotationStoreInput) SetReference(v *ReferenceItem) *CreateAnnotationStoreInput { + s.Reference = v + return s +} + +// SetSseConfig sets the SseConfig field's value. +func (s *CreateAnnotationStoreInput) SetSseConfig(v *SseConfig) *CreateAnnotationStoreInput { + s.SseConfig = v + return s +} + +// SetStoreFormat sets the StoreFormat field's value. +func (s *CreateAnnotationStoreInput) SetStoreFormat(v string) *CreateAnnotationStoreInput { + s.StoreFormat = &v + return s +} + +// SetStoreOptions sets the StoreOptions field's value. +func (s *CreateAnnotationStoreInput) SetStoreOptions(v *StoreOptions) *CreateAnnotationStoreInput { + s.StoreOptions = v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *CreateAnnotationStoreInput) SetTags(v map[string]*string) *CreateAnnotationStoreInput { + s.Tags = v + return s +} + +type CreateAnnotationStoreOutput struct { + _ struct{} `type:"structure"` + + // When the store was created. + // + // CreationTime is a required field + CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The store's ID. + // + // Id is a required field + Id *string `locationName:"id" type:"string" required:"true"` + + // The store's name. + // + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true"` + + // The store's genome reference. Required for all stores except TSV format with + // generic annotations. + Reference *ReferenceItem `locationName:"reference" type:"structure"` + + // The store's status. + // + // Status is a required field + Status *string `locationName:"status" type:"string" required:"true" enum:"StoreStatus"` + + // The annotation file format of the store. + StoreFormat *string `locationName:"storeFormat" type:"string" enum:"StoreFormat"` + + // The store's file parsing options. + StoreOptions *StoreOptions `locationName:"storeOptions" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateAnnotationStoreOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateAnnotationStoreOutput) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. 
+func (s *CreateAnnotationStoreOutput) SetCreationTime(v time.Time) *CreateAnnotationStoreOutput { + s.CreationTime = &v + return s +} + +// SetId sets the Id field's value. +func (s *CreateAnnotationStoreOutput) SetId(v string) *CreateAnnotationStoreOutput { + s.Id = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateAnnotationStoreOutput) SetName(v string) *CreateAnnotationStoreOutput { + s.Name = &v + return s +} + +// SetReference sets the Reference field's value. +func (s *CreateAnnotationStoreOutput) SetReference(v *ReferenceItem) *CreateAnnotationStoreOutput { + s.Reference = v + return s +} + +// SetStatus sets the Status field's value. +func (s *CreateAnnotationStoreOutput) SetStatus(v string) *CreateAnnotationStoreOutput { + s.Status = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s *ConflictException) StatusCode() int { - return s.RespMetadata.StatusCode +// SetStoreFormat sets the StoreFormat field's value. +func (s *CreateAnnotationStoreOutput) SetStoreFormat(v string) *CreateAnnotationStoreOutput { + s.StoreFormat = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s *ConflictException) RequestID() string { - return s.RespMetadata.RequestID +// SetStoreOptions sets the StoreOptions field's value. +func (s *CreateAnnotationStoreOutput) SetStoreOptions(v *StoreOptions) *CreateAnnotationStoreOutput { + s.StoreOptions = v + return s } -type CreateAnnotationStoreInput struct { +type CreateMultipartReadSetUploadInput struct { _ struct{} `type:"structure"` - // A description for the store. - Description *string `locationName:"description" type:"string"` + // An idempotency token that can be used to avoid triggering multiple multipart + // uploads. + ClientToken *string `locationName:"clientToken" min:"1" type:"string"` - // A name for the store. - Name *string `locationName:"name" type:"string"` + // The description of the read set. 
+ Description *string `locationName:"description" min:"1" type:"string"` - // The genome reference for the store's annotations. - Reference *ReferenceItem `locationName:"reference" type:"structure"` + // Where the source originated. + GeneratedFrom *string `locationName:"generatedFrom" min:"1" type:"string"` - // Server-side encryption (SSE) settings for the store. - SseConfig *SseConfig `locationName:"sseConfig" type:"structure"` + // The name of the read set. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` - // The annotation file format of the store. + // The ARN of the reference. // - // StoreFormat is a required field - StoreFormat *string `locationName:"storeFormat" type:"string" required:"true" enum:"StoreFormat"` + // ReferenceArn is a required field + ReferenceArn *string `locationName:"referenceArn" min:"1" type:"string" required:"true"` - // File parsing options for the annotation store. - StoreOptions *StoreOptions `locationName:"storeOptions" type:"structure"` + // The source's sample ID. + // + // SampleId is a required field + SampleId *string `locationName:"sampleId" min:"1" type:"string" required:"true"` - // Tags for the store. + // The sequence store ID for the store that is the destination of the multipart + // uploads. + // + // SequenceStoreId is a required field + SequenceStoreId *string `location:"uri" locationName:"sequenceStoreId" min:"10" type:"string" required:"true"` + + // The type of file being uploaded. + // + // SourceFileType is a required field + SourceFileType *string `locationName:"sourceFileType" type:"string" required:"true" enum:"FileType"` + + // The source's subject ID. + // + // SubjectId is a required field + SubjectId *string `locationName:"subjectId" min:"1" type:"string" required:"true"` + + // Any tags to add to the read set. 
Tags map[string]*string `locationName:"tags" type:"map"` } @@ -8394,7 +9728,7 @@ type CreateAnnotationStoreInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateAnnotationStoreInput) String() string { +func (s CreateMultipartReadSetUploadInput) String() string { return awsutil.Prettify(s) } @@ -8403,30 +9737,54 @@ func (s CreateAnnotationStoreInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateAnnotationStoreInput) GoString() string { +func (s CreateMultipartReadSetUploadInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateAnnotationStoreInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateAnnotationStoreInput"} - if s.StoreFormat == nil { - invalidParams.Add(request.NewErrParamRequired("StoreFormat")) +func (s *CreateMultipartReadSetUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMultipartReadSetUploadInput"} + if s.ClientToken != nil && len(*s.ClientToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) } - if s.Reference != nil { - if err := s.Reference.Validate(); err != nil { - invalidParams.AddNested("Reference", err.(request.ErrInvalidParams)) - } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) } - if s.SseConfig != nil { - if err := s.SseConfig.Validate(); err != nil { - invalidParams.AddNested("SseConfig", err.(request.ErrInvalidParams)) - } + if s.GeneratedFrom != nil && len(*s.GeneratedFrom) < 1 { + 
invalidParams.Add(request.NewErrParamMinLen("GeneratedFrom", 1)) } - if s.StoreOptions != nil { - if err := s.StoreOptions.Validate(); err != nil { - invalidParams.AddNested("StoreOptions", err.(request.ErrInvalidParams)) - } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.ReferenceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReferenceArn")) + } + if s.ReferenceArn != nil && len(*s.ReferenceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReferenceArn", 1)) + } + if s.SampleId == nil { + invalidParams.Add(request.NewErrParamRequired("SampleId")) + } + if s.SampleId != nil && len(*s.SampleId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SampleId", 1)) + } + if s.SequenceStoreId == nil { + invalidParams.Add(request.NewErrParamRequired("SequenceStoreId")) + } + if s.SequenceStoreId != nil && len(*s.SequenceStoreId) < 10 { + invalidParams.Add(request.NewErrParamMinLen("SequenceStoreId", 10)) + } + if s.SourceFileType == nil { + invalidParams.Add(request.NewErrParamRequired("SourceFileType")) + } + if s.SubjectId == nil { + invalidParams.Add(request.NewErrParamRequired("SubjectId")) + } + if s.SubjectId != nil && len(*s.SubjectId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SubjectId", 1)) } if invalidParams.Len() > 0 { @@ -8435,80 +9793,115 @@ func (s *CreateAnnotationStoreInput) Validate() error { return nil } +// SetClientToken sets the ClientToken field's value. +func (s *CreateMultipartReadSetUploadInput) SetClientToken(v string) *CreateMultipartReadSetUploadInput { + s.ClientToken = &v + return s +} + // SetDescription sets the Description field's value. 
-func (s *CreateAnnotationStoreInput) SetDescription(v string) *CreateAnnotationStoreInput { +func (s *CreateMultipartReadSetUploadInput) SetDescription(v string) *CreateMultipartReadSetUploadInput { s.Description = &v return s } +// SetGeneratedFrom sets the GeneratedFrom field's value. +func (s *CreateMultipartReadSetUploadInput) SetGeneratedFrom(v string) *CreateMultipartReadSetUploadInput { + s.GeneratedFrom = &v + return s +} + // SetName sets the Name field's value. -func (s *CreateAnnotationStoreInput) SetName(v string) *CreateAnnotationStoreInput { +func (s *CreateMultipartReadSetUploadInput) SetName(v string) *CreateMultipartReadSetUploadInput { s.Name = &v return s } -// SetReference sets the Reference field's value. -func (s *CreateAnnotationStoreInput) SetReference(v *ReferenceItem) *CreateAnnotationStoreInput { - s.Reference = v +// SetReferenceArn sets the ReferenceArn field's value. +func (s *CreateMultipartReadSetUploadInput) SetReferenceArn(v string) *CreateMultipartReadSetUploadInput { + s.ReferenceArn = &v return s } -// SetSseConfig sets the SseConfig field's value. -func (s *CreateAnnotationStoreInput) SetSseConfig(v *SseConfig) *CreateAnnotationStoreInput { - s.SseConfig = v +// SetSampleId sets the SampleId field's value. +func (s *CreateMultipartReadSetUploadInput) SetSampleId(v string) *CreateMultipartReadSetUploadInput { + s.SampleId = &v return s } -// SetStoreFormat sets the StoreFormat field's value. -func (s *CreateAnnotationStoreInput) SetStoreFormat(v string) *CreateAnnotationStoreInput { - s.StoreFormat = &v +// SetSequenceStoreId sets the SequenceStoreId field's value. +func (s *CreateMultipartReadSetUploadInput) SetSequenceStoreId(v string) *CreateMultipartReadSetUploadInput { + s.SequenceStoreId = &v return s } -// SetStoreOptions sets the StoreOptions field's value. 
-func (s *CreateAnnotationStoreInput) SetStoreOptions(v *StoreOptions) *CreateAnnotationStoreInput { - s.StoreOptions = v +// SetSourceFileType sets the SourceFileType field's value. +func (s *CreateMultipartReadSetUploadInput) SetSourceFileType(v string) *CreateMultipartReadSetUploadInput { + s.SourceFileType = &v + return s +} + +// SetSubjectId sets the SubjectId field's value. +func (s *CreateMultipartReadSetUploadInput) SetSubjectId(v string) *CreateMultipartReadSetUploadInput { + s.SubjectId = &v return s } // SetTags sets the Tags field's value. -func (s *CreateAnnotationStoreInput) SetTags(v map[string]*string) *CreateAnnotationStoreInput { +func (s *CreateMultipartReadSetUploadInput) SetTags(v map[string]*string) *CreateMultipartReadSetUploadInput { s.Tags = v return s } -type CreateAnnotationStoreOutput struct { +type CreateMultipartReadSetUploadOutput struct { _ struct{} `type:"structure"` - // When the store was created. + // The creation time of the multipart upload. // // CreationTime is a required field CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` - // The store's ID. + // The description of the read set. + Description *string `locationName:"description" min:"1" type:"string"` + + // The source of the read set. + GeneratedFrom *string `locationName:"generatedFrom" min:"1" type:"string"` + + // The name of the read set. + Name *string `locationName:"name" min:"1" type:"string"` + + // The read set source's reference ARN. // - // Id is a required field - Id *string `locationName:"id" type:"string" required:"true"` + // ReferenceArn is a required field + ReferenceArn *string `locationName:"referenceArn" min:"1" type:"string" required:"true"` - // The store's name. + // The source's sample ID. 
 //
- // Name is a required field
- Name *string `locationName:"name" type:"string" required:"true"`
+ // SampleId is a required field
+ SampleId *string `locationName:"sampleId" min:"1" type:"string" required:"true"`

- // The store's genome reference. Required for all stores except TSV format with
- // generic annotations.
- Reference *ReferenceItem `locationName:"reference" type:"structure"`
+ // The sequence store ID for the store that the read set will be created in.
+ //
+ // SequenceStoreId is a required field
+ SequenceStoreId *string `locationName:"sequenceStoreId" min:"10" type:"string" required:"true"`

- // The store's status.
+ // The file type of the read set source.
 //
- // Status is a required field
- Status *string `locationName:"status" type:"string" required:"true" enum:"StoreStatus"`
+ // SourceFileType is a required field
+ SourceFileType *string `locationName:"sourceFileType" type:"string" required:"true" enum:"FileType"`

- // The annotation file format of the store.
- StoreFormat *string `locationName:"storeFormat" type:"string" enum:"StoreFormat"`
+ // The source's subject ID.
+ //
+ // SubjectId is a required field
+ SubjectId *string `locationName:"subjectId" min:"1" type:"string" required:"true"`

- // The store's file parsing options.
- StoreOptions *StoreOptions `locationName:"storeOptions" type:"structure"`
+ // The tags to add to the read set.
+ Tags map[string]*string `locationName:"tags" type:"map"`
+
+ // The ID for the initiated multipart upload.
+ //
+ // UploadId is a required field
+ UploadId *string `locationName:"uploadId" min:"10" type:"string" required:"true"`
 }

 // String returns the string representation.
@@ -8516,7 +9909,7 @@ type CreateAnnotationStoreOutput struct {
 // API parameter values that are decorated as "sensitive" in the API will not
 // be included in the string output. The member name will be present, but the
 // value will be replaced with "sensitive".
-func (s CreateAnnotationStoreOutput) String() string { +func (s CreateMultipartReadSetUploadOutput) String() string { return awsutil.Prettify(s) } @@ -8525,49 +9918,73 @@ func (s CreateAnnotationStoreOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateAnnotationStoreOutput) GoString() string { +func (s CreateMultipartReadSetUploadOutput) GoString() string { return s.String() } // SetCreationTime sets the CreationTime field's value. -func (s *CreateAnnotationStoreOutput) SetCreationTime(v time.Time) *CreateAnnotationStoreOutput { +func (s *CreateMultipartReadSetUploadOutput) SetCreationTime(v time.Time) *CreateMultipartReadSetUploadOutput { s.CreationTime = &v return s } -// SetId sets the Id field's value. -func (s *CreateAnnotationStoreOutput) SetId(v string) *CreateAnnotationStoreOutput { - s.Id = &v +// SetDescription sets the Description field's value. +func (s *CreateMultipartReadSetUploadOutput) SetDescription(v string) *CreateMultipartReadSetUploadOutput { + s.Description = &v + return s +} + +// SetGeneratedFrom sets the GeneratedFrom field's value. +func (s *CreateMultipartReadSetUploadOutput) SetGeneratedFrom(v string) *CreateMultipartReadSetUploadOutput { + s.GeneratedFrom = &v return s } // SetName sets the Name field's value. -func (s *CreateAnnotationStoreOutput) SetName(v string) *CreateAnnotationStoreOutput { +func (s *CreateMultipartReadSetUploadOutput) SetName(v string) *CreateMultipartReadSetUploadOutput { s.Name = &v return s } -// SetReference sets the Reference field's value. -func (s *CreateAnnotationStoreOutput) SetReference(v *ReferenceItem) *CreateAnnotationStoreOutput { - s.Reference = v +// SetReferenceArn sets the ReferenceArn field's value. 
+func (s *CreateMultipartReadSetUploadOutput) SetReferenceArn(v string) *CreateMultipartReadSetUploadOutput { + s.ReferenceArn = &v return s } -// SetStatus sets the Status field's value. -func (s *CreateAnnotationStoreOutput) SetStatus(v string) *CreateAnnotationStoreOutput { - s.Status = &v +// SetSampleId sets the SampleId field's value. +func (s *CreateMultipartReadSetUploadOutput) SetSampleId(v string) *CreateMultipartReadSetUploadOutput { + s.SampleId = &v return s } -// SetStoreFormat sets the StoreFormat field's value. -func (s *CreateAnnotationStoreOutput) SetStoreFormat(v string) *CreateAnnotationStoreOutput { - s.StoreFormat = &v +// SetSequenceStoreId sets the SequenceStoreId field's value. +func (s *CreateMultipartReadSetUploadOutput) SetSequenceStoreId(v string) *CreateMultipartReadSetUploadOutput { + s.SequenceStoreId = &v return s } -// SetStoreOptions sets the StoreOptions field's value. -func (s *CreateAnnotationStoreOutput) SetStoreOptions(v *StoreOptions) *CreateAnnotationStoreOutput { - s.StoreOptions = v +// SetSourceFileType sets the SourceFileType field's value. +func (s *CreateMultipartReadSetUploadOutput) SetSourceFileType(v string) *CreateMultipartReadSetUploadOutput { + s.SourceFileType = &v + return s +} + +// SetSubjectId sets the SubjectId field's value. +func (s *CreateMultipartReadSetUploadOutput) SetSubjectId(v string) *CreateMultipartReadSetUploadOutput { + s.SubjectId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateMultipartReadSetUploadOutput) SetTags(v map[string]*string) *CreateMultipartReadSetUploadOutput { + s.Tags = v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *CreateMultipartReadSetUploadOutput) SetUploadId(v string) *CreateMultipartReadSetUploadOutput { + s.UploadId = &v return s } @@ -8759,6 +10176,9 @@ type CreateRunGroupInput struct { // A maximum run time for the group in minutes. 
MaxDuration *int64 `locationName:"maxDuration" min:"1" type:"integer"` + // The maximum GPUs that can be used by a run group. + MaxGpus *int64 `locationName:"maxGpus" min:"1" type:"integer"` + // The maximum number of concurrent runs for the group. MaxRuns *int64 `locationName:"maxRuns" min:"1" type:"integer"` @@ -8800,6 +10220,9 @@ func (s *CreateRunGroupInput) Validate() error { if s.MaxDuration != nil && *s.MaxDuration < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxDuration", 1)) } + if s.MaxGpus != nil && *s.MaxGpus < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxGpus", 1)) + } if s.MaxRuns != nil && *s.MaxRuns < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxRuns", 1)) } @@ -8828,6 +10251,12 @@ func (s *CreateRunGroupInput) SetMaxDuration(v int64) *CreateRunGroupInput { return s } +// SetMaxGpus sets the MaxGpus field's value. +func (s *CreateRunGroupInput) SetMaxGpus(v int64) *CreateRunGroupInput { + s.MaxGpus = &v + return s +} + // SetMaxRuns sets the MaxRuns field's value. func (s *CreateRunGroupInput) SetMaxRuns(v int64) *CreateRunGroupInput { s.MaxRuns = &v @@ -8911,6 +10340,9 @@ type CreateSequenceStoreInput struct { // A description for the store. Description *string `locationName:"description" min:"1" type:"string"` + // An S3 location that is used to store files that have failed a direct upload. + FallbackLocation *string `locationName:"fallbackLocation" type:"string"` + // A name for the store. // // Name is a required field @@ -8980,6 +10412,12 @@ func (s *CreateSequenceStoreInput) SetDescription(v string) *CreateSequenceStore return s } +// SetFallbackLocation sets the FallbackLocation field's value. +func (s *CreateSequenceStoreInput) SetFallbackLocation(v string) *CreateSequenceStoreInput { + s.FallbackLocation = &v + return s +} + // SetName sets the Name field's value. 
func (s *CreateSequenceStoreInput) SetName(v string) *CreateSequenceStoreInput { s.Name = &v @@ -9014,6 +10452,9 @@ type CreateSequenceStoreOutput struct { // The store's description. Description *string `locationName:"description" min:"1" type:"string"` + // An S3 location that is used to store files that have failed a direct upload. + FallbackLocation *string `locationName:"fallbackLocation" type:"string"` + // The store's ID. // // Id is a required field @@ -9062,6 +10503,12 @@ func (s *CreateSequenceStoreOutput) SetDescription(v string) *CreateSequenceStor return s } +// SetFallbackLocation sets the FallbackLocation field's value. +func (s *CreateSequenceStoreOutput) SetFallbackLocation(v string) *CreateSequenceStoreOutput { + s.FallbackLocation = &v + return s +} + // SetId sets the Id field's value. func (s *CreateSequenceStoreOutput) SetId(v string) *CreateSequenceStoreOutput { s.Id = &v @@ -9250,6 +10697,9 @@ func (s *CreateVariantStoreOutput) SetStatus(v string) *CreateVariantStoreOutput type CreateWorkflowInput struct { _ struct{} `type:"structure"` + // The computational accelerator specified to run the workflow. + Accelerators *string `locationName:"accelerators" min:"1" type:"string" enum:"Accelerators"` + // The URI of a definition for the workflow. DefinitionUri *string `locationName:"definitionUri" min:"1" type:"string"` @@ -9304,6 +10754,9 @@ func (s CreateWorkflowInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *CreateWorkflowInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateWorkflowInput"} + if s.Accelerators != nil && len(*s.Accelerators) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Accelerators", 1)) + } if s.DefinitionUri != nil && len(*s.DefinitionUri) < 1 { invalidParams.Add(request.NewErrParamMinLen("DefinitionUri", 1)) } @@ -9332,6 +10785,12 @@ func (s *CreateWorkflowInput) Validate() error { return nil } +// SetAccelerators sets the Accelerators field's value. +func (s *CreateWorkflowInput) SetAccelerators(v string) *CreateWorkflowInput { + s.Accelerators = &v + return s +} + // SetDefinitionUri sets the DefinitionUri field's value. func (s *CreateWorkflowInput) SetDefinitionUri(v string) *CreateWorkflowInput { s.DefinitionUri = &v @@ -10474,6 +11933,9 @@ func (s *GetAnnotationImportJobInput) SetJobId(v string) *GetAnnotationImportJob type GetAnnotationImportJobOutput struct { _ struct{} `type:"structure"` + // The annotation schema generated by the parsed annotation data. + AnnotationFields map[string]*string `locationName:"annotationFields" type:"map"` + // When the job completed. // // CompletionTime is a required field @@ -10548,6 +12010,12 @@ func (s GetAnnotationImportJobOutput) GoString() string { return s.String() } +// SetAnnotationFields sets the AnnotationFields field's value. +func (s *GetAnnotationImportJobOutput) SetAnnotationFields(v map[string]*string) *GetAnnotationImportJobOutput { + s.AnnotationFields = v + return s +} + // SetCompletionTime sets the CompletionTime field's value. func (s *GetAnnotationImportJobOutput) SetCompletionTime(v time.Time) *GetAnnotationImportJobOutput { s.CompletionTime = &v @@ -11545,6 +13013,10 @@ type GetReadSetMetadataOutput struct { // Status is a required field Status *string `locationName:"status" type:"string" required:"true" enum:"ReadSetStatus"` + // The status message for a read set. It provides more detail as to why the + // read set has a status. 
+ StatusMessage *string `locationName:"statusMessage" min:"1" type:"string"` + // The read set's subject ID. SubjectId *string `locationName:"subjectId" min:"1" type:"string"` } @@ -11639,6 +13111,12 @@ func (s *GetReadSetMetadataOutput) SetStatus(v string) *GetReadSetMetadataOutput return s } +// SetStatusMessage sets the StatusMessage field's value. +func (s *GetReadSetMetadataOutput) SetStatusMessage(v string) *GetReadSetMetadataOutput { + s.StatusMessage = &v + return s +} + // SetSubjectId sets the SubjectId field's value. func (s *GetReadSetMetadataOutput) SetSubjectId(v string) *GetReadSetMetadataOutput { s.SubjectId = &v @@ -12371,6 +13849,9 @@ type GetRunGroupOutput struct { // The group's maximum run time in minutes. MaxDuration *int64 `locationName:"maxDuration" min:"1" type:"integer"` + // The maximum GPUs that can be used by a run group. + MaxGpus *int64 `locationName:"maxGpus" min:"1" type:"integer"` + // The maximum number of concurrent runs for the group. MaxRuns *int64 `locationName:"maxRuns" min:"1" type:"integer"` @@ -12429,6 +13910,12 @@ func (s *GetRunGroupOutput) SetMaxDuration(v int64) *GetRunGroupOutput { return s } +// SetMaxGpus sets the MaxGpus field's value. +func (s *GetRunGroupOutput) SetMaxGpus(v int64) *GetRunGroupOutput { + s.MaxGpus = &v + return s +} + // SetMaxRuns sets the MaxRuns field's value. func (s *GetRunGroupOutput) SetMaxRuns(v int64) *GetRunGroupOutput { s.MaxRuns = &v @@ -12508,6 +13995,9 @@ func (s *GetRunInput) SetId(v string) *GetRunInput { type GetRunOutput struct { _ struct{} `type:"structure"` + // The computational accelerator used to run the workflow. + Accelerators *string `locationName:"accelerators" min:"1" type:"string" enum:"Accelerators"` + // The run's ARN. Arn *string `locationName:"arn" min:"1" type:"string"` @@ -12593,6 +14083,12 @@ func (s GetRunOutput) GoString() string { return s.String() } +// SetAccelerators sets the Accelerators field's value. 
+func (s *GetRunOutput) SetAccelerators(v string) *GetRunOutput { + s.Accelerators = &v + return s +} + // SetArn sets the Arn field's value. func (s *GetRunOutput) SetArn(v string) *GetRunOutput { s.Arn = &v @@ -12800,6 +14296,9 @@ type GetRunTaskOutput struct { // When the task was created. CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"iso8601"` + // The number of Graphics Processing Units (GPU) specified in the task. + Gpus *int64 `locationName:"gpus" type:"integer"` + // The task's log stream. LogStream *string `locationName:"logStream" type:"string"` @@ -12855,6 +14354,12 @@ func (s *GetRunTaskOutput) SetCreationTime(v time.Time) *GetRunTaskOutput { return s } +// SetGpus sets the Gpus field's value. +func (s *GetRunTaskOutput) SetGpus(v int64) *GetRunTaskOutput { + s.Gpus = &v + return s +} + // SetLogStream sets the LogStream field's value. func (s *GetRunTaskOutput) SetLogStream(v string) *GetRunTaskOutput { s.LogStream = &v @@ -12968,6 +14473,9 @@ type GetSequenceStoreOutput struct { // The store's description. Description *string `locationName:"description" min:"1" type:"string"` + // An S3 location that is used to store files that have failed a direct upload. + FallbackLocation *string `locationName:"fallbackLocation" type:"string"` + // The store's ID. // // Id is a required field @@ -13016,6 +14524,12 @@ func (s *GetSequenceStoreOutput) SetDescription(v string) *GetSequenceStoreOutpu return s } +// SetFallbackLocation sets the FallbackLocation field's value. +func (s *GetSequenceStoreOutput) SetFallbackLocation(v string) *GetSequenceStoreOutput { + s.FallbackLocation = &v + return s +} + // SetId sets the Id field's value. 
func (s *GetSequenceStoreOutput) SetId(v string) *GetSequenceStoreOutput { s.Id = &v @@ -13086,6 +14600,9 @@ func (s *GetVariantImportJobInput) SetJobId(v string) *GetVariantImportJobInput type GetVariantImportJobOutput struct { _ struct{} `type:"structure"` + // The annotation schema generated by the parsed annotation data. + AnnotationFields map[string]*string `locationName:"annotationFields" type:"map"` + // When the job completed. CompletionTime *time.Time `locationName:"completionTime" type:"timestamp" timestampFormat:"iso8601"` @@ -13153,6 +14670,12 @@ func (s GetVariantImportJobOutput) GoString() string { return s.String() } +// SetAnnotationFields sets the AnnotationFields field's value. +func (s *GetVariantImportJobOutput) SetAnnotationFields(v map[string]*string) *GetVariantImportJobOutput { + s.AnnotationFields = v + return s +} + // SetCompletionTime sets the CompletionTime field's value. func (s *GetVariantImportJobOutput) SetCompletionTime(v time.Time) *GetVariantImportJobOutput { s.CompletionTime = &v @@ -13489,6 +15012,9 @@ func (s *GetWorkflowInput) SetType(v string) *GetWorkflowInput { type GetWorkflowOutput struct { _ struct{} `type:"structure"` + // The computational accelerator specified to run the workflow. + Accelerators *string `locationName:"accelerators" min:"1" type:"string" enum:"Accelerators"` + // The workflow's ARN. Arn *string `locationName:"arn" min:"1" type:"string"` @@ -13513,6 +15039,9 @@ type GetWorkflowOutput struct { // The path of the main definition file for the workflow. Main *string `locationName:"main" min:"1" type:"string"` + // Gets metadata for workflow. + Metadata map[string]*string `locationName:"metadata" type:"map"` + // The workflow's name. Name *string `locationName:"name" min:"1" type:"string"` @@ -13553,6 +15082,12 @@ func (s GetWorkflowOutput) GoString() string { return s.String() } +// SetAccelerators sets the Accelerators field's value. 
+func (s *GetWorkflowOutput) SetAccelerators(v string) *GetWorkflowOutput { + s.Accelerators = &v + return s +} + // SetArn sets the Arn field's value. func (s *GetWorkflowOutput) SetArn(v string) *GetWorkflowOutput { s.Arn = &v @@ -13601,6 +15136,12 @@ func (s *GetWorkflowOutput) SetMain(v string) *GetWorkflowOutput { return s } +// SetMetadata sets the Metadata field's value. +func (s *GetWorkflowOutput) SetMetadata(v map[string]*string) *GetWorkflowOutput { + s.Metadata = v + return s +} + // SetName sets the Name field's value. func (s *GetWorkflowOutput) SetName(v string) *GetWorkflowOutput { s.Name = &v @@ -14501,6 +16042,121 @@ func (s *ListAnnotationStoresOutput) SetNextToken(v string) *ListAnnotationStore return s } +type ListMultipartReadSetUploadsInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The maximum number of multipart uploads returned in a page. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // Next token returned in the response of a previous ListMultipartReadSetUploads + // call. Used to get the next page of results. + NextToken *string `location:"querystring" locationName:"nextToken" min:"1" type:"string"` + + // The Sequence Store ID used for the multipart uploads. + // + // SequenceStoreId is a required field + SequenceStoreId *string `location:"uri" locationName:"sequenceStoreId" min:"10" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListMultipartReadSetUploadsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListMultipartReadSetUploadsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMultipartReadSetUploadsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMultipartReadSetUploadsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.SequenceStoreId == nil { + invalidParams.Add(request.NewErrParamRequired("SequenceStoreId")) + } + if s.SequenceStoreId != nil && len(*s.SequenceStoreId) < 10 { + invalidParams.Add(request.NewErrParamMinLen("SequenceStoreId", 10)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListMultipartReadSetUploadsInput) SetMaxResults(v int64) *ListMultipartReadSetUploadsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListMultipartReadSetUploadsInput) SetNextToken(v string) *ListMultipartReadSetUploadsInput { + s.NextToken = &v + return s +} + +// SetSequenceStoreId sets the SequenceStoreId field's value. +func (s *ListMultipartReadSetUploadsInput) SetSequenceStoreId(v string) *ListMultipartReadSetUploadsInput { + s.SequenceStoreId = &v + return s +} + +type ListMultipartReadSetUploadsOutput struct { + _ struct{} `type:"structure"` + + // Next token returned in the response of a previous ListMultipartReadSetUploads + // call. Used to get the next page of results. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // An array of multipart uploads. 
+ Uploads []*MultipartReadSetUploadListItem `locationName:"uploads" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListMultipartReadSetUploadsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListMultipartReadSetUploadsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListMultipartReadSetUploadsOutput) SetNextToken(v string) *ListMultipartReadSetUploadsOutput { + s.NextToken = &v + return s +} + +// SetUploads sets the Uploads field's value. +func (s *ListMultipartReadSetUploadsOutput) SetUploads(v []*MultipartReadSetUploadListItem) *ListMultipartReadSetUploadsOutput { + s.Uploads = v + return s +} + type ListReadSetActivationJobsInput struct { _ struct{} `type:"structure"` @@ -14696,22 +16352,145 @@ func (s *ListReadSetExportJobsInput) SetMaxResults(v int64) *ListReadSetExportJo } // SetNextToken sets the NextToken field's value. -func (s *ListReadSetExportJobsInput) SetNextToken(v string) *ListReadSetExportJobsInput { +func (s *ListReadSetExportJobsInput) SetNextToken(v string) *ListReadSetExportJobsInput { + s.NextToken = &v + return s +} + +// SetSequenceStoreId sets the SequenceStoreId field's value. +func (s *ListReadSetExportJobsInput) SetSequenceStoreId(v string) *ListReadSetExportJobsInput { + s.SequenceStoreId = &v + return s +} + +type ListReadSetExportJobsOutput struct { + _ struct{} `type:"structure"` + + // A list of jobs. 
+ ExportJobs []*ExportReadSetJobDetail `locationName:"exportJobs" type:"list"` + + // A pagination token that's included if more results are available. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListReadSetExportJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListReadSetExportJobsOutput) GoString() string { + return s.String() +} + +// SetExportJobs sets the ExportJobs field's value. +func (s *ListReadSetExportJobsOutput) SetExportJobs(v []*ExportReadSetJobDetail) *ListReadSetExportJobsOutput { + s.ExportJobs = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListReadSetExportJobsOutput) SetNextToken(v string) *ListReadSetExportJobsOutput { + s.NextToken = &v + return s +} + +type ListReadSetImportJobsInput struct { + _ struct{} `type:"structure"` + + // A filter to apply to the list. + Filter *ImportReadSetFilter `locationName:"filter" type:"structure"` + + // The maximum number of jobs to return in one page of results. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // Specify the pagination token from a previous request to retrieve the next + // page of results. + NextToken *string `location:"querystring" locationName:"nextToken" min:"1" type:"string"` + + // The jobs' sequence store ID. 
+ // + // SequenceStoreId is a required field + SequenceStoreId *string `location:"uri" locationName:"sequenceStoreId" min:"10" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListReadSetImportJobsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListReadSetImportJobsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListReadSetImportJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListReadSetImportJobsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.SequenceStoreId == nil { + invalidParams.Add(request.NewErrParamRequired("SequenceStoreId")) + } + if s.SequenceStoreId != nil && len(*s.SequenceStoreId) < 10 { + invalidParams.Add(request.NewErrParamMinLen("SequenceStoreId", 10)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *ListReadSetImportJobsInput) SetFilter(v *ImportReadSetFilter) *ListReadSetImportJobsInput { + s.Filter = v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *ListReadSetImportJobsInput) SetMaxResults(v int64) *ListReadSetImportJobsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListReadSetImportJobsInput) SetNextToken(v string) *ListReadSetImportJobsInput { s.NextToken = &v return s } // SetSequenceStoreId sets the SequenceStoreId field's value. -func (s *ListReadSetExportJobsInput) SetSequenceStoreId(v string) *ListReadSetExportJobsInput { +func (s *ListReadSetImportJobsInput) SetSequenceStoreId(v string) *ListReadSetImportJobsInput { s.SequenceStoreId = &v return s } -type ListReadSetExportJobsOutput struct { +type ListReadSetImportJobsOutput struct { _ struct{} `type:"structure"` // A list of jobs. - ExportJobs []*ExportReadSetJobDetail `locationName:"exportJobs" type:"list"` + ImportJobs []*ImportReadSetJobItem `locationName:"importJobs" type:"list"` // A pagination token that's included if more results are available. NextToken *string `locationName:"nextToken" min:"1" type:"string"` @@ -14722,7 +16501,7 @@ type ListReadSetExportJobsOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListReadSetExportJobsOutput) String() string { +func (s ListReadSetImportJobsOutput) String() string { return awsutil.Prettify(s) } @@ -14731,39 +16510,49 @@ func (s ListReadSetExportJobsOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListReadSetExportJobsOutput) GoString() string { +func (s ListReadSetImportJobsOutput) GoString() string { return s.String() } -// SetExportJobs sets the ExportJobs field's value. 
-func (s *ListReadSetExportJobsOutput) SetExportJobs(v []*ExportReadSetJobDetail) *ListReadSetExportJobsOutput { - s.ExportJobs = v +// SetImportJobs sets the ImportJobs field's value. +func (s *ListReadSetImportJobsOutput) SetImportJobs(v []*ImportReadSetJobItem) *ListReadSetImportJobsOutput { + s.ImportJobs = v return s } // SetNextToken sets the NextToken field's value. -func (s *ListReadSetExportJobsOutput) SetNextToken(v string) *ListReadSetExportJobsOutput { +func (s *ListReadSetImportJobsOutput) SetNextToken(v string) *ListReadSetImportJobsOutput { s.NextToken = &v return s } -type ListReadSetImportJobsInput struct { +type ListReadSetUploadPartsInput struct { _ struct{} `type:"structure"` - // A filter to apply to the list. - Filter *ImportReadSetFilter `locationName:"filter" type:"structure"` + // Attributes used to filter for a specific subset of read set part uploads. + Filter *ReadSetUploadPartListFilter `locationName:"filter" type:"structure"` - // The maximum number of jobs to return in one page of results. + // The maximum number of read set upload parts returned in a page. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - // Specify the pagination token from a previous request to retrieve the next - // page of results. + // Next token returned in the response of a previous ListReadSetUploadPartsRequest + // call. Used to get the next page of results. NextToken *string `location:"querystring" locationName:"nextToken" min:"1" type:"string"` - // The jobs' sequence store ID. + // The source file for the upload part. + // + // PartSource is a required field + PartSource *string `locationName:"partSource" type:"string" required:"true" enum:"ReadSetPartSource"` + + // The Sequence Store ID used for the multipart uploads. 
// // SequenceStoreId is a required field SequenceStoreId *string `location:"uri" locationName:"sequenceStoreId" min:"10" type:"string" required:"true"` + + // The ID for the initiated multipart upload. + // + // UploadId is a required field + UploadId *string `location:"uri" locationName:"uploadId" min:"10" type:"string" required:"true"` } // String returns the string representation. @@ -14771,7 +16560,7 @@ type ListReadSetImportJobsInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListReadSetImportJobsInput) String() string { +func (s ListReadSetUploadPartsInput) String() string { return awsutil.Prettify(s) } @@ -14780,25 +16569,34 @@ func (s ListReadSetImportJobsInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListReadSetImportJobsInput) GoString() string { +func (s ListReadSetUploadPartsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListReadSetImportJobsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListReadSetImportJobsInput"} +func (s *ListReadSetUploadPartsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListReadSetUploadPartsInput"} if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if s.NextToken != nil && len(*s.NextToken) < 1 { invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } + if s.PartSource == nil { + invalidParams.Add(request.NewErrParamRequired("PartSource")) + } if s.SequenceStoreId == nil { invalidParams.Add(request.NewErrParamRequired("SequenceStoreId")) } if s.SequenceStoreId != nil && len(*s.SequenceStoreId) < 10 { invalidParams.Add(request.NewErrParamMinLen("SequenceStoreId", 10)) } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + if s.UploadId != nil && len(*s.UploadId) < 10 { + invalidParams.Add(request.NewErrParamMinLen("UploadId", 10)) + } if invalidParams.Len() > 0 { return invalidParams @@ -14807,37 +16605,50 @@ func (s *ListReadSetImportJobsInput) Validate() error { } // SetFilter sets the Filter field's value. -func (s *ListReadSetImportJobsInput) SetFilter(v *ImportReadSetFilter) *ListReadSetImportJobsInput { +func (s *ListReadSetUploadPartsInput) SetFilter(v *ReadSetUploadPartListFilter) *ListReadSetUploadPartsInput { s.Filter = v return s } // SetMaxResults sets the MaxResults field's value. -func (s *ListReadSetImportJobsInput) SetMaxResults(v int64) *ListReadSetImportJobsInput { +func (s *ListReadSetUploadPartsInput) SetMaxResults(v int64) *ListReadSetUploadPartsInput { s.MaxResults = &v return s } // SetNextToken sets the NextToken field's value. 
-func (s *ListReadSetImportJobsInput) SetNextToken(v string) *ListReadSetImportJobsInput { +func (s *ListReadSetUploadPartsInput) SetNextToken(v string) *ListReadSetUploadPartsInput { s.NextToken = &v return s } +// SetPartSource sets the PartSource field's value. +func (s *ListReadSetUploadPartsInput) SetPartSource(v string) *ListReadSetUploadPartsInput { + s.PartSource = &v + return s +} + // SetSequenceStoreId sets the SequenceStoreId field's value. -func (s *ListReadSetImportJobsInput) SetSequenceStoreId(v string) *ListReadSetImportJobsInput { +func (s *ListReadSetUploadPartsInput) SetSequenceStoreId(v string) *ListReadSetUploadPartsInput { s.SequenceStoreId = &v return s } -type ListReadSetImportJobsOutput struct { - _ struct{} `type:"structure"` +// SetUploadId sets the UploadId field's value. +func (s *ListReadSetUploadPartsInput) SetUploadId(v string) *ListReadSetUploadPartsInput { + s.UploadId = &v + return s +} - // A list of jobs. - ImportJobs []*ImportReadSetJobItem `locationName:"importJobs" type:"list"` +type ListReadSetUploadPartsOutput struct { + _ struct{} `type:"structure"` - // A pagination token that's included if more results are available. + // Next token returned in the response of a previous ListReadSetUploadParts + // call. Used to get the next page of results. NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // An array of upload parts. + Parts []*ReadSetUploadPartListItem `locationName:"parts" type:"list"` } // String returns the string representation. @@ -14845,7 +16656,7 @@ type ListReadSetImportJobsOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
-func (s ListReadSetImportJobsOutput) String() string { +func (s ListReadSetUploadPartsOutput) String() string { return awsutil.Prettify(s) } @@ -14854,19 +16665,19 @@ func (s ListReadSetImportJobsOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListReadSetImportJobsOutput) GoString() string { +func (s ListReadSetUploadPartsOutput) GoString() string { return s.String() } -// SetImportJobs sets the ImportJobs field's value. -func (s *ListReadSetImportJobsOutput) SetImportJobs(v []*ImportReadSetJobItem) *ListReadSetImportJobsOutput { - s.ImportJobs = v +// SetNextToken sets the NextToken field's value. +func (s *ListReadSetUploadPartsOutput) SetNextToken(v string) *ListReadSetUploadPartsOutput { + s.NextToken = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListReadSetImportJobsOutput) SetNextToken(v string) *ListReadSetImportJobsOutput { - s.NextToken = &v +// SetParts sets the Parts field's value. +func (s *ListReadSetUploadPartsOutput) SetParts(v []*ReadSetUploadPartListItem) *ListReadSetUploadPartsOutput { + s.Parts = v return s } @@ -15616,6 +17427,9 @@ type ListRunsInput struct { // Specify the pagination token from a previous request to retrieve the next // page of results. StartingToken *string `location:"querystring" locationName:"startingToken" min:"1" type:"string"` + + // The status of a run. + Status *string `location:"querystring" locationName:"status" min:"1" type:"string" enum:"RunStatus"` } // String returns the string representation. 
@@ -15651,6 +17465,9 @@ func (s *ListRunsInput) Validate() error { if s.StartingToken != nil && len(*s.StartingToken) < 1 { invalidParams.Add(request.NewErrParamMinLen("StartingToken", 1)) } + if s.Status != nil && len(*s.Status) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Status", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -15682,6 +17499,12 @@ func (s *ListRunsInput) SetStartingToken(v string) *ListRunsInput { return s } +// SetStatus sets the Status field's value. +func (s *ListRunsInput) SetStatus(v string) *ListRunsInput { + s.Status = &v + return s +} + type ListRunsOutput struct { _ struct{} `type:"structure"` @@ -16347,6 +18170,209 @@ func (s *ListWorkflowsOutput) SetNextToken(v string) *ListWorkflowsOutput { return s } +// Part of the response to ListMultipartReadSetUploads, excluding completed +// and aborted multipart uploads. +type MultipartReadSetUploadListItem struct { + _ struct{} `type:"structure"` + + // The time stamp for when a direct upload was created. + // + // CreationTime is a required field + CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The description of a read set. + Description *string `locationName:"description" min:"1" type:"string"` + + // The source of an uploaded part. + // + // GeneratedFrom is a required field + GeneratedFrom *string `locationName:"generatedFrom" min:"1" type:"string" required:"true"` + + // The name of a read set. + Name *string `locationName:"name" min:"1" type:"string"` + + // The source's reference ARN. + // + // ReferenceArn is a required field + ReferenceArn *string `locationName:"referenceArn" min:"1" type:"string" required:"true"` + + // The read set source's sample ID. + // + // SampleId is a required field + SampleId *string `locationName:"sampleId" min:"1" type:"string" required:"true"` + + // The sequence store ID used for the multipart upload. 
+ // + // SequenceStoreId is a required field + SequenceStoreId *string `locationName:"sequenceStoreId" min:"10" type:"string" required:"true"` + + // The type of file the read set originated from. + // + // SourceFileType is a required field + SourceFileType *string `locationName:"sourceFileType" type:"string" required:"true" enum:"FileType"` + + // The read set source's subject ID. + // + // SubjectId is a required field + SubjectId *string `locationName:"subjectId" min:"1" type:"string" required:"true"` + + // Any tags you wish to add to a read set. + Tags map[string]*string `locationName:"tags" type:"map"` + + // The ID for the initiated multipart upload. + // + // UploadId is a required field + UploadId *string `locationName:"uploadId" min:"10" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MultipartReadSetUploadListItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MultipartReadSetUploadListItem) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *MultipartReadSetUploadListItem) SetCreationTime(v time.Time) *MultipartReadSetUploadListItem { + s.CreationTime = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *MultipartReadSetUploadListItem) SetDescription(v string) *MultipartReadSetUploadListItem { + s.Description = &v + return s +} + +// SetGeneratedFrom sets the GeneratedFrom field's value. 
+func (s *MultipartReadSetUploadListItem) SetGeneratedFrom(v string) *MultipartReadSetUploadListItem { + s.GeneratedFrom = &v + return s +} + +// SetName sets the Name field's value. +func (s *MultipartReadSetUploadListItem) SetName(v string) *MultipartReadSetUploadListItem { + s.Name = &v + return s +} + +// SetReferenceArn sets the ReferenceArn field's value. +func (s *MultipartReadSetUploadListItem) SetReferenceArn(v string) *MultipartReadSetUploadListItem { + s.ReferenceArn = &v + return s +} + +// SetSampleId sets the SampleId field's value. +func (s *MultipartReadSetUploadListItem) SetSampleId(v string) *MultipartReadSetUploadListItem { + s.SampleId = &v + return s +} + +// SetSequenceStoreId sets the SequenceStoreId field's value. +func (s *MultipartReadSetUploadListItem) SetSequenceStoreId(v string) *MultipartReadSetUploadListItem { + s.SequenceStoreId = &v + return s +} + +// SetSourceFileType sets the SourceFileType field's value. +func (s *MultipartReadSetUploadListItem) SetSourceFileType(v string) *MultipartReadSetUploadListItem { + s.SourceFileType = &v + return s +} + +// SetSubjectId sets the SubjectId field's value. +func (s *MultipartReadSetUploadListItem) SetSubjectId(v string) *MultipartReadSetUploadListItem { + s.SubjectId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *MultipartReadSetUploadListItem) SetTags(v map[string]*string) *MultipartReadSetUploadListItem { + s.Tags = v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *MultipartReadSetUploadListItem) SetUploadId(v string) *MultipartReadSetUploadListItem { + s.UploadId = &v + return s +} + +// The operation is not supported by Amazon Omics, or the API does not exist. +type NotSupportedOperationException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NotSupportedOperationException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NotSupportedOperationException) GoString() string { + return s.String() +} + +func newErrorNotSupportedOperationException(v protocol.ResponseMetadata) error { + return &NotSupportedOperationException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *NotSupportedOperationException) Code() string { + return "NotSupportedOperationException" +} + +// Message returns the exception's message. +func (s *NotSupportedOperationException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *NotSupportedOperationException) OrigErr() error { + return nil +} + +func (s *NotSupportedOperationException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *NotSupportedOperationException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *NotSupportedOperationException) RequestID() string { + return s.RespMetadata.RequestID +} + // The ranges specified in the request are not valid. type RangeNotSatisfiableException struct { _ struct{} `type:"structure"` @@ -16659,14 +18685,23 @@ type ReadSetFilter struct { // The filter's end date. 
CreatedBefore *time.Time `locationName:"createdBefore" type:"timestamp" timestampFormat:"iso8601"` + // Where the source originated. + GeneratedFrom *string `locationName:"generatedFrom" min:"1" type:"string"` + // A name to filter on. Name *string `locationName:"name" min:"1" type:"string"` // A genome reference ARN to filter on. ReferenceArn *string `locationName:"referenceArn" min:"1" type:"string"` + // The read set source's sample ID. + SampleId *string `locationName:"sampleId" min:"1" type:"string"` + // A status to filter on. Status *string `locationName:"status" type:"string" enum:"ReadSetStatus"` + + // The read set source's subject ID. + SubjectId *string `locationName:"subjectId" min:"1" type:"string"` } // String returns the string representation. @@ -16690,12 +18725,21 @@ func (s ReadSetFilter) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *ReadSetFilter) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ReadSetFilter"} + if s.GeneratedFrom != nil && len(*s.GeneratedFrom) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GeneratedFrom", 1)) + } if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } if s.ReferenceArn != nil && len(*s.ReferenceArn) < 1 { invalidParams.Add(request.NewErrParamMinLen("ReferenceArn", 1)) } + if s.SampleId != nil && len(*s.SampleId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SampleId", 1)) + } + if s.SubjectId != nil && len(*s.SubjectId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SubjectId", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -16715,6 +18759,12 @@ func (s *ReadSetFilter) SetCreatedBefore(v time.Time) *ReadSetFilter { return s } +// SetGeneratedFrom sets the GeneratedFrom field's value. +func (s *ReadSetFilter) SetGeneratedFrom(v string) *ReadSetFilter { + s.GeneratedFrom = &v + return s +} + // SetName sets the Name field's value. 
func (s *ReadSetFilter) SetName(v string) *ReadSetFilter { s.Name = &v @@ -16727,12 +18777,24 @@ func (s *ReadSetFilter) SetReferenceArn(v string) *ReadSetFilter { return s } +// SetSampleId sets the SampleId field's value. +func (s *ReadSetFilter) SetSampleId(v string) *ReadSetFilter { + s.SampleId = &v + return s +} + // SetStatus sets the Status field's value. func (s *ReadSetFilter) SetStatus(v string) *ReadSetFilter { s.Status = &v return s } +// SetSubjectId sets the SubjectId field's value. +func (s *ReadSetFilter) SetSubjectId(v string) *ReadSetFilter { + s.SubjectId = &v + return s +} + // A read set. type ReadSetListItem struct { _ struct{} `type:"structure"` @@ -16782,6 +18844,10 @@ type ReadSetListItem struct { // Status is a required field Status *string `locationName:"status" type:"string" required:"true" enum:"ReadSetStatus"` + // The status for a read set. It provides more detail as to why the read set + // has a status. + StatusMessage *string `locationName:"statusMessage" min:"1" type:"string"` + // The read set's subject ID. SubjectId *string `locationName:"subjectId" min:"1" type:"string"` } @@ -16870,12 +18936,147 @@ func (s *ReadSetListItem) SetStatus(v string) *ReadSetListItem { return s } +// SetStatusMessage sets the StatusMessage field's value. +func (s *ReadSetListItem) SetStatusMessage(v string) *ReadSetListItem { + s.StatusMessage = &v + return s +} + // SetSubjectId sets the SubjectId field's value. func (s *ReadSetListItem) SetSubjectId(v string) *ReadSetListItem { s.SubjectId = &v return s } +// Filter settings that select for read set upload parts of interest. +type ReadSetUploadPartListFilter struct { + _ struct{} `type:"structure"` + + // Filters for read set uploads after a specified time. + CreatedAfter *time.Time `locationName:"createdAfter" type:"timestamp" timestampFormat:"iso8601"` + + // Filters for read set part uploads before a specified time. 
+ CreatedBefore *time.Time `locationName:"createdBefore" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReadSetUploadPartListFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReadSetUploadPartListFilter) GoString() string { + return s.String() +} + +// SetCreatedAfter sets the CreatedAfter field's value. +func (s *ReadSetUploadPartListFilter) SetCreatedAfter(v time.Time) *ReadSetUploadPartListFilter { + s.CreatedAfter = &v + return s +} + +// SetCreatedBefore sets the CreatedBefore field's value. +func (s *ReadSetUploadPartListFilter) SetCreatedBefore(v time.Time) *ReadSetUploadPartListFilter { + s.CreatedBefore = &v + return s +} + +// The metadata of a single part of a file that was added to a multipart upload. +// A list of these parts is returned in the response to the ListReadSetUploadParts +// API. +type ReadSetUploadPartListItem struct { + _ struct{} `type:"structure"` + + // A unique identifier used to confirm that parts are being added to the correct + // upload. + // + // Checksum is a required field + Checksum *string `locationName:"checksum" type:"string" required:"true"` + + // The time stamp for when a direct upload was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"iso8601"` + + // The time stamp for the most recent update to an uploaded part. 
+	LastUpdatedTime *time.Time `locationName:"lastUpdatedTime" type:"timestamp" timestampFormat:"iso8601"`
+
+	// The number identifying the part in an upload.
+	//
+	// PartNumber is a required field
+	PartNumber *int64 `locationName:"partNumber" min:"1" type:"integer" required:"true"`
+
+	// The size of the part in an upload.
+	//
+	// PartSize is a required field
+	PartSize *int64 `locationName:"partSize" min:"1" type:"long" required:"true"`
+
+	// The origin of the part being direct uploaded.
+	//
+	// PartSource is a required field
+	PartSource *string `locationName:"partSource" type:"string" required:"true" enum:"ReadSetPartSource"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReadSetUploadPartListItem) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReadSetUploadPartListItem) GoString() string {
+	return s.String()
+}
+
+// SetChecksum sets the Checksum field's value.
+func (s *ReadSetUploadPartListItem) SetChecksum(v string) *ReadSetUploadPartListItem {
+	s.Checksum = &v
+	return s
+}
+
+// SetCreationTime sets the CreationTime field's value.
+func (s *ReadSetUploadPartListItem) SetCreationTime(v time.Time) *ReadSetUploadPartListItem {
+	s.CreationTime = &v
+	return s
+}
+
+// SetLastUpdatedTime sets the LastUpdatedTime field's value.
+func (s *ReadSetUploadPartListItem) SetLastUpdatedTime(v time.Time) *ReadSetUploadPartListItem {
+	s.LastUpdatedTime = &v
+	return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *ReadSetUploadPartListItem) SetPartNumber(v int64) *ReadSetUploadPartListItem { + s.PartNumber = &v + return s +} + +// SetPartSize sets the PartSize field's value. +func (s *ReadSetUploadPartListItem) SetPartSize(v int64) *ReadSetUploadPartListItem { + s.PartSize = &v + return s +} + +// SetPartSource sets the PartSource field's value. +func (s *ReadSetUploadPartListItem) SetPartSource(v string) *ReadSetUploadPartListItem { + s.PartSource = &v + return s +} + // A set of genome reference files. type ReferenceFiles struct { _ struct{} `type:"structure"` @@ -17446,6 +19647,9 @@ type RunGroupListItem struct { // The group's maximum duration setting in minutes. MaxDuration *int64 `locationName:"maxDuration" min:"1" type:"integer"` + // The maximum GPUs that can be used by a run group. + MaxGpus *int64 `locationName:"maxGpus" min:"1" type:"integer"` + // The group's maximum concurrent run setting. MaxRuns *int64 `locationName:"maxRuns" min:"1" type:"integer"` @@ -17501,6 +19705,12 @@ func (s *RunGroupListItem) SetMaxDuration(v int64) *RunGroupListItem { return s } +// SetMaxGpus sets the MaxGpus field's value. +func (s *RunGroupListItem) SetMaxGpus(v int64) *RunGroupListItem { + s.MaxGpus = &v + return s +} + // SetMaxRuns sets the MaxRuns field's value. func (s *RunGroupListItem) SetMaxRuns(v int64) *RunGroupListItem { s.MaxRuns = &v @@ -17702,6 +19912,9 @@ type SequenceStoreDetail struct { // The store's description. Description *string `locationName:"description" min:"1" type:"string"` + // An S3 location that is used to store files that have failed a direct upload. + FallbackLocation *string `locationName:"fallbackLocation" type:"string"` + // The store's ID. // // Id is a required field @@ -17750,6 +19963,12 @@ func (s *SequenceStoreDetail) SetDescription(v string) *SequenceStoreDetail { return s } +// SetFallbackLocation sets the FallbackLocation field's value. 
+func (s *SequenceStoreDetail) SetFallbackLocation(v string) *SequenceStoreDetail { + s.FallbackLocation = &v + return s +} + // SetId sets the Id field's value. func (s *SequenceStoreDetail) SetId(v string) *SequenceStoreDetail { s.Id = &v @@ -18013,6 +20232,9 @@ func (s *SseConfig) SetType(v string) *SseConfig { type StartAnnotationImportJobInput struct { _ struct{} `type:"structure"` + // The annotation schema generated by the parsed annotation data. + AnnotationFields map[string]*string `locationName:"annotationFields" type:"map"` + // A destination annotation store for the job. // // DestinationName is a required field @@ -18096,6 +20318,12 @@ func (s *StartAnnotationImportJobInput) Validate() error { return nil } +// SetAnnotationFields sets the AnnotationFields field's value. +func (s *StartAnnotationImportJobInput) SetAnnotationFields(v map[string]*string) *StartAnnotationImportJobInput { + s.AnnotationFields = v + return s +} + // SetDestinationName sets the DestinationName field's value. func (s *StartAnnotationImportJobInput) SetDestinationName(v string) *StartAnnotationImportJobInput { s.DestinationName = &v @@ -19408,6 +21636,9 @@ func (s *StartRunOutput) SetTags(v map[string]*string) *StartRunOutput { type StartVariantImportJobInput struct { _ struct{} `type:"structure"` + // The annotation schema generated by the parsed annotation data. + AnnotationFields map[string]*string `locationName:"annotationFields" type:"map"` + // The destination variant store for the job. // // DestinationName is a required field @@ -19483,6 +21714,12 @@ func (s *StartVariantImportJobInput) Validate() error { return nil } +// SetAnnotationFields sets the AnnotationFields field's value. +func (s *StartVariantImportJobInput) SetAnnotationFields(v map[string]*string) *StartVariantImportJobInput { + s.AnnotationFields = v + return s +} + // SetDestinationName sets the DestinationName field's value. 
func (s *StartVariantImportJobInput) SetDestinationName(v string) *StartVariantImportJobInput { s.DestinationName = &v @@ -19682,6 +21919,9 @@ type TaskListItem struct { // When the task was created. CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"iso8601"` + // The number of Graphics Processing Units (GPU) specified for the task. + Gpus *int64 `locationName:"gpus" type:"integer"` + // The task's memory use in gigabyes. Memory *int64 `locationName:"memory" min:"1" type:"integer"` @@ -19731,6 +21971,12 @@ func (s *TaskListItem) SetCreationTime(v time.Time) *TaskListItem { return s } +// SetGpus sets the Gpus field's value. +func (s *TaskListItem) SetGpus(v int64) *TaskListItem { + s.Gpus = &v + return s +} + // SetMemory sets the Memory field's value. func (s *TaskListItem) SetMemory(v int64) *TaskListItem { s.Memory = &v @@ -20215,6 +22461,9 @@ type UpdateRunGroupInput struct { // A maximum run time for the group in minutes. MaxDuration *int64 `locationName:"maxDuration" min:"1" type:"integer"` + // The maximum GPUs that can be used by a run group. + MaxGpus *int64 `locationName:"maxGpus" min:"1" type:"integer"` + // The maximum number of concurrent runs for the group. MaxRuns *int64 `locationName:"maxRuns" min:"1" type:"integer"` @@ -20255,6 +22504,9 @@ func (s *UpdateRunGroupInput) Validate() error { if s.MaxDuration != nil && *s.MaxDuration < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxDuration", 1)) } + if s.MaxGpus != nil && *s.MaxGpus < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxGpus", 1)) + } if s.MaxRuns != nil && *s.MaxRuns < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxRuns", 1)) } @@ -20286,6 +22538,12 @@ func (s *UpdateRunGroupInput) SetMaxDuration(v int64) *UpdateRunGroupInput { return s } +// SetMaxGpus sets the MaxGpus field's value. 
+func (s *UpdateRunGroupInput) SetMaxGpus(v int64) *UpdateRunGroupInput { + s.MaxGpus = &v + return s +} + // SetMaxRuns sets the MaxRuns field's value. func (s *UpdateRunGroupInput) SetMaxRuns(v int64) *UpdateRunGroupInput { s.MaxRuns = &v @@ -20572,6 +22830,156 @@ func (s UpdateWorkflowOutput) GoString() string { return s.String() } +type UploadReadSetPartInput struct { + _ struct{} `type:"structure" payload:"Payload"` + + // The number of the part being uploaded. + // + // PartNumber is a required field + PartNumber *int64 `location:"querystring" locationName:"partNumber" min:"1" type:"integer" required:"true"` + + // The source file for an upload part. + // + // PartSource is a required field + PartSource *string `location:"querystring" locationName:"partSource" type:"string" required:"true" enum:"ReadSetPartSource"` + + // The read set data to upload for a part. + // + // To use an non-seekable io.Reader for this request wrap the io.Reader with + // "aws.ReadSeekCloser". The SDK will not retry request errors for non-seekable + // readers. This will allow the SDK to send the reader's payload as chunked + // transfer encoding. + // + // Payload is a required field + Payload io.ReadSeeker `locationName:"payload" type:"blob" required:"true"` + + // The Sequence Store ID used for the multipart upload. + // + // SequenceStoreId is a required field + SequenceStoreId *string `location:"uri" locationName:"sequenceStoreId" min:"10" type:"string" required:"true"` + + // The ID for the initiated multipart upload. + // + // UploadId is a required field + UploadId *string `location:"uri" locationName:"uploadId" min:"10" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s UploadReadSetPartInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UploadReadSetPartInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadReadSetPartInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadReadSetPartInput"} + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.PartNumber != nil && *s.PartNumber < 1 { + invalidParams.Add(request.NewErrParamMinValue("PartNumber", 1)) + } + if s.PartSource == nil { + invalidParams.Add(request.NewErrParamRequired("PartSource")) + } + if s.Payload == nil { + invalidParams.Add(request.NewErrParamRequired("Payload")) + } + if s.SequenceStoreId == nil { + invalidParams.Add(request.NewErrParamRequired("SequenceStoreId")) + } + if s.SequenceStoreId != nil && len(*s.SequenceStoreId) < 10 { + invalidParams.Add(request.NewErrParamMinLen("SequenceStoreId", 10)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + if s.UploadId != nil && len(*s.UploadId) < 10 { + invalidParams.Add(request.NewErrParamMinLen("UploadId", 10)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPartNumber sets the PartNumber field's value. +func (s *UploadReadSetPartInput) SetPartNumber(v int64) *UploadReadSetPartInput { + s.PartNumber = &v + return s +} + +// SetPartSource sets the PartSource field's value. +func (s *UploadReadSetPartInput) SetPartSource(v string) *UploadReadSetPartInput { + s.PartSource = &v + return s +} + +// SetPayload sets the Payload field's value. 
+func (s *UploadReadSetPartInput) SetPayload(v io.ReadSeeker) *UploadReadSetPartInput { + s.Payload = v + return s +} + +// SetSequenceStoreId sets the SequenceStoreId field's value. +func (s *UploadReadSetPartInput) SetSequenceStoreId(v string) *UploadReadSetPartInput { + s.SequenceStoreId = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadReadSetPartInput) SetUploadId(v string) *UploadReadSetPartInput { + s.UploadId = &v + return s +} + +type UploadReadSetPartOutput struct { + _ struct{} `type:"structure"` + + // An identifier used to confirm that parts are being added to the intended + // upload. + // + // Checksum is a required field + Checksum *string `locationName:"checksum" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UploadReadSetPartOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UploadReadSetPartOutput) GoString() string { + return s.String() +} + +// SetChecksum sets the Checksum field's value. +func (s *UploadReadSetPartOutput) SetChecksum(v string) *UploadReadSetPartOutput { + s.Checksum = &v + return s +} + // The input fails to satisfy the constraints specified by an AWS service. type ValidationException struct { _ struct{} `type:"structure"` @@ -20741,6 +23149,9 @@ func (s *VariantImportItemSource) SetSource(v string) *VariantImportItemSource { type VariantImportJobItem struct { _ struct{} `type:"structure"` + // The annotation schema generated by the parsed annotation data. 
+ AnnotationFields map[string]*string `locationName:"annotationFields" type:"map"` + // When the job completed. CompletionTime *time.Time `locationName:"completionTime" type:"timestamp" timestampFormat:"iso8601"` @@ -20796,6 +23207,12 @@ func (s VariantImportJobItem) GoString() string { return s.String() } +// SetAnnotationFields sets the AnnotationFields field's value. +func (s *VariantImportJobItem) SetAnnotationFields(v map[string]*string) *VariantImportJobItem { + s.AnnotationFields = v + return s +} + // SetCompletionTime sets the CompletionTime field's value. func (s *VariantImportJobItem) SetCompletionTime(v time.Time) *VariantImportJobItem { s.CompletionTime = &v @@ -21045,6 +23462,10 @@ type WorkflowListItem struct { // The workflow's ID. Id *string `locationName:"id" min:"1" type:"string"` + // Any metadata available for workflow. The information listed may vary depending + // on the workflow, and there may also be no metadata to return. + Metadata map[string]*string `locationName:"metadata" type:"map"` + // The workflow's name. Name *string `locationName:"name" min:"1" type:"string"` @@ -21097,6 +23518,12 @@ func (s *WorkflowListItem) SetId(v string) *WorkflowListItem { return s } +// SetMetadata sets the Metadata field's value. +func (s *WorkflowListItem) SetMetadata(v map[string]*string) *WorkflowListItem { + s.Metadata = v + return s +} + // SetName sets the Name field's value. 
func (s *WorkflowListItem) SetName(v string) *WorkflowListItem { s.Name = &v @@ -21156,6 +23583,18 @@ func (s *WorkflowParameter) SetOptional(v bool) *WorkflowParameter { return s } +const ( + // AcceleratorsGpu is a Accelerators enum value + AcceleratorsGpu = "GPU" +) + +// Accelerators_Values returns all elements of the Accelerators enum +func Accelerators_Values() []string { + return []string{ + AcceleratorsGpu, + } +} + const ( // AnnotationTypeGeneric is a AnnotationType enum value AnnotationTypeGeneric = "GENERIC" @@ -21488,6 +23927,22 @@ func ReadSetImportJobStatus_Values() []string { } } +const ( + // ReadSetPartSourceSource1 is a ReadSetPartSource enum value + ReadSetPartSourceSource1 = "SOURCE1" + + // ReadSetPartSourceSource2 is a ReadSetPartSource enum value + ReadSetPartSourceSource2 = "SOURCE2" +) + +// ReadSetPartSource_Values returns all elements of the ReadSetPartSource enum +func ReadSetPartSource_Values() []string { + return []string{ + ReadSetPartSourceSource1, + ReadSetPartSourceSource2, + } +} + const ( // ReadSetStatusArchived is a ReadSetStatus enum value ReadSetStatusArchived = "ARCHIVED" @@ -21503,6 +23958,12 @@ const ( // ReadSetStatusDeleted is a ReadSetStatus enum value ReadSetStatusDeleted = "DELETED" + + // ReadSetStatusProcessingUpload is a ReadSetStatus enum value + ReadSetStatusProcessingUpload = "PROCESSING_UPLOAD" + + // ReadSetStatusUploadFailed is a ReadSetStatus enum value + ReadSetStatusUploadFailed = "UPLOAD_FAILED" ) // ReadSetStatus_Values returns all elements of the ReadSetStatus enum @@ -21513,6 +23974,8 @@ func ReadSetStatus_Values() []string { ReadSetStatusActive, ReadSetStatusDeleting, ReadSetStatusDeleted, + ReadSetStatusProcessingUpload, + ReadSetStatusUploadFailed, } } @@ -21847,6 +24310,9 @@ const ( // WorkflowStatusFailed is a WorkflowStatus enum value WorkflowStatusFailed = "FAILED" + + // WorkflowStatusInactive is a WorkflowStatus enum value + WorkflowStatusInactive = "INACTIVE" ) // WorkflowStatus_Values 
returns all elements of the WorkflowStatus enum @@ -21857,17 +24323,22 @@ func WorkflowStatus_Values() []string { WorkflowStatusUpdating, WorkflowStatusDeleted, WorkflowStatusFailed, + WorkflowStatusInactive, } } const ( // WorkflowTypePrivate is a WorkflowType enum value WorkflowTypePrivate = "PRIVATE" + + // WorkflowTypeReady2run is a WorkflowType enum value + WorkflowTypeReady2run = "READY2RUN" ) // WorkflowType_Values returns all elements of the WorkflowType enum func WorkflowType_Values() []string { return []string{ WorkflowTypePrivate, + WorkflowTypeReady2run, } } diff --git a/service/omics/doc.go b/service/omics/doc.go index 0aeadf0fb41..09eec2ac402 100644 --- a/service/omics/doc.go +++ b/service/omics/doc.go @@ -5,7 +5,7 @@ // // This is the Amazon Omics API Reference. For an introduction to the service, // see What is Amazon Omics? (https://docs.aws.amazon.com/omics/latest/dev/) -// in the Amazon Omics Developer Guide. +// in the Amazon Omics User Guide. // // See https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28 for more information on this service. // diff --git a/service/omics/errors.go b/service/omics/errors.go index 296f30c661e..7b40661a53e 100644 --- a/service/omics/errors.go +++ b/service/omics/errors.go @@ -26,6 +26,12 @@ const ( // An unexpected error occurred. Try the request again. ErrCodeInternalServerException = "InternalServerException" + // ErrCodeNotSupportedOperationException for service response error code + // "NotSupportedOperationException". + // + // The operation is not supported by Amazon Omics, or the API does not exist. + ErrCodeNotSupportedOperationException = "NotSupportedOperationException" + // ErrCodeRangeNotSatisfiableException for service response error code // "RangeNotSatisfiableException". 
// @@ -64,13 +70,14 @@ const ( ) var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ - "AccessDeniedException": newErrorAccessDeniedException, - "ConflictException": newErrorConflictException, - "InternalServerException": newErrorInternalServerException, - "RangeNotSatisfiableException": newErrorRangeNotSatisfiableException, - "RequestTimeoutException": newErrorRequestTimeoutException, - "ResourceNotFoundException": newErrorResourceNotFoundException, - "ServiceQuotaExceededException": newErrorServiceQuotaExceededException, - "ThrottlingException": newErrorThrottlingException, - "ValidationException": newErrorValidationException, + "AccessDeniedException": newErrorAccessDeniedException, + "ConflictException": newErrorConflictException, + "InternalServerException": newErrorInternalServerException, + "NotSupportedOperationException": newErrorNotSupportedOperationException, + "RangeNotSatisfiableException": newErrorRangeNotSatisfiableException, + "RequestTimeoutException": newErrorRequestTimeoutException, + "ResourceNotFoundException": newErrorResourceNotFoundException, + "ServiceQuotaExceededException": newErrorServiceQuotaExceededException, + "ThrottlingException": newErrorThrottlingException, + "ValidationException": newErrorValidationException, } diff --git a/service/omics/omicsiface/interface.go b/service/omics/omicsiface/interface.go index b170a7c5d81..4e9ce974e4f 100644 --- a/service/omics/omicsiface/interface.go +++ b/service/omics/omicsiface/interface.go @@ -26,7 +26,7 @@ import ( // // myFunc uses an SDK service client to make a request to // // Amazon Omics. 
// func myFunc(svc omicsiface.OmicsAPI) bool { -// // Make svc.BatchDeleteReadSet request +// // Make svc.AbortMultipartReadSetUpload request // } // // func main() { @@ -42,7 +42,7 @@ import ( // type mockOmicsClient struct { // omicsiface.OmicsAPI // } -// func (m *mockOmicsClient) BatchDeleteReadSet(input *omics.BatchDeleteReadSetInput) (*omics.BatchDeleteReadSetOutput, error) { +// func (m *mockOmicsClient) AbortMultipartReadSetUpload(input *omics.AbortMultipartReadSetUploadInput) (*omics.AbortMultipartReadSetUploadOutput, error) { // // mock response/functionality // } // @@ -60,6 +60,10 @@ import ( // and waiters. Its suggested to use the pattern above for testing, or using // tooling to generate mocks to satisfy the interfaces. type OmicsAPI interface { + AbortMultipartReadSetUpload(*omics.AbortMultipartReadSetUploadInput) (*omics.AbortMultipartReadSetUploadOutput, error) + AbortMultipartReadSetUploadWithContext(aws.Context, *omics.AbortMultipartReadSetUploadInput, ...request.Option) (*omics.AbortMultipartReadSetUploadOutput, error) + AbortMultipartReadSetUploadRequest(*omics.AbortMultipartReadSetUploadInput) (*request.Request, *omics.AbortMultipartReadSetUploadOutput) + BatchDeleteReadSet(*omics.BatchDeleteReadSetInput) (*omics.BatchDeleteReadSetOutput, error) BatchDeleteReadSetWithContext(aws.Context, *omics.BatchDeleteReadSetInput, ...request.Option) (*omics.BatchDeleteReadSetOutput, error) BatchDeleteReadSetRequest(*omics.BatchDeleteReadSetInput) (*request.Request, *omics.BatchDeleteReadSetOutput) @@ -76,10 +80,18 @@ type OmicsAPI interface { CancelVariantImportJobWithContext(aws.Context, *omics.CancelVariantImportJobInput, ...request.Option) (*omics.CancelVariantImportJobOutput, error) CancelVariantImportJobRequest(*omics.CancelVariantImportJobInput) (*request.Request, *omics.CancelVariantImportJobOutput) + CompleteMultipartReadSetUpload(*omics.CompleteMultipartReadSetUploadInput) (*omics.CompleteMultipartReadSetUploadOutput, error) + 
CompleteMultipartReadSetUploadWithContext(aws.Context, *omics.CompleteMultipartReadSetUploadInput, ...request.Option) (*omics.CompleteMultipartReadSetUploadOutput, error) + CompleteMultipartReadSetUploadRequest(*omics.CompleteMultipartReadSetUploadInput) (*request.Request, *omics.CompleteMultipartReadSetUploadOutput) + CreateAnnotationStore(*omics.CreateAnnotationStoreInput) (*omics.CreateAnnotationStoreOutput, error) CreateAnnotationStoreWithContext(aws.Context, *omics.CreateAnnotationStoreInput, ...request.Option) (*omics.CreateAnnotationStoreOutput, error) CreateAnnotationStoreRequest(*omics.CreateAnnotationStoreInput) (*request.Request, *omics.CreateAnnotationStoreOutput) + CreateMultipartReadSetUpload(*omics.CreateMultipartReadSetUploadInput) (*omics.CreateMultipartReadSetUploadOutput, error) + CreateMultipartReadSetUploadWithContext(aws.Context, *omics.CreateMultipartReadSetUploadInput, ...request.Option) (*omics.CreateMultipartReadSetUploadOutput, error) + CreateMultipartReadSetUploadRequest(*omics.CreateMultipartReadSetUploadInput) (*request.Request, *omics.CreateMultipartReadSetUploadOutput) + CreateReferenceStore(*omics.CreateReferenceStoreInput) (*omics.CreateReferenceStoreOutput, error) CreateReferenceStoreWithContext(aws.Context, *omics.CreateReferenceStoreInput, ...request.Option) (*omics.CreateReferenceStoreOutput, error) CreateReferenceStoreRequest(*omics.CreateReferenceStoreInput) (*request.Request, *omics.CreateReferenceStoreOutput) @@ -218,6 +230,13 @@ type OmicsAPI interface { ListAnnotationStoresPages(*omics.ListAnnotationStoresInput, func(*omics.ListAnnotationStoresOutput, bool) bool) error ListAnnotationStoresPagesWithContext(aws.Context, *omics.ListAnnotationStoresInput, func(*omics.ListAnnotationStoresOutput, bool) bool, ...request.Option) error + ListMultipartReadSetUploads(*omics.ListMultipartReadSetUploadsInput) (*omics.ListMultipartReadSetUploadsOutput, error) + ListMultipartReadSetUploadsWithContext(aws.Context, 
*omics.ListMultipartReadSetUploadsInput, ...request.Option) (*omics.ListMultipartReadSetUploadsOutput, error) + ListMultipartReadSetUploadsRequest(*omics.ListMultipartReadSetUploadsInput) (*request.Request, *omics.ListMultipartReadSetUploadsOutput) + + ListMultipartReadSetUploadsPages(*omics.ListMultipartReadSetUploadsInput, func(*omics.ListMultipartReadSetUploadsOutput, bool) bool) error + ListMultipartReadSetUploadsPagesWithContext(aws.Context, *omics.ListMultipartReadSetUploadsInput, func(*omics.ListMultipartReadSetUploadsOutput, bool) bool, ...request.Option) error + ListReadSetActivationJobs(*omics.ListReadSetActivationJobsInput) (*omics.ListReadSetActivationJobsOutput, error) ListReadSetActivationJobsWithContext(aws.Context, *omics.ListReadSetActivationJobsInput, ...request.Option) (*omics.ListReadSetActivationJobsOutput, error) ListReadSetActivationJobsRequest(*omics.ListReadSetActivationJobsInput) (*request.Request, *omics.ListReadSetActivationJobsOutput) @@ -239,6 +258,13 @@ type OmicsAPI interface { ListReadSetImportJobsPages(*omics.ListReadSetImportJobsInput, func(*omics.ListReadSetImportJobsOutput, bool) bool) error ListReadSetImportJobsPagesWithContext(aws.Context, *omics.ListReadSetImportJobsInput, func(*omics.ListReadSetImportJobsOutput, bool) bool, ...request.Option) error + ListReadSetUploadParts(*omics.ListReadSetUploadPartsInput) (*omics.ListReadSetUploadPartsOutput, error) + ListReadSetUploadPartsWithContext(aws.Context, *omics.ListReadSetUploadPartsInput, ...request.Option) (*omics.ListReadSetUploadPartsOutput, error) + ListReadSetUploadPartsRequest(*omics.ListReadSetUploadPartsInput) (*request.Request, *omics.ListReadSetUploadPartsOutput) + + ListReadSetUploadPartsPages(*omics.ListReadSetUploadPartsInput, func(*omics.ListReadSetUploadPartsOutput, bool) bool) error + ListReadSetUploadPartsPagesWithContext(aws.Context, *omics.ListReadSetUploadPartsInput, func(*omics.ListReadSetUploadPartsOutput, bool) bool, ...request.Option) error + 
ListReadSets(*omics.ListReadSetsInput) (*omics.ListReadSetsOutput, error) ListReadSetsWithContext(aws.Context, *omics.ListReadSetsInput, ...request.Option) (*omics.ListReadSetsOutput, error) ListReadSetsRequest(*omics.ListReadSetsInput) (*request.Request, *omics.ListReadSetsOutput) @@ -372,6 +398,10 @@ type OmicsAPI interface { UpdateWorkflowWithContext(aws.Context, *omics.UpdateWorkflowInput, ...request.Option) (*omics.UpdateWorkflowOutput, error) UpdateWorkflowRequest(*omics.UpdateWorkflowInput) (*request.Request, *omics.UpdateWorkflowOutput) + UploadReadSetPart(*omics.UploadReadSetPartInput) (*omics.UploadReadSetPartOutput, error) + UploadReadSetPartWithContext(aws.Context, *omics.UploadReadSetPartInput, ...request.Option) (*omics.UploadReadSetPartOutput, error) + UploadReadSetPartRequest(*omics.UploadReadSetPartInput) (*request.Request, *omics.UploadReadSetPartOutput) + WaitUntilAnnotationImportJobCreated(*omics.GetAnnotationImportJobInput) error WaitUntilAnnotationImportJobCreatedWithContext(aws.Context, *omics.GetAnnotationImportJobInput, ...request.WaiterOption) error diff --git a/service/opensearchservice/api.go b/service/opensearchservice/api.go index f65e2fad691..860cfc16cf1 100644 --- a/service/opensearchservice/api.go +++ b/service/opensearchservice/api.go @@ -10565,7 +10565,7 @@ type DescribePackagesFilter struct { Name *string `type:"string" enum:"DescribePackagesFilterName"` // A non-empty list of values for the specified filter field. - Value []*string `type:"list"` + Value []*string `min:"1" type:"list"` } // String returns the string representation. @@ -10586,6 +10586,19 @@ func (s DescribePackagesFilter) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribePackagesFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribePackagesFilter"} + if s.Value != nil && len(s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetName sets the Name field's value. func (s *DescribePackagesFilter) SetName(v string) *DescribePackagesFilter { s.Name = &v @@ -10633,6 +10646,26 @@ func (s DescribePackagesInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribePackagesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribePackagesInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetFilters sets the Filters field's value. func (s *DescribePackagesInput) SetFilters(v []*DescribePackagesFilter) *DescribePackagesInput { s.Filters = v diff --git a/service/support/api.go b/service/support/api.go index 23667a2f116..b35ddd90305 100644 --- a/service/support/api.go +++ b/service/support/api.go @@ -751,6 +751,99 @@ func (c *Support) DescribeCommunicationsPagesWithContext(ctx aws.Context, input return p.Err() } +const opDescribeCreateCaseOptions = "DescribeCreateCaseOptions" + +// DescribeCreateCaseOptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCreateCaseOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeCreateCaseOptions for more information on using the DescribeCreateCaseOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeCreateCaseOptionsRequest method. +// req, resp := client.DescribeCreateCaseOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/DescribeCreateCaseOptions +func (c *Support) DescribeCreateCaseOptionsRequest(input *DescribeCreateCaseOptionsInput) (req *request.Request, output *DescribeCreateCaseOptionsOutput) { + op := &request.Operation{ + Name: opDescribeCreateCaseOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCreateCaseOptionsInput{} + } + + output = &DescribeCreateCaseOptionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeCreateCaseOptions API operation for AWS Support. +// +// Returns a list of CreateCaseOption types along with the corresponding supported +// hours and language availability. You can specify the language categoryCode, +// issueType and serviceCode used to retrieve the CreateCaseOptions. +// +// - You must have a Business, Enterprise On-Ramp, or Enterprise Support +// plan to use the Amazon Web Services Support API. +// +// - If you call the Amazon Web Services Support API from an account that +// doesn't have a Business, Enterprise On-Ramp, or Enterprise Support plan, +// the SubscriptionRequiredException error message appears. For information +// about changing your support plan, see Amazon Web Services Support (http://aws.amazon.com/premiumsupport/). 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Support's +// API operation DescribeCreateCaseOptions for usage and error information. +// +// Returned Error Types: +// +// - InternalServerError +// An internal server error occurred. +// +// - ThrottlingException +// You have exceeded the maximum allowed TPS (Transactions Per Second) for the +// operations. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/DescribeCreateCaseOptions +func (c *Support) DescribeCreateCaseOptions(input *DescribeCreateCaseOptionsInput) (*DescribeCreateCaseOptionsOutput, error) { + req, out := c.DescribeCreateCaseOptionsRequest(input) + return out, req.Send() +} + +// DescribeCreateCaseOptionsWithContext is the same as DescribeCreateCaseOptions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeCreateCaseOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Support) DescribeCreateCaseOptionsWithContext(ctx aws.Context, input *DescribeCreateCaseOptionsInput, opts ...request.Option) (*DescribeCreateCaseOptionsOutput, error) { + req, out := c.DescribeCreateCaseOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDescribeServices = "DescribeServices" // DescribeServicesRequest generates a "aws/request.Request" representing the @@ -936,6 +1029,99 @@ func (c *Support) DescribeSeverityLevelsWithContext(ctx aws.Context, input *Desc return out, req.Send() } +const opDescribeSupportedLanguages = "DescribeSupportedLanguages" + +// DescribeSupportedLanguagesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSupportedLanguages operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSupportedLanguages for more information on using the DescribeSupportedLanguages +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeSupportedLanguagesRequest method. +// req, resp := client.DescribeSupportedLanguagesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/DescribeSupportedLanguages +func (c *Support) DescribeSupportedLanguagesRequest(input *DescribeSupportedLanguagesInput) (req *request.Request, output *DescribeSupportedLanguagesOutput) { + op := &request.Operation{ + Name: opDescribeSupportedLanguages, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSupportedLanguagesInput{} + } + + output = &DescribeSupportedLanguagesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSupportedLanguages API operation for AWS Support. 
+// +// Returns a list of supported languages for a specified categoryCode, issueType +// and serviceCode. The returned supported languages will include a ISO 639-1 +// code for the language, and the language display name. +// +// - You must have a Business, Enterprise On-Ramp, or Enterprise Support +// plan to use the Amazon Web Services Support API. +// +// - If you call the Amazon Web Services Support API from an account that +// doesn't have a Business, Enterprise On-Ramp, or Enterprise Support plan, +// the SubscriptionRequiredException error message appears. For information +// about changing your support plan, see Amazon Web Services Support (http://aws.amazon.com/premiumsupport/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Support's +// API operation DescribeSupportedLanguages for usage and error information. +// +// Returned Error Types: +// +// - InternalServerError +// An internal server error occurred. +// +// - ThrottlingException +// You have exceeded the maximum allowed TPS (Transactions Per Second) for the +// operations. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/DescribeSupportedLanguages +func (c *Support) DescribeSupportedLanguages(input *DescribeSupportedLanguagesInput) (*DescribeSupportedLanguagesOutput, error) { + req, out := c.DescribeSupportedLanguagesRequest(input) + return out, req.Send() +} + +// DescribeSupportedLanguagesWithContext is the same as DescribeSupportedLanguages with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSupportedLanguages for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Support) DescribeSupportedLanguagesWithContext(ctx aws.Context, input *DescribeSupportedLanguagesInput, opts ...request.Option) (*DescribeSupportedLanguagesOutput, error) { + req, out := c.DescribeSupportedLanguagesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeTrustedAdvisorCheckRefreshStatuses = "DescribeTrustedAdvisorCheckRefreshStatuses" // DescribeTrustedAdvisorCheckRefreshStatusesRequest generates a "aws/request.Request" representing the @@ -1011,9 +1197,14 @@ func (c *Support) DescribeTrustedAdvisorCheckRefreshStatusesRequest(input *Descr // API operation DescribeTrustedAdvisorCheckRefreshStatuses for usage and error information. // // Returned Error Types: +// // - InternalServerError // An internal server error occurred. // +// - ThrottlingException +// You have exceeded the maximum allowed TPS (Transactions Per Second) for the +// operations. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/DescribeTrustedAdvisorCheckRefreshStatuses func (c *Support) DescribeTrustedAdvisorCheckRefreshStatuses(input *DescribeTrustedAdvisorCheckRefreshStatusesInput) (*DescribeTrustedAdvisorCheckRefreshStatusesOutput, error) { req, out := c.DescribeTrustedAdvisorCheckRefreshStatusesRequest(input) @@ -1124,9 +1315,14 @@ func (c *Support) DescribeTrustedAdvisorCheckResultRequest(input *DescribeTruste // API operation DescribeTrustedAdvisorCheckResult for usage and error information. // // Returned Error Types: +// // - InternalServerError // An internal server error occurred. // +// - ThrottlingException +// You have exceeded the maximum allowed TPS (Transactions Per Second) for the +// operations. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/DescribeTrustedAdvisorCheckResult func (c *Support) DescribeTrustedAdvisorCheckResult(input *DescribeTrustedAdvisorCheckResultInput) (*DescribeTrustedAdvisorCheckResultOutput, error) { req, out := c.DescribeTrustedAdvisorCheckResultRequest(input) @@ -1221,9 +1417,14 @@ func (c *Support) DescribeTrustedAdvisorCheckSummariesRequest(input *DescribeTru // API operation DescribeTrustedAdvisorCheckSummaries for usage and error information. // // Returned Error Types: +// // - InternalServerError // An internal server error occurred. // +// - ThrottlingException +// You have exceeded the maximum allowed TPS (Transactions Per Second) for the +// operations. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/DescribeTrustedAdvisorCheckSummaries func (c *Support) DescribeTrustedAdvisorCheckSummaries(input *DescribeTrustedAdvisorCheckSummariesInput) (*DescribeTrustedAdvisorCheckSummariesOutput, error) { req, out := c.DescribeTrustedAdvisorCheckSummariesRequest(input) @@ -1323,9 +1524,14 @@ func (c *Support) DescribeTrustedAdvisorChecksRequest(input *DescribeTrustedAdvi // API operation DescribeTrustedAdvisorChecks for usage and error information. // // Returned Error Types: +// // - InternalServerError // An internal server error occurred. // +// - ThrottlingException +// You have exceeded the maximum allowed TPS (Transactions Per Second) for the +// operations. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/DescribeTrustedAdvisorChecks func (c *Support) DescribeTrustedAdvisorChecks(input *DescribeTrustedAdvisorChecksInput) (*DescribeTrustedAdvisorChecksOutput, error) { req, out := c.DescribeTrustedAdvisorChecksRequest(input) @@ -2255,9 +2461,10 @@ func (s *CaseCreationLimitExceeded) RequestID() string { // Support Center. // // - language - The language in which Amazon Web Services Support handles -// the case. 
Amazon Web Services Support currently supports English ("en") -// and Japanese ("ja"). You must specify the ISO 639-1 code for the language -// parameter if you want support in that language. +// the case. Amazon Web Services Support currently supports Chinese (“zh”), +// English ("en"), Japanese ("ja") and Korean (“ko”). You must specify +// the ISO 639-1 code for the language parameter if you want support in that +// language. // // - nextToken - A resumption point for pagination. // @@ -2298,9 +2505,9 @@ type CaseDetails struct { DisplayId *string `locationName:"displayId" type:"string"` // The language in which Amazon Web Services Support handles the case. Amazon - // Web Services Support currently supports English ("en") and Japanese ("ja"). - // You must specify the ISO 639-1 code for the language parameter if you want - // support in that language. + // Web Services Support currently supports Chinese (“zh”), English ("en"), + // Japanese ("ja") and Korean (“ko”). You must specify the ISO 639-1 code + // for the language parameter if you want support in that language. Language *string `locationName:"language" type:"string"` // The five most recent communications between you and Amazon Web Services Support @@ -2558,9 +2765,9 @@ type Communication struct { CaseId *string `locationName:"caseId" type:"string"` // The identity of the account that submitted, or responded to, the support - // case. Customer entries include the role or IAM user as well as the email - // address. For example, "AdminRole (Role) . Entries from - // the Amazon Web Services Support team display "Amazon Web Services," and don't + // case. Customer entries include the IAM role as well as the email address + // (for example, "AdminRole (Role) ). Entries from the + // Amazon Web Services Support team display "Amazon Web Services," and don't // show an email address. 
SubmittedBy *string `locationName:"submittedBy" type:"string"` @@ -2616,6 +2823,72 @@ func (s *Communication) SetTimeCreated(v string) *Communication { return s } +// A JSON-formatted object that contains the CommunicationTypeOptions for creating +// a case for a certain communication channel. It is contained in the response +// from a DescribeCreateCaseOptions request. CommunicationTypeOptions contains +// the following fields: +// +// - datesWithoutSupport - A JSON-formatted list containing date and time +// ranges for periods without support in UTC time. Date and time format is +// RFC 3339 : 'yyyy-MM-dd'T'HH:mm:ss.SSSZZ'. +// +// - supportedHours - A JSON-formatted list containing time ranges when support +// are available. Time format is RFC 3339 : 'HH:mm:ss.SSS'. +// +// - type - A string value indicating the communication type that the aforementioned +// rules apply to. At the moment the type value can assume one of 3 values +// at the moment chat, web and call. +type CommunicationTypeOptions struct { + _ struct{} `type:"structure"` + + // A JSON-formatted list containing date and time ranges for periods without + // support + DatesWithoutSupport []*DateInterval `locationName:"datesWithoutSupport" type:"list"` + + // A JSON-formatted list containing time ranges when support is available. + SupportedHours []*SupportedHour `locationName:"supportedHours" type:"list"` + + // A string value indicating the communication type. At the moment the type + // value can assume one of 3 values at the moment chat, web and call. + Type *string `locationName:"type" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CommunicationTypeOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CommunicationTypeOptions) GoString() string { + return s.String() +} + +// SetDatesWithoutSupport sets the DatesWithoutSupport field's value. +func (s *CommunicationTypeOptions) SetDatesWithoutSupport(v []*DateInterval) *CommunicationTypeOptions { + s.DatesWithoutSupport = v + return s +} + +// SetSupportedHours sets the SupportedHours field's value. +func (s *CommunicationTypeOptions) SetSupportedHours(v []*SupportedHour) *CommunicationTypeOptions { + s.SupportedHours = v + return s +} + +// SetType sets the Type field's value. +func (s *CommunicationTypeOptions) SetType(v string) *CommunicationTypeOptions { + s.Type = &v + return s +} + type CreateCaseInput struct { _ struct{} `type:"structure"` @@ -2646,9 +2919,9 @@ type CreateCaseInput struct { IssueType *string `locationName:"issueType" type:"string"` // The language in which Amazon Web Services Support handles the case. Amazon - // Web Services Support currently supports English ("en") and Japanese ("ja"). - // You must specify the ISO 639-1 code for the language parameter if you want - // support in that language. + // Web Services Support currently supports Chinese (“zh”), English ("en"), + // Japanese ("ja") and Korean (“ko”). You must specify the ISO 639-1 code + // for the language parameter if you want support in that language. Language *string `locationName:"language" type:"string"` // The code for the Amazon Web Services service. You can use the DescribeServices @@ -2800,6 +3073,48 @@ func (s *CreateCaseOutput) SetCaseId(v string) *CreateCaseOutput { return s } +// Date and time (UTC) format in RFC 3339 : 'yyyy-MM-dd'T'HH:mm:ss.SSSZZ'. +type DateInterval struct { + _ struct{} `type:"structure"` + + // End Date Time (UTC). RFC 3339 format : 'yyyy-MM-dd'T'HH:mm:ss.SSSZZ'. 
+ EndDateTime *string `locationName:"endDateTime" min:"8" type:"string"` + + // A JSON object containing start and date time (UTC). Date and time format + // is RFC 3339 : 'yyyy-MM-dd'T'HH:mm:ss.SSSZZ'. + StartDateTime *string `locationName:"startDateTime" min:"8" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DateInterval) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DateInterval) GoString() string { + return s.String() +} + +// SetEndDateTime sets the EndDateTime field's value. +func (s *DateInterval) SetEndDateTime(v string) *DateInterval { + s.EndDateTime = &v + return s +} + +// SetStartDateTime sets the StartDateTime field's value. +func (s *DateInterval) SetStartDateTime(v string) *DateInterval { + s.StartDateTime = &v + return s +} + type DescribeAttachmentInput struct { _ struct{} `type:"structure"` @@ -2979,9 +3294,9 @@ type DescribeCasesInput struct { IncludeResolvedCases *bool `locationName:"includeResolvedCases" type:"boolean"` // The language in which Amazon Web Services Support handles the case. Amazon - // Web Services Support currently supports English ("en") and Japanese ("ja"). - // You must specify the ISO 639-1 code for the language parameter if you want - // support in that language. + // Web Services Support currently supports Chinese (“zh”), English ("en"), + // Japanese ("ja") and Korean (“ko”). You must specify the ISO 639-1 code + // for the language parameter if you want support in that language. 
Language *string `locationName:"language" type:"string"` // The maximum number of results to return before paginating. @@ -3248,13 +3563,155 @@ func (s *DescribeCommunicationsOutput) SetNextToken(v string) *DescribeCommunica return s } +type DescribeCreateCaseOptionsInput struct { + _ struct{} `type:"structure"` + + // The category of problem for the support case. You also use the DescribeServices + // operation to get the category code for a service. Each Amazon Web Services + // service defines its own set of category codes. + // + // CategoryCode is a required field + CategoryCode *string `locationName:"categoryCode" type:"string" required:"true"` + + // The type of issue for the case. You can specify customer-service or technical. + // If you don't specify a value, the default is technical. + // + // IssueType is a required field + IssueType *string `locationName:"issueType" type:"string" required:"true"` + + // The language in which Amazon Web Services Support handles the case. Amazon + // Web Services Support currently supports Chinese (“zh”), English ("en"), + // Japanese ("ja") and Korean (“ko”). You must specify the ISO 639-1 code + // for the language parameter if you want support in that language. + // + // Language is a required field + Language *string `locationName:"language" type:"string" required:"true"` + + // The code for the Amazon Web Services service. You can use the DescribeServices + // operation to get the possible serviceCode values. + // + // ServiceCode is a required field + ServiceCode *string `locationName:"serviceCode" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DescribeCreateCaseOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeCreateCaseOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeCreateCaseOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeCreateCaseOptionsInput"} + if s.CategoryCode == nil { + invalidParams.Add(request.NewErrParamRequired("CategoryCode")) + } + if s.IssueType == nil { + invalidParams.Add(request.NewErrParamRequired("IssueType")) + } + if s.Language == nil { + invalidParams.Add(request.NewErrParamRequired("Language")) + } + if s.ServiceCode == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceCode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCategoryCode sets the CategoryCode field's value. +func (s *DescribeCreateCaseOptionsInput) SetCategoryCode(v string) *DescribeCreateCaseOptionsInput { + s.CategoryCode = &v + return s +} + +// SetIssueType sets the IssueType field's value. +func (s *DescribeCreateCaseOptionsInput) SetIssueType(v string) *DescribeCreateCaseOptionsInput { + s.IssueType = &v + return s +} + +// SetLanguage sets the Language field's value. +func (s *DescribeCreateCaseOptionsInput) SetLanguage(v string) *DescribeCreateCaseOptionsInput { + s.Language = &v + return s +} + +// SetServiceCode sets the ServiceCode field's value. 
+func (s *DescribeCreateCaseOptionsInput) SetServiceCode(v string) *DescribeCreateCaseOptionsInput { + s.ServiceCode = &v + return s +} + +type DescribeCreateCaseOptionsOutput struct { + _ struct{} `type:"structure"` + + // A JSON-formatted array that contains the available communication type options, + // along with the available support timeframes for the given inputs. + CommunicationTypes []*CommunicationTypeOptions `locationName:"communicationTypes" min:"1" type:"list"` + + // Language availability can be any of the following: + // + // * available + // + // * best_effort + // + // * unavailable + LanguageAvailability *string `locationName:"languageAvailability" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeCreateCaseOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeCreateCaseOptionsOutput) GoString() string { + return s.String() +} + +// SetCommunicationTypes sets the CommunicationTypes field's value. +func (s *DescribeCreateCaseOptionsOutput) SetCommunicationTypes(v []*CommunicationTypeOptions) *DescribeCreateCaseOptionsOutput { + s.CommunicationTypes = v + return s +} + +// SetLanguageAvailability sets the LanguageAvailability field's value. +func (s *DescribeCreateCaseOptionsOutput) SetLanguageAvailability(v string) *DescribeCreateCaseOptionsOutput { + s.LanguageAvailability = &v + return s +} + type DescribeServicesInput struct { _ struct{} `type:"structure"` // The language in which Amazon Web Services Support handles the case. 
Amazon - // Web Services Support currently supports English ("en") and Japanese ("ja"). - // You must specify the ISO 639-1 code for the language parameter if you want - // support in that language. + // Web Services Support currently supports Chinese (“zh”), English ("en"), + // Japanese ("ja") and Korean (“ko”). You must specify the ISO 639-1 code + // for the language parameter if you want support in that language. Language *string `locationName:"language" type:"string"` // A JSON-formatted list of service codes available for Amazon Web Services @@ -3329,9 +3786,9 @@ type DescribeSeverityLevelsInput struct { _ struct{} `type:"structure"` // The language in which Amazon Web Services Support handles the case. Amazon - // Web Services Support currently supports English ("en") and Japanese ("ja"). - // You must specify the ISO 639-1 code for the language parameter if you want - // support in that language. + // Web Services Support currently supports Chinese (“zh”), English ("en"), + // Japanese ("ja") and Korean (“ko”). You must specify the ISO 639-1 code + // for the language parameter if you want support in that language. Language *string `locationName:"language" type:"string"` } @@ -3392,6 +3849,117 @@ func (s *DescribeSeverityLevelsOutput) SetSeverityLevels(v []*SeverityLevel) *De return s } +type DescribeSupportedLanguagesInput struct { + _ struct{} `type:"structure"` + + // The category of problem for the support case. You also use the DescribeServices + // operation to get the category code for a service. Each Amazon Web Services + // service defines its own set of category codes. + // + // CategoryCode is a required field + CategoryCode *string `locationName:"categoryCode" type:"string" required:"true"` + + // The type of issue for the case. You can specify customer-service or technical. 
+ // + // IssueType is a required field + IssueType *string `locationName:"issueType" min:"9" type:"string" required:"true"` + + // The code for the Amazon Web Services service. You can use the DescribeServices + // operation to get the possible serviceCode values. + // + // ServiceCode is a required field + ServiceCode *string `locationName:"serviceCode" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeSupportedLanguagesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeSupportedLanguagesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSupportedLanguagesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSupportedLanguagesInput"} + if s.CategoryCode == nil { + invalidParams.Add(request.NewErrParamRequired("CategoryCode")) + } + if s.IssueType == nil { + invalidParams.Add(request.NewErrParamRequired("IssueType")) + } + if s.IssueType != nil && len(*s.IssueType) < 9 { + invalidParams.Add(request.NewErrParamMinLen("IssueType", 9)) + } + if s.ServiceCode == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceCode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCategoryCode sets the CategoryCode field's value. 
+func (s *DescribeSupportedLanguagesInput) SetCategoryCode(v string) *DescribeSupportedLanguagesInput { + s.CategoryCode = &v + return s +} + +// SetIssueType sets the IssueType field's value. +func (s *DescribeSupportedLanguagesInput) SetIssueType(v string) *DescribeSupportedLanguagesInput { + s.IssueType = &v + return s +} + +// SetServiceCode sets the ServiceCode field's value. +func (s *DescribeSupportedLanguagesInput) SetServiceCode(v string) *DescribeSupportedLanguagesInput { + s.ServiceCode = &v + return s +} + +type DescribeSupportedLanguagesOutput struct { + _ struct{} `type:"structure"` + + // A JSON-formatted array that contains the available ISO 639-1 language codes. + SupportedLanguages []*SupportedLanguage `locationName:"supportedLanguages" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeSupportedLanguagesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeSupportedLanguagesOutput) GoString() string { + return s.String() +} + +// SetSupportedLanguages sets the SupportedLanguages field's value. +func (s *DescribeSupportedLanguagesOutput) SetSupportedLanguages(v []*SupportedLanguage) *DescribeSupportedLanguagesOutput { + s.SupportedLanguages = v + return s +} + type DescribeTrustedAdvisorCheckRefreshStatusesInput struct { _ struct{} `type:"structure"` @@ -4160,6 +4728,164 @@ func (s *SeverityLevel) SetName(v string) *SeverityLevel { return s } +// Time range object with startTime and endTime range in RFC 3339 format. 'HH:mm:ss.SSS'. 
+type SupportedHour struct {
+	_ struct{} `type:"structure"`
+
+	// End Time. RFC 3339 format 'HH:mm:ss.SSS'.
+	EndTime *string `locationName:"endTime" type:"string"`
+
+	// Start Time. RFC 3339 format 'HH:mm:ss.SSS'.
+	StartTime *string `locationName:"startTime" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SupportedHour) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SupportedHour) GoString() string {
+	return s.String()
+}
+
+// SetEndTime sets the EndTime field's value.
+func (s *SupportedHour) SetEndTime(v string) *SupportedHour {
+	s.EndTime = &v
+	return s
+}
+
+// SetStartTime sets the StartTime field's value.
+func (s *SupportedHour) SetStartTime(v string) *SupportedHour {
+	s.StartTime = &v
+	return s
+}
+
+// A JSON-formatted object that contains the available ISO 639-1 language code,
+// language name and language display value. The language code is what should
+// be used in the CreateCase call.
+type SupportedLanguage struct {
+	_ struct{} `type:"structure"`
+
+	// 2 digit ISO 639-1 code. e.g. en
+	Code *string `locationName:"code" type:"string"`
+
+	// Language display value e.g. ENGLISH
+	Display *string `locationName:"display" type:"string"`
+
+	// Full language description e.g. ENGLISH
+	Language *string `locationName:"language" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SupportedLanguage) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SupportedLanguage) GoString() string {
+	return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *SupportedLanguage) SetCode(v string) *SupportedLanguage {
+	s.Code = &v
+	return s
+}
+
+// SetDisplay sets the Display field's value.
+func (s *SupportedLanguage) SetDisplay(v string) *SupportedLanguage {
+	s.Display = &v
+	return s
+}
+
+// SetLanguage sets the Language field's value.
+func (s *SupportedLanguage) SetLanguage(v string) *SupportedLanguage {
+	s.Language = &v
+	return s
+}
+
+// You have exceeded the maximum allowed TPS (Transactions Per Second) for the
+// operations.
+type ThrottlingException struct {
+	_ struct{} `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ThrottlingException) GoString() string { + return s.String() +} + +func newErrorThrottlingException(v protocol.ResponseMetadata) error { + return &ThrottlingException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ThrottlingException) Code() string { + return "ThrottlingException" +} + +// Message returns the exception's message. +func (s *ThrottlingException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ThrottlingException) OrigErr() error { + return nil +} + +func (s *ThrottlingException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ThrottlingException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ThrottlingException) RequestID() string { + return s.RespMetadata.RequestID +} + // The container for summary information that relates to the category of the // Trusted Advisor check. type TrustedAdvisorCategorySpecificSummary struct { diff --git a/service/support/errors.go b/service/support/errors.go index 97cbee38a44..a468ead4089 100644 --- a/service/support/errors.go +++ b/service/support/errors.go @@ -65,6 +65,13 @@ const ( // // An internal server error occurred. ErrCodeInternalServerError = "InternalServerError" + + // ErrCodeThrottlingException for service response error code + // "ThrottlingException". + // + // You have exceeded the maximum allowed TPS (Transactions Per Second) for the + // operations. 
+ ErrCodeThrottlingException = "ThrottlingException" ) var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ @@ -77,4 +84,5 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "CaseIdNotFound": newErrorCaseIdNotFound, "DescribeAttachmentLimitExceeded": newErrorDescribeAttachmentLimitExceeded, "InternalServerError": newErrorInternalServerError, + "ThrottlingException": newErrorThrottlingException, } diff --git a/service/support/supportiface/interface.go b/service/support/supportiface/interface.go index a8d4cdacb0a..6b4efb76b33 100644 --- a/service/support/supportiface/interface.go +++ b/service/support/supportiface/interface.go @@ -90,6 +90,10 @@ type SupportAPI interface { DescribeCommunicationsPages(*support.DescribeCommunicationsInput, func(*support.DescribeCommunicationsOutput, bool) bool) error DescribeCommunicationsPagesWithContext(aws.Context, *support.DescribeCommunicationsInput, func(*support.DescribeCommunicationsOutput, bool) bool, ...request.Option) error + DescribeCreateCaseOptions(*support.DescribeCreateCaseOptionsInput) (*support.DescribeCreateCaseOptionsOutput, error) + DescribeCreateCaseOptionsWithContext(aws.Context, *support.DescribeCreateCaseOptionsInput, ...request.Option) (*support.DescribeCreateCaseOptionsOutput, error) + DescribeCreateCaseOptionsRequest(*support.DescribeCreateCaseOptionsInput) (*request.Request, *support.DescribeCreateCaseOptionsOutput) + DescribeServices(*support.DescribeServicesInput) (*support.DescribeServicesOutput, error) DescribeServicesWithContext(aws.Context, *support.DescribeServicesInput, ...request.Option) (*support.DescribeServicesOutput, error) DescribeServicesRequest(*support.DescribeServicesInput) (*request.Request, *support.DescribeServicesOutput) @@ -98,6 +102,10 @@ type SupportAPI interface { DescribeSeverityLevelsWithContext(aws.Context, *support.DescribeSeverityLevelsInput, ...request.Option) (*support.DescribeSeverityLevelsOutput, error) 
DescribeSeverityLevelsRequest(*support.DescribeSeverityLevelsInput) (*request.Request, *support.DescribeSeverityLevelsOutput) + DescribeSupportedLanguages(*support.DescribeSupportedLanguagesInput) (*support.DescribeSupportedLanguagesOutput, error) + DescribeSupportedLanguagesWithContext(aws.Context, *support.DescribeSupportedLanguagesInput, ...request.Option) (*support.DescribeSupportedLanguagesOutput, error) + DescribeSupportedLanguagesRequest(*support.DescribeSupportedLanguagesInput) (*request.Request, *support.DescribeSupportedLanguagesOutput) + DescribeTrustedAdvisorCheckRefreshStatuses(*support.DescribeTrustedAdvisorCheckRefreshStatusesInput) (*support.DescribeTrustedAdvisorCheckRefreshStatusesOutput, error) DescribeTrustedAdvisorCheckRefreshStatusesWithContext(aws.Context, *support.DescribeTrustedAdvisorCheckRefreshStatusesInput, ...request.Option) (*support.DescribeTrustedAdvisorCheckRefreshStatusesOutput, error) DescribeTrustedAdvisorCheckRefreshStatusesRequest(*support.DescribeTrustedAdvisorCheckRefreshStatusesInput) (*request.Request, *support.DescribeTrustedAdvisorCheckRefreshStatusesOutput) From d47c75a58a612a38755747171b8102288965508e Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com> Date: Mon, 15 May 2023 11:32:41 -0700 Subject: [PATCH 6/7] Release v1.44.263 (2023-05-15) (#4840) Release v1.44.263 (2023-05-15) === ### Service Client Updates * `service/athena`: Updates service API and documentation * You can now define custom spark properties at start of the session for use cases like cluster encryption, table formats, and general Spark tuning. * `service/kafka`: Updates service API * `service/rekognition`: Updates service API and documentation * This release adds a new EyeDirection attribute in Amazon Rekognition DetectFaces and IndexFaces APIs which predicts the yaw and pitch angles of a person's eye gaze direction for each face detected in the image. 
* `service/rolesanywhere`: Updates service API and documentation * `service/transfer`: Updates service API and documentation * This release introduces the ability to require both password and SSH key when users authenticate to your Transfer Family servers that use the SFTP protocol. --- CHANGELOG.md | 13 + aws/endpoints/defaults.go | 26 + aws/version.go | 2 +- models/apis/athena/2017-05-18/api-2.json | 3 +- models/apis/athena/2017-05-18/docs-2.json | 1 + models/apis/kafka/2018-11-14/api-2.json | 2 +- models/apis/rekognition/2016-06-27/api-2.json | 12 +- .../apis/rekognition/2016-06-27/docs-2.json | 9 + .../apis/rolesanywhere/2018-05-10/api-2.json | 164 +++- .../apis/rolesanywhere/2018-05-10/docs-2.json | 178 +++- .../2018-05-10/endpoint-rule-set-1.json | 350 +++++++ .../2018-05-10/endpoint-tests-1.json | 548 +++++++++++ models/apis/transfer/2018-11-05/api-2.json | 12 +- models/apis/transfer/2018-11-05/docs-2.json | 90 +- .../2018-11-05/endpoint-rule-set-1.json | 380 ++++---- .../transfer/2018-11-05/endpoint-tests-1.json | 241 +++-- models/endpoints/endpoints.json | 22 +- service/athena/api.go | 10 + service/rekognition/api.go | 64 ++ service/rolesanywhere/api.go | 861 ++++++++++++++++-- service/rolesanywhere/doc.go | 36 +- .../rolesanywhereiface/interface.go | 8 + service/transfer/api.go | 259 ++++-- 23 files changed, 2757 insertions(+), 534 deletions(-) create mode 100644 models/apis/rolesanywhere/2018-05-10/endpoint-rule-set-1.json create mode 100644 models/apis/rolesanywhere/2018-05-10/endpoint-tests-1.json diff --git a/CHANGELOG.md b/CHANGELOG.md index af33473b57f..18a7c9e0e4f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,16 @@ +Release v1.44.263 (2023-05-15) +=== + +### Service Client Updates +* `service/athena`: Updates service API and documentation + * You can now define custom spark properties at start of the session for use cases like cluster encryption, table formats, and general Spark tuning. 
+* `service/kafka`: Updates service API +* `service/rekognition`: Updates service API and documentation + * This release adds a new EyeDirection attribute in Amazon Rekognition DetectFaces and IndexFaces APIs which predicts the yaw and pitch angles of a person's eye gaze direction for each face detected in the image. +* `service/rolesanywhere`: Updates service API and documentation +* `service/transfer`: Updates service API and documentation + * This release introduces the ability to require both password and SSH key when users authenticate to your Transfer Family servers that use the SFTP protocol. + Release v1.44.262 (2023-05-11) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 83a27225b0d..2d665d61ba8 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -36600,9 +36600,35 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53resolver.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "route53resolver.us-gov-east-1.amazonaws.com", + + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53resolver.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "route53resolver.us-gov-west-1.amazonaws.com", + + Deprecated: boxedTrue, + }, }, }, "runtime.lex": service{ diff --git a/aws/version.go b/aws/version.go index 773561f1a36..9e1a41ed169 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.262" +const SDKVersion = "1.44.263" diff --git a/models/apis/athena/2017-05-18/api-2.json b/models/apis/athena/2017-05-18/api-2.json index 
4c833331778..edb6e9bcfb5 100644 --- a/models/apis/athena/2017-05-18/api-2.json +++ b/models/apis/athena/2017-05-18/api-2.json @@ -1565,7 +1565,8 @@ "CoordinatorDpuSize":{"shape":"CoordinatorDpuSize"}, "MaxConcurrentDpus":{"shape":"MaxConcurrentDpus"}, "DefaultExecutorDpuSize":{"shape":"DefaultExecutorDpuSize"}, - "AdditionalConfigs":{"shape":"ParametersMap"} + "AdditionalConfigs":{"shape":"ParametersMap"}, + "SparkProperties":{"shape":"ParametersMap"} } }, "EngineVersion":{ diff --git a/models/apis/athena/2017-05-18/docs-2.json b/models/apis/athena/2017-05-18/docs-2.json index de8cc6ba175..1fdb5569879 100644 --- a/models/apis/athena/2017-05-18/docs-2.json +++ b/models/apis/athena/2017-05-18/docs-2.json @@ -1428,6 +1428,7 @@ "DataCatalog$Parameters": "

Specifies the Lambda function or functions to use for the data catalog. This is a mapping whose values depend on the catalog type.

", "Database$Parameters": "

A set of custom key/value pairs.

", "EngineConfiguration$AdditionalConfigs": "

Contains additional notebook engine MAP<string, string> parameter mappings in the form of key-value pairs. To specify an Athena notebook that the Jupyter server will download and serve, specify a value for the StartSessionRequest$NotebookVersion field, and then add a key named NotebookId to AdditionalConfigs that has the value of the Athena notebook ID.

", + "EngineConfiguration$SparkProperties": "

Specifies custom jar files and Spark properties for use cases like cluster encryption, table formats, and general Spark tuning.

", "TableMetadata$Parameters": "

A set of custom key/value pairs for table properties.

", "UpdateDataCatalogInput$Parameters": "

Specifies the Lambda function or functions to use for updating the data catalog. This is a mapping whose values depend on the catalog type.

" } diff --git a/models/apis/kafka/2018-11-14/api-2.json b/models/apis/kafka/2018-11-14/api-2.json index ad95ac9d028..0775a9e0243 100644 --- a/models/apis/kafka/2018-11-14/api-2.json +++ b/models/apis/kafka/2018-11-14/api-2.json @@ -3543,7 +3543,7 @@ "locationName": "vpcConnectionArn" } }, - "required" : [ "VpcConnectionArn" ] + "required" : [ "VpcConnectionArn", "ClusterArn" ] }, "RejectClientVpcConnectionResponse" : { "type": "structure", diff --git a/models/apis/rekognition/2016-06-27/api-2.json b/models/apis/rekognition/2016-06-27/api-2.json index 379474fcfa9..ffaf54fbd93 100644 --- a/models/apis/rekognition/2016-06-27/api-2.json +++ b/models/apis/rekognition/2016-06-27/api-2.json @@ -1263,6 +1263,7 @@ "AGE_RANGE", "BEARD", "EMOTIONS", + "EYE_DIRECTION", "EYEGLASSES", "EYES_OPEN", "GENDER", @@ -2289,6 +2290,14 @@ "min":1, "pattern":"[a-zA-Z0-9_.\\-:]+" }, + "EyeDirection":{ + "type":"structure", + "members":{ + "Yaw":{"shape":"Degree"}, + "Pitch":{"shape":"Degree"}, + "Confidence":{"shape":"Percent"} + } + }, "EyeOpen":{ "type":"structure", "members":{ @@ -2339,7 +2348,8 @@ "Pose":{"shape":"Pose"}, "Quality":{"shape":"ImageQuality"}, "Confidence":{"shape":"Percent"}, - "FaceOccluded":{"shape":"FaceOccluded"} + "FaceOccluded":{"shape":"FaceOccluded"}, + "EyeDirection":{"shape":"EyeDirection"} } }, "FaceDetailList":{ diff --git a/models/apis/rekognition/2016-06-27/docs-2.json b/models/apis/rekognition/2016-06-27/docs-2.json index 20ab043f761..d6e688e2edf 100644 --- a/models/apis/rekognition/2016-06-27/docs-2.json +++ b/models/apis/rekognition/2016-06-27/docs-2.json @@ -619,6 +619,8 @@ "Degree": { "base": null, "refs": { + "EyeDirection$Yaw": "

Value representing eye direction on the yaw axis.

", + "EyeDirection$Pitch": "

Value representing eye direction on the pitch axis.

", "Pose$Roll": "

Value representing the face rotation on the roll axis.

", "Pose$Yaw": "

Value representing the face rotation on the yaw axis.

", "Pose$Pitch": "

Value representing the face rotation on the pitch axis.

" @@ -969,6 +971,12 @@ "IndexFacesRequest$ExternalImageId": "

The ID you want to assign to all the faces detected in the image.

" } }, + "EyeDirection": { + "base": "

Indicates the direction the eyes are gazing in (independent of the head pose) as determined by its pitch and yaw.

", + "refs": { + "FaceDetail$EyeDirection": "

Indicates the direction the eyes are gazing in, as defined by pitch and yaw.

" + } + }, "EyeOpen": { "base": "

Indicates whether or not the eyes on the face are open, and the confidence level in the determination.

", "refs": { @@ -1975,6 +1983,7 @@ "DominantColor$PixelPercent": "

The percentage of image pixels that have a given dominant color.

", "Emotion$Confidence": "

Level of confidence in the determination.

", "EquipmentDetection$Confidence": "

The confidence that Amazon Rekognition has that the bounding box (BoundingBox) contains an item of PPE.

", + "EyeDirection$Confidence": "

The confidence that the service has in its predicted eye direction.

", "EyeOpen$Confidence": "

Level of confidence in the determination.

", "Eyeglasses$Confidence": "

Level of confidence in the determination.

", "Face$Confidence": "

Confidence level that the bounding box contains a face (and not a different object such as a tree).

", diff --git a/models/apis/rolesanywhere/2018-05-10/api-2.json b/models/apis/rolesanywhere/2018-05-10/api-2.json index 0e2414e2381..825f9748194 100644 --- a/models/apis/rolesanywhere/2018-05-10/api-2.json +++ b/models/apis/rolesanywhere/2018-05-10/api-2.json @@ -310,6 +310,36 @@ {"shape":"AccessDeniedException"} ] }, + "PutNotificationSettings":{ + "name":"PutNotificationSettings", + "http":{ + "method":"PATCH", + "requestUri":"/put-notifications-settings", + "responseCode":200 + }, + "input":{"shape":"PutNotificationSettingsRequest"}, + "output":{"shape":"PutNotificationSettingsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ] + }, + "ResetNotificationSettings":{ + "name":"ResetNotificationSettings", + "http":{ + "method":"PATCH", + "requestUri":"/reset-notifications-settings", + "responseCode":200 + }, + "input":{"shape":"ResetNotificationSettingsRequest"}, + "output":{"shape":"ResetNotificationSettingsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ] + }, "TagResource":{ "name":"TagResource", "http":{ @@ -431,7 +461,7 @@ "CreateProfileRequestDurationSecondsInteger":{ "type":"integer", "box":true, - "max":43200, + "max":3600, "min":900 }, "CreateTrustAnchorRequest":{ @@ -443,6 +473,7 @@ "members":{ "enabled":{"shape":"Boolean"}, "name":{"shape":"ResourceName"}, + "notificationSettings":{"shape":"NotificationSettings"}, "source":{"shape":"Source"}, "tags":{"shape":"TagList"} } @@ -615,6 +646,87 @@ "max":200, "min":1 }, + "NotificationChannel":{ + "type":"string", + "enum":["ALL"] + }, + "NotificationEvent":{ + "type":"string", + "enum":[ + "CA_CERTIFICATE_EXPIRY", + "END_ENTITY_CERTIFICATE_EXPIRY" + ] + }, + "NotificationSetting":{ + "type":"structure", + "required":[ + "enabled", + "event" + ], + "members":{ + "channel":{"shape":"NotificationChannel"}, + 
"enabled":{"shape":"Boolean"}, + "event":{"shape":"NotificationEvent"}, + "threshold":{"shape":"NotificationSettingThresholdInteger"} + } + }, + "NotificationSettingDetail":{ + "type":"structure", + "required":[ + "enabled", + "event" + ], + "members":{ + "channel":{"shape":"NotificationChannel"}, + "configuredBy":{"shape":"NotificationSettingDetailConfiguredByString"}, + "enabled":{"shape":"Boolean"}, + "event":{"shape":"NotificationEvent"}, + "threshold":{"shape":"NotificationSettingDetailThresholdInteger"} + } + }, + "NotificationSettingDetailConfiguredByString":{ + "type":"string", + "max":200, + "min":1 + }, + "NotificationSettingDetailThresholdInteger":{ + "type":"integer", + "box":true, + "max":360, + "min":1 + }, + "NotificationSettingDetails":{ + "type":"list", + "member":{"shape":"NotificationSettingDetail"}, + "max":50, + "min":0 + }, + "NotificationSettingKey":{ + "type":"structure", + "required":["event"], + "members":{ + "channel":{"shape":"NotificationChannel"}, + "event":{"shape":"NotificationEvent"} + } + }, + "NotificationSettingKeys":{ + "type":"list", + "member":{"shape":"NotificationSettingKey"}, + "max":50, + "min":0 + }, + "NotificationSettingThresholdInteger":{ + "type":"integer", + "box":true, + "max":360, + "min":1 + }, + "NotificationSettings":{ + "type":"list", + "member":{"shape":"NotificationSetting"}, + "max":50, + "min":0 + }, "ProfileArn":{ "type":"string", "max":1011, @@ -648,6 +760,42 @@ "type":"list", "member":{"shape":"ProfileDetail"} }, + "PutNotificationSettingsRequest":{ + "type":"structure", + "required":[ + "notificationSettings", + "trustAnchorId" + ], + "members":{ + "notificationSettings":{"shape":"NotificationSettings"}, + "trustAnchorId":{"shape":"Uuid"} + } + }, + "PutNotificationSettingsResponse":{ + "type":"structure", + "required":["trustAnchor"], + "members":{ + "trustAnchor":{"shape":"TrustAnchorDetail"} + } + }, + "ResetNotificationSettingsRequest":{ + "type":"structure", + "required":[ + 
"notificationSettingKeys", + "trustAnchorId" + ], + "members":{ + "notificationSettingKeys":{"shape":"NotificationSettingKeys"}, + "trustAnchorId":{"shape":"Uuid"} + } + }, + "ResetNotificationSettingsResponse":{ + "type":"structure", + "required":["trustAnchor"], + "members":{ + "trustAnchor":{"shape":"TrustAnchorDetail"} + } + }, "ResourceName":{ "type":"string", "max":255, @@ -732,10 +880,15 @@ "type":"structure", "members":{ "acmPcaArn":{"shape":"String"}, - "x509CertificateData":{"shape":"String"} + "x509CertificateData":{"shape":"SourceDataX509CertificateDataString"} }, "union":true }, + "SourceDataX509CertificateDataString":{ + "type":"string", + "max":8000, + "min":1 + }, "String":{"type":"string"}, "SubjectDetail":{ "type":"structure", @@ -798,13 +951,13 @@ "TagKeyList":{ "type":"list", "member":{"shape":"TagKey"}, - "max":50, + "max":200, "min":0 }, "TagList":{ "type":"list", "member":{"shape":"Tag"}, - "max":50, + "max":200, "min":0 }, "TagResourceRequest":{ @@ -853,6 +1006,7 @@ "createdAt":{"shape":"SyntheticTimestamp_date_time"}, "enabled":{"shape":"Boolean"}, "name":{"shape":"ResourceName"}, + "notificationSettings":{"shape":"NotificationSettingDetails"}, "source":{"shape":"Source"}, "trustAnchorArn":{"shape":"String"}, "trustAnchorId":{"shape":"Uuid"}, @@ -931,7 +1085,7 @@ "UpdateProfileRequestDurationSecondsInteger":{ "type":"integer", "box":true, - "max":43200, + "max":3600, "min":900 }, "UpdateProfileRequestSessionPolicyString":{ diff --git a/models/apis/rolesanywhere/2018-05-10/docs-2.json b/models/apis/rolesanywhere/2018-05-10/docs-2.json index dca96bf084f..7d0827b20d8 100644 --- a/models/apis/rolesanywhere/2018-05-10/docs-2.json +++ b/models/apis/rolesanywhere/2018-05-10/docs-2.json @@ -1,33 +1,35 @@ { "version": "2.0", - "service": "

AWS Identity and Access Management Roles Anywhere provides a secure way for your workloads such as servers, containers, and applications running outside of AWS to obtain Temporary AWS credentials. Your workloads can use the same IAM policies and roles that you have configured with native AWS applications to access AWS resources. Using IAM Roles Anywhere will eliminate the need to manage long term credentials for workloads running outside of AWS.

To use IAM Roles Anywhere customer workloads will need to use X.509 certificates issued by their Certificate Authority (CA) . The Certificate Authority (CA) needs to be registered with IAM Roles Anywhere as a trust anchor to establish trust between customer PKI and IAM Roles Anywhere. Customers who do not manage their own PKI system can use AWS Certificate Manager Private Certificate Authority (ACM PCA) to create a Certificate Authority and use that to establish trust with IAM Roles Anywhere

This guide describes the IAM rolesanywhere operations that you can call programmatically. For general information about IAM Roles Anywhere see https://docs.aws.amazon.com/

", + "service": "

Identity and Access Management Roles Anywhere provides a secure way for your workloads such as servers, containers, and applications that run outside of Amazon Web Services to obtain temporary Amazon Web Services credentials. Your workloads can use the same IAM policies and roles you have for native Amazon Web Services applications to access Amazon Web Services resources. Using IAM Roles Anywhere eliminates the need to manage long-term credentials for workloads running outside of Amazon Web Services.

To use IAM Roles Anywhere, your workloads must use X.509 certificates issued by their certificate authority (CA). You register the CA with IAM Roles Anywhere as a trust anchor to establish trust between your public key infrastructure (PKI) and IAM Roles Anywhere. If you don't manage your own PKI system, you can use Private Certificate Authority to create a CA and then use that to establish trust with IAM Roles Anywhere.

This guide describes the IAM Roles Anywhere operations that you can call programmatically. For more information about IAM Roles Anywhere, see the IAM Roles Anywhere User Guide.

", "operations": { - "CreateProfile": "

Creates a profile. A profile is configuration resource to list the roles that RolesAnywhere service is trusted to assume. In addition, by applying a profile you can intersect permissions with IAM managed policies.

Required permissions: rolesanywhere:CreateProfile.

", - "CreateTrustAnchor": "

Creates a trust anchor. You establish trust between IAM Roles Anywhere and your certificate authority (CA) by configuring a trust anchor. A Trust Anchor is defined either as a reference to a AWS Certificate Manager Private Certificate Authority (ACM PCA), or by uploading a Certificate Authority (CA) certificate. Your AWS workloads can authenticate with the trust anchor using certificates issued by the trusted Certificate Authority (CA) in exchange for temporary AWS credentials.

Required permissions: rolesanywhere:CreateTrustAnchor.

", + "CreateProfile": "

Creates a profile, a list of the roles that Roles Anywhere service is trusted to assume. You use profiles to intersect permissions with IAM managed policies.

Required permissions: rolesanywhere:CreateProfile.

", + "CreateTrustAnchor": "

Creates a trust anchor to establish trust between IAM Roles Anywhere and your certificate authority (CA). You can define a trust anchor as a reference to a Private Certificate Authority (Private CA) or by uploading a CA certificate. Your Amazon Web Services workloads can authenticate with the trust anchor using certificates issued by the CA in exchange for temporary Amazon Web Services credentials.

Required permissions: rolesanywhere:CreateTrustAnchor.

", "DeleteCrl": "

Deletes a certificate revocation list (CRL).

Required permissions: rolesanywhere:DeleteCrl.

", "DeleteProfile": "

Deletes a profile.

Required permissions: rolesanywhere:DeleteProfile.

", "DeleteTrustAnchor": "

Deletes a trust anchor.

Required permissions: rolesanywhere:DeleteTrustAnchor.

", "DisableCrl": "

Disables a certificate revocation list (CRL).

Required permissions: rolesanywhere:DisableCrl.

", - "DisableProfile": "

Disables a profile. When disabled, CreateSession requests with this profile fail.

Required permissions: rolesanywhere:DisableProfile.

", - "DisableTrustAnchor": "

Disables a trust anchor. When disabled, CreateSession requests specifying this trust anchor are unauthorized.

Required permissions: rolesanywhere:DisableTrustAnchor.

", + "DisableProfile": "

Disables a profile. When disabled, temporary credential requests with this profile fail.

Required permissions: rolesanywhere:DisableProfile.

", + "DisableTrustAnchor": "

Disables a trust anchor. When disabled, temporary credential requests specifying this trust anchor are unauthorized.

Required permissions: rolesanywhere:DisableTrustAnchor.

", "EnableCrl": "

Enables a certificate revocation list (CRL). When enabled, certificates stored in the CRL are unauthorized to receive session credentials.

Required permissions: rolesanywhere:EnableCrl.

", - "EnableProfile": "

Enables the roles in a profile to receive session credentials in CreateSession.

Required permissions: rolesanywhere:EnableProfile.

", + "EnableProfile": "

Enables temporary credential requests for a profile.

Required permissions: rolesanywhere:EnableProfile.

", "EnableTrustAnchor": "

Enables a trust anchor. When enabled, certificates in the trust anchor chain are authorized for trust validation.

Required permissions: rolesanywhere:EnableTrustAnchor.

", "GetCrl": "

Gets a certificate revocation list (CRL).

Required permissions: rolesanywhere:GetCrl.

", "GetProfile": "

Gets a profile.

Required permissions: rolesanywhere:GetProfile.

", - "GetSubject": "

Gets a Subject. A Subject associates a certificate identity with authentication attempts by CreateSession. The Subject resources stores audit information such as status of the last authentication attempt, the certificate data used in the attempt, and the last time the associated identity attempted authentication.

Required permissions: rolesanywhere:GetSubject.

", + "GetSubject": "

Gets a subject, which associates a certificate identity with authentication attempts. The subject stores auditing information such as the status of the last authentication attempt, the certificate data used in the attempt, and the last time the associated identity attempted authentication.

Required permissions: rolesanywhere:GetSubject.

", "GetTrustAnchor": "

Gets a trust anchor.

Required permissions: rolesanywhere:GetTrustAnchor.

", - "ImportCrl": "

Imports the certificate revocation list (CRL). CRl is a list of certificates that have been revoked by the issuing certificate Authority (CA). IAM Roles Anywhere validates against the crl list before issuing credentials.

Required permissions: rolesanywhere:ImportCrl.

", - "ListCrls": "

Lists all Crls in the authenticated account and Amazon Web Services Region.

Required permissions: rolesanywhere:ListCrls.

", + "ImportCrl": "

Imports the certificate revocation list (CRL). A CRL is a list of certificates that have been revoked by the issuing certificate authority (CA). IAM Roles Anywhere validates against the CRL before issuing credentials.

Required permissions: rolesanywhere:ImportCrl.

", + "ListCrls": "

Lists all certificate revocation lists (CRL) in the authenticated account and Amazon Web Services Region.

Required permissions: rolesanywhere:ListCrls.

", "ListProfiles": "

Lists all profiles in the authenticated account and Amazon Web Services Region.

Required permissions: rolesanywhere:ListProfiles.

", "ListSubjects": "

Lists the subjects in the authenticated account and Amazon Web Services Region.

Required permissions: rolesanywhere:ListSubjects.

", "ListTagsForResource": "

Lists the tags attached to the resource.

Required permissions: rolesanywhere:ListTagsForResource.

", "ListTrustAnchors": "

Lists the trust anchors in the authenticated account and Amazon Web Services Region.

Required permissions: rolesanywhere:ListTrustAnchors.

", + "PutNotificationSettings": "

Attaches a list of notification settings to a trust anchor.

A notification setting includes information such as event name, threshold, status of the notification setting, and the channel to notify.

Required permissions: rolesanywhere:PutNotificationSettings.

", + "ResetNotificationSettings": "

Resets the custom notification setting to the IAM Roles Anywhere default setting.

Required permissions: rolesanywhere:ResetNotificationSettings.

", "TagResource": "

Attaches tags to a resource.

Required permissions: rolesanywhere:TagResource.

", "UntagResource": "

Removes tags from the resource.

Required permissions: rolesanywhere:UntagResource.

", - "UpdateCrl": "

Updates the certificate revocation list (CRL). CRl is a list of certificates that have been revoked by the issuing certificate Authority (CA). IAM Roles Anywhere validates against the crl list before issuing credentials.

Required permissions: rolesanywhere:UpdateCrl.

", - "UpdateProfile": "

Updates the profile. A profile is configuration resource to list the roles that RolesAnywhere service is trusted to assume. In addition, by applying a profile you can scope-down permissions with IAM managed policies.

Required permissions: rolesanywhere:UpdateProfile.

", - "UpdateTrustAnchor": "

Updates the trust anchor.You establish trust between IAM Roles Anywhere and your certificate authority (CA) by configuring a trust anchor. A Trust Anchor is defined either as a reference to a AWS Certificate Manager Private Certificate Authority (ACM PCA), or by uploading a Certificate Authority (CA) certificate. Your AWS workloads can authenticate with the trust anchor using certificates issued by the trusted Certificate Authority (CA) in exchange for temporary AWS credentials.

Required permissions: rolesanywhere:UpdateTrustAnchor.

" + "UpdateCrl": "

Updates the certificate revocation list (CRL). A CRL is a list of certificates that have been revoked by the issuing certificate authority (CA). IAM Roles Anywhere validates against the CRL before issuing credentials.

Required permissions: rolesanywhere:UpdateCrl.

", + "UpdateProfile": "

Updates a profile, a list of the roles that IAM Roles Anywhere service is trusted to assume. You use profiles to intersect permissions with IAM managed policies.

Required permissions: rolesanywhere:UpdateProfile.

", + "UpdateTrustAnchor": "

Updates a trust anchor. You establish trust between IAM Roles Anywhere and your certificate authority (CA) by configuring a trust anchor. You can define a trust anchor as a reference to a Private Certificate Authority (Private CA) or by uploading a CA certificate. Your Amazon Web Services workloads can authenticate with the trust anchor using certificates issued by the CA in exchange for temporary Amazon Web Services credentials.

Required permissions: rolesanywhere:UpdateTrustAnchor.

" }, "shapes": { "AccessDeniedException": { @@ -53,17 +55,19 @@ "base": null, "refs": { "CreateProfileRequest$enabled": "

Specifies whether the profile is enabled.

", - "CreateProfileRequest$requireInstanceProperties": "

Specifies whether instance properties are required in CreateSession requests with this profile.

", + "CreateProfileRequest$requireInstanceProperties": "

Specifies whether instance properties are required in temporary credential requests with this profile.

", "CreateTrustAnchorRequest$enabled": "

Specifies whether the trust anchor is enabled.

", "CredentialSummary$enabled": "

Indicates whether the credential is enabled.

", - "CredentialSummary$failed": "

Indicates whether the CreateSession operation was successful.

", + "CredentialSummary$failed": "

Indicates whether the temporary credential request was successful.

", "CrlDetail$enabled": "

Indicates whether the certificate revocation list (CRL) is enabled.

", "ImportCrlRequest$enabled": "

Specifies whether the certificate revocation list (CRL) is enabled.

", - "InstanceProperty$failed": "

Indicates whether the CreateSession operation was successful.

", + "InstanceProperty$failed": "

Indicates whether the temporary credential request was successful.

", + "NotificationSetting$enabled": "

Indicates whether the notification setting is enabled.

", + "NotificationSettingDetail$enabled": "

Indicates whether the notification setting is enabled.

", "ProfileDetail$enabled": "

Indicates whether the profile is enabled.

", - "ProfileDetail$requireInstanceProperties": "

Specifies whether instance properties are required in CreateSession requests with this profile.

", + "ProfileDetail$requireInstanceProperties": "

Specifies whether instance properties are required in temporary credential requests with this profile.

", "SubjectDetail$enabled": "

The enabled status of the subject.

", - "SubjectSummary$enabled": "

The enabled status of the Subject.

", + "SubjectSummary$enabled": "

The enabled status of the subject.

", "TrustAnchorDetail$enabled": "

Indicates whether the trust anchor is enabled.

" } }, @@ -86,11 +90,11 @@ "CredentialSummaries": { "base": null, "refs": { - "SubjectDetail$credentials": "

The temporary session credentials vended at the last authenticating call with this Subject.

" + "SubjectDetail$credentials": "

The temporary session credentials vended at the last authenticating call with this subject.

" } }, "CredentialSummary": { - "base": "

A record of a presented X509 credential to CreateSession.

", + "base": "

A record of a presented X509 credential from a temporary credential request.

", "refs": { "CredentialSummaries$member": null } @@ -121,7 +125,7 @@ "ImportCrlRequestCrlDataBlob": { "base": null, "refs": { - "ImportCrlRequest$crlData": "

The x509 v3 specified certificate revocation list

" + "ImportCrlRequest$crlData": "

The x509 v3 specified certificate revocation list (CRL).

" } }, "InstanceProperties": { @@ -179,7 +183,7 @@ "ListRequestNextTokenString": { "base": null, "refs": { - "ListRequest$nextToken": "

A token that indicates where the output should continue from, if a previous operation did not show all results. To get the next results, call the operation again with this value.

" + "ListRequest$nextToken": "

A token that indicates where the output should continue from, if a previous request did not show all results. To get the next results, make the request again with this value.

" } }, "ListSubjectsResponse": { @@ -216,6 +220,77 @@ "ManagedPolicyList$member": null } }, + "NotificationChannel": { + "base": null, + "refs": { + "NotificationSetting$channel": "

The specified channel of notification. IAM Roles Anywhere uses CloudWatch metrics, EventBridge, and Health Dashboard to notify for an event.

In the absence of a specific channel, IAM Roles Anywhere applies this setting to 'ALL' channels.

", + "NotificationSettingDetail$channel": "

The specified channel of notification. IAM Roles Anywhere uses CloudWatch metrics, EventBridge, and Health Dashboard to notify for an event.

In the absence of a specific channel, IAM Roles Anywhere applies this setting to 'ALL' channels.

", + "NotificationSettingKey$channel": "

The specified channel of notification.

" + } + }, + "NotificationEvent": { + "base": null, + "refs": { + "NotificationSetting$event": "

The event to which this notification setting is applied.

", + "NotificationSettingDetail$event": "

The event to which this notification setting is applied.

", + "NotificationSettingKey$event": "

The notification setting event to reset.

" + } + }, + "NotificationSetting": { + "base": "

Customizable notification settings that will be applied to notification events. IAM Roles Anywhere consumes these settings while notifying across multiple channels - CloudWatch metrics, EventBridge, and Health Dashboard.

", + "refs": { + "NotificationSettings$member": null + } + }, + "NotificationSettingDetail": { + "base": "

The state of a notification setting.

A notification setting includes information such as event name, threshold, status of the notification setting, and the channel to notify.

", + "refs": { + "NotificationSettingDetails$member": null + } + }, + "NotificationSettingDetailConfiguredByString": { + "base": null, + "refs": { + "NotificationSettingDetail$configuredBy": "

The principal that configured the notification setting. For default settings configured by IAM Roles Anywhere, the value is rolesanywhere.amazonaws.com, and for customized notifications settings, it is the respective account ID.

" + } + }, + "NotificationSettingDetailThresholdInteger": { + "base": null, + "refs": { + "NotificationSettingDetail$threshold": "

The number of days before a notification event.

" + } + }, + "NotificationSettingDetails": { + "base": null, + "refs": { + "TrustAnchorDetail$notificationSettings": "

A list of notification settings to be associated to the trust anchor.

" + } + }, + "NotificationSettingKey": { + "base": "

A notification setting key to reset. A notification setting key includes the event and the channel.

", + "refs": { + "NotificationSettingKeys$member": null + } + }, + "NotificationSettingKeys": { + "base": null, + "refs": { + "ResetNotificationSettingsRequest$notificationSettingKeys": "

A list of notification setting keys to reset. A notification setting key includes the event and the channel.

" + } + }, + "NotificationSettingThresholdInteger": { + "base": null, + "refs": { + "NotificationSetting$threshold": "

The number of days before a notification event. This value is required for a notification setting that is enabled.

" + } + }, + "NotificationSettings": { + "base": null, + "refs": { + "CreateTrustAnchorRequest$notificationSettings": "

A list of notification settings to be associated to the trust anchor.

", + "PutNotificationSettingsRequest$notificationSettings": "

A list of notification settings to be associated to the trust anchor.

" + } + }, "ProfileArn": { "base": null, "refs": { @@ -240,6 +315,26 @@ "ListProfilesResponse$profiles": "

A list of profiles.

" } }, + "PutNotificationSettingsRequest": { + "base": null, + "refs": { + } + }, + "PutNotificationSettingsResponse": { + "base": null, + "refs": { + } + }, + "ResetNotificationSettingsRequest": { + "base": null, + "refs": { + } + }, + "ResetNotificationSettingsResponse": { + "base": null, + "refs": { + } + }, "ResourceName": { "base": null, "refs": { @@ -267,9 +362,9 @@ "RoleArnList": { "base": null, "refs": { - "CreateProfileRequest$roleArns": "

A list of IAM roles that this profile can assume in a CreateSession operation.

", - "ProfileDetail$roleArns": "

A list of IAM roles that this profile can assume in a CreateSession operation.

", - "UpdateProfileRequest$roleArns": "

A list of IAM roles that this profile can assume in a CreateSession operation.

" + "CreateProfileRequest$roleArns": "

A list of IAM roles that this profile can assume in a temporary credential request.

", + "ProfileDetail$roleArns": "

A list of IAM roles that this profile can assume in a temporary credential request.

", + "UpdateProfileRequest$roleArns": "

A list of IAM roles that this profile can assume in a temporary credential request.

" } }, "ScalarCrlRequest": { @@ -306,6 +401,12 @@ "Source$sourceData": "

The data field of the trust anchor depending on its type.

" } }, + "SourceDataX509CertificateDataString": { + "base": null, + "refs": { + "SourceData$x509CertificateData": "

The PEM-encoded data for the certificate anchor. Included for trust anchors of type CERTIFICATE_BUNDLE.

" + } + }, "String": { "base": null, "refs": { @@ -317,15 +418,14 @@ "CrlDetail$crlArn": "

The ARN of the certificate revocation list (CRL).

", "CrlDetail$name": "

The name of the certificate revocation list (CRL).

", "CrlDetail$trustAnchorArn": "

The ARN of the TrustAnchor the certificate revocation list (CRL) will provide revocation for.

", - "ListCrlsResponse$nextToken": "

A token that indicates where the output should continue from, if a previous operation did not show all results. To get the next results, call the operation again with this value.

", - "ListProfilesResponse$nextToken": "

A token that indicates where the output should continue from, if a previous operation did not show all results. To get the next results, call the operation again with this value.

", - "ListSubjectsResponse$nextToken": "

A token that indicates where the output should continue from, if a previous operation did not show all results. To get the next results, call the operation again with this value.

", - "ListTrustAnchorsResponse$nextToken": "

A token that indicates where the output should continue from, if a previous operation did not show all results. To get the next results, call the operation again with this value.

", + "ListCrlsResponse$nextToken": "

A token that indicates where the output should continue from, if a previous request did not show all results. To get the next results, make the request again with this value.

", + "ListProfilesResponse$nextToken": "

A token that indicates where the output should continue from, if a previous request did not show all results. To get the next results, make the request again with this value.

", + "ListSubjectsResponse$nextToken": "

A token that indicates where the output should continue from, if a previous request did not show all results. To get the next results, make the request again with this value.

", + "ListTrustAnchorsResponse$nextToken": "

A token that indicates where the output should continue from, if a previous request did not show all results. To get the next results, make the request again with this value.

", "ProfileDetail$createdBy": "

The Amazon Web Services account that created the profile.

", "ProfileDetail$sessionPolicy": "

A session policy that applies to the trust boundary of the vended session credentials.

", "ResourceNotFoundException$message": null, - "SourceData$acmPcaArn": "

The root certificate of the Certificate Manager Private Certificate Authority specified by this ARN is used in trust validation for CreateSession operations. Included for trust anchors of type AWS_ACM_PCA.

", - "SourceData$x509CertificateData": "

The PEM-encoded data for the certificate anchor. Included for trust anchors of type CERTIFICATE_BUNDLE.

", + "SourceData$acmPcaArn": "

The root certificate of the Private Certificate Authority specified by this ARN is used in trust validation for temporary credential requests. Included for trust anchors of type AWS_ACM_PCA.

", "SubjectDetail$subjectArn": "

The ARN of the resource.

", "SubjectDetail$x509Subject": "

The x509 principal identifier of the authenticating certificate.

", "SubjectSummary$subjectArn": "

The ARN of the resource.

", @@ -353,7 +453,7 @@ } }, "SubjectSummary": { - "base": "

A summary representation of Subject resources returned in read operations; primarily ListSubjects.

", + "base": "

A summary representation of subjects.

", "refs": { "SubjectSummaries$member": null } @@ -361,17 +461,17 @@ "SyntheticTimestamp_date_time": { "base": null, "refs": { - "CredentialSummary$seenAt": "

The ISO-8601 time stamp of when the certificate was last used in a CreateSession operation.

", + "CredentialSummary$seenAt": "

The ISO-8601 time stamp of when the certificate was last used in a temporary credential request.

", "CrlDetail$createdAt": "

The ISO-8601 timestamp when the certificate revocation list (CRL) was created.

", "CrlDetail$updatedAt": "

The ISO-8601 timestamp when the certificate revocation list (CRL) was last updated.

", - "InstanceProperty$seenAt": "

The ISO-8601 time stamp of when the certificate was last used in a CreateSession operation.

", + "InstanceProperty$seenAt": "

The ISO-8601 time stamp of when the certificate was last used in a temporary credential request.

", "ProfileDetail$createdAt": "

The ISO-8601 timestamp when the profile was created.

", "ProfileDetail$updatedAt": "

The ISO-8601 timestamp when the profile was last updated.

", "SubjectDetail$createdAt": "

The ISO-8601 timestamp when the subject was created.

", - "SubjectDetail$lastSeenAt": "

The ISO-8601 timestamp of the last time this Subject requested temporary session credentials.

", + "SubjectDetail$lastSeenAt": "

The ISO-8601 timestamp of the last time this subject requested temporary session credentials.

", "SubjectDetail$updatedAt": "

The ISO-8601 timestamp when the subject was last updated.

", - "SubjectSummary$createdAt": "

The ISO-8601 time stamp of when the certificate was first used in a CreateSession operation.

", - "SubjectSummary$lastSeenAt": "

The ISO-8601 time stamp of when the certificate was last used in a CreateSession operation.

", + "SubjectSummary$createdAt": "

The ISO-8601 time stamp of when the certificate was first used in a temporary credential request.

", + "SubjectSummary$lastSeenAt": "

The ISO-8601 time stamp of when the certificate was last used in a temporary credential request.

", "SubjectSummary$updatedAt": "

The ISO-8601 timestamp when the subject was last updated.

", "TrustAnchorDetail$createdAt": "

The ISO-8601 timestamp when the trust anchor was created.

", "TrustAnchorDetail$updatedAt": "

The ISO-8601 timestamp when the trust anchor was last updated.

" @@ -436,6 +536,8 @@ "TrustAnchorDetail": { "base": "

The state of the trust anchor after a read or write operation.

", "refs": { + "PutNotificationSettingsResponse$trustAnchor": null, + "ResetNotificationSettingsResponse$trustAnchor": null, "TrustAnchorDetailResponse$trustAnchor": "

The state of the trust anchor after a read or write operation.

", "TrustAnchorDetails$member": null } @@ -475,7 +577,7 @@ "UpdateCrlRequestCrlDataBlob": { "base": null, "refs": { - "UpdateCrlRequest$crlData": "

The x509 v3 specified certificate revocation list

" + "UpdateCrlRequest$crlData": "

The x509 v3 specified certificate revocation list (CRL).

" } }, "UpdateProfileRequest": { @@ -505,6 +607,8 @@ "refs": { "CrlDetail$crlId": "

The unique identifier of the certificate revocation list (CRL).

", "ProfileDetail$profileId": "

The unique identifier of the profile.

", + "PutNotificationSettingsRequest$trustAnchorId": "

The unique identifier of the trust anchor.

", + "ResetNotificationSettingsRequest$trustAnchorId": "

The unique identifier of the trust anchor.

", "ScalarCrlRequest$crlId": "

The unique identifier of the certificate revocation list (CRL).

", "ScalarProfileRequest$profileId": "

The unique identifier of the profile.

", "ScalarSubjectRequest$subjectId": "

The unique identifier of the subject.

", diff --git a/models/apis/rolesanywhere/2018-05-10/endpoint-rule-set-1.json b/models/apis/rolesanywhere/2018-05-10/endpoint-rule-set-1.json new file mode 100644 index 00000000000..8c80f513d77 --- /dev/null +++ b/models/apis/rolesanywhere/2018-05-10/endpoint-rule-set-1.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + 
}, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://rolesanywhere-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://rolesanywhere-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + 
} + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://rolesanywhere.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://rolesanywhere.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + } + ] +} \ No newline at end of file diff --git a/models/apis/rolesanywhere/2018-05-10/endpoint-tests-1.json b/models/apis/rolesanywhere/2018-05-10/endpoint-tests-1.json new file mode 100644 index 00000000000..b5000b3a39f --- /dev/null +++ b/models/apis/rolesanywhere/2018-05-10/endpoint-tests-1.json @@ -0,0 +1,548 @@ +{ + "testCases": [ + { + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.ap-east-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + 
"url": "https://rolesanywhere.ap-northeast-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.ap-northeast-2.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.ap-northeast-3.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.ap-south-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.ap-southeast-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.ap-southeast-2.amazonaws.com" + } + }, + "params": { + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.ca-central-1.amazonaws.com" + } + }, + "params": { + "Region": "ca-central-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + 
"expect": { + "endpoint": { + "url": "https://rolesanywhere.eu-central-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.eu-north-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.eu-west-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.eu-west-2.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.eu-west-3.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.me-south-1.amazonaws.com" + } + }, + "params": { + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.sa-east-1.amazonaws.com" + } + }, + "params": { + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": 
"https://rolesanywhere.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.us-east-2.amazonaws.com" + } + }, + "params": { + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.us-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere-fips.cn-north-1.api.amazonwebservices.com.cn" + 
} + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": 
"https://rolesanywhere.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack 
enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://rolesanywhere.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/models/apis/transfer/2018-11-05/api-2.json b/models/apis/transfer/2018-11-05/api-2.json index 
107cd0d3214..1c979999a5b 100644 --- a/models/apis/transfer/2018-11-05/api-2.json +++ b/models/apis/transfer/2018-11-05/api-2.json @@ -1951,7 +1951,8 @@ "Url":{"shape":"Url"}, "InvocationRole":{"shape":"Role"}, "DirectoryId":{"shape":"DirectoryId"}, - "Function":{"shape":"Function"} + "Function":{"shape":"Function"}, + "SftpAuthenticationMethods":{"shape":"SftpAuthenticationMethods"} } }, "IdentityProviderType":{ @@ -2777,6 +2778,15 @@ "ENABLE_NO_OP" ] }, + "SftpAuthenticationMethods":{ + "type":"string", + "enum":[ + "PASSWORD", + "PUBLIC_KEY", + "PUBLIC_KEY_OR_PASSWORD", + "PUBLIC_KEY_AND_PASSWORD" + ] + }, "SigningAlg":{ "type":"string", "enum":[ diff --git a/models/apis/transfer/2018-11-05/docs-2.json b/models/apis/transfer/2018-11-05/docs-2.json index 0d31bc8dceb..39c3344daae 100644 --- a/models/apis/transfer/2018-11-05/docs-2.json +++ b/models/apis/transfer/2018-11-05/docs-2.json @@ -13,7 +13,7 @@ "DeleteAgreement": "

Delete the agreement that's specified in the provided AgreementId.

", "DeleteCertificate": "

Deletes the certificate that's specified in the CertificateId parameter.

", "DeleteConnector": "

Deletes the agreement that's specified in the provided ConnectorId.

", - "DeleteHostKey": "

Deletes the host key that's specified in the HoskKeyId parameter.

", + "DeleteHostKey": "

Deletes the host key that's specified in the HostKeyId parameter.

", "DeleteProfile": "

Deletes the profile that's specified in the ProfileId parameter.

", "DeleteServer": "

Deletes the file transfer protocol-enabled server that you specify.

No response returns from this operation.

", "DeleteSshPublicKey": "

Deletes a user's Secure Shell (SSH) public key.

", @@ -23,7 +23,7 @@ "DescribeAgreement": "

Describes the agreement that's identified by the AgreementId.

", "DescribeCertificate": "

Describes the certificate that's identified by the CertificateId.

", "DescribeConnector": "

Describes the connector that's identified by the ConnectorId.

", - "DescribeExecution": "

You can use DescribeExecution to check the details of the execution of the specified workflow.

", + "DescribeExecution": "

You can use DescribeExecution to check the details of the execution of the specified workflow.

This API call only returns details for in-progress workflows.

If you provide an ID for an execution that is not in progress, or if the execution doesn't match the specified workflow ID, you receive a ResourceNotFound exception.

", "DescribeHostKey": "

Returns the details of the host key that's specified by the HostKeyId and ServerId.

", "DescribeProfile": "

Returns the details of the profile that's specified by the ProfileId.

", "DescribeSecurityPolicy": "

Describes the security policy that is attached to your file transfer protocol-enabled server. The response contains a description of the security policy's properties. For more information about security policies, see Working with security policies.

", @@ -32,25 +32,25 @@ "DescribeWorkflow": "

Describes the specified workflow.

", "ImportCertificate": "

Imports the signing and encryption certificates that you need to create local (AS2) profiles and partner profiles.

", "ImportHostKey": "

Adds a host key to the server that's specified by the ServerId parameter.

", - "ImportSshPublicKey": "

Adds a Secure Shell (SSH) public key to a user account identified by a UserName value assigned to the specific file transfer protocol-enabled server, identified by ServerId.

The response returns the UserName value, the ServerId value, and the name of the SshPublicKeyId.

", + "ImportSshPublicKey": "

Adds a Secure Shell (SSH) public key to a Transfer Family user identified by a UserName value assigned to the specific file transfer protocol-enabled server, identified by ServerId.

The response returns the UserName value, the ServerId value, and the name of the SshPublicKeyId.

", "ListAccesses": "

Lists the details for all the accesses you have on your server.

", "ListAgreements": "

Returns a list of the agreements for the server that's identified by the ServerId that you supply. If you want to limit the results to a certain number, supply a value for the MaxResults parameter. If you ran the command previously and received a value for NextToken, you can supply that value to continue listing agreements from where you left off.

", "ListCertificates": "

Returns a list of the current certificates that have been imported into Transfer Family. If you want to limit the results to a certain number, supply a value for the MaxResults parameter. If you ran the command previously and received a value for the NextToken parameter, you can supply that value to continue listing certificates from where you left off.

", "ListConnectors": "

Lists the connectors for the specified Region.

", - "ListExecutions": "

Lists all executions for the specified workflow.

", + "ListExecutions": "

Lists all in-progress executions for the specified workflow.

If the specified workflow ID cannot be found, ListExecutions returns a ResourceNotFound exception.

", "ListHostKeys": "

Returns a list of host keys for the server that's specified by the ServerId parameter.

", "ListProfiles": "

Returns a list of the profiles for your system. If you want to limit the results to a certain number, supply a value for the MaxResults parameter. If you ran the command previously and received a value for NextToken, you can supply that value to continue listing profiles from where you left off.

", "ListSecurityPolicies": "

Lists the security policies that are attached to your file transfer protocol-enabled servers.

", "ListServers": "

Lists the file transfer protocol-enabled servers that are associated with your Amazon Web Services account.

", "ListTagsForResource": "

Lists all of the tags associated with the Amazon Resource Name (ARN) that you specify. The resource can be a user, server, or role.

", "ListUsers": "

Lists the users for a file transfer protocol-enabled server that you specify by passing the ServerId parameter.

", - "ListWorkflows": "

Lists all of your workflows.

", + "ListWorkflows": "

Lists all workflows associated with your Amazon Web Services account for your current region.

", "SendWorkflowStepState": "

Sends a callback for asynchronous custom steps.

The ExecutionId, WorkflowId, and Token are passed to the target resource during execution of a custom step of a workflow. You must include those with their callback as well as providing a status.

", "StartFileTransfer": "

Begins an outbound file transfer to a remote AS2 server. You specify the ConnectorId and the file paths for where to send the files.

", "StartServer": "

Changes the state of a file transfer protocol-enabled server from OFFLINE to ONLINE. It has no impact on a server that is already ONLINE. An ONLINE server can accept and process file transfer jobs.

The state of STARTING indicates that the server is in an intermediate state, either not fully able to respond, or not fully online. The values of START_FAILED can indicate an error condition.

No response is returned from this call.

", "StopServer": "

Changes the state of a file transfer protocol-enabled server from ONLINE to OFFLINE. An OFFLINE server cannot accept and process file transfer jobs. Information tied to your server, such as server and user properties, are not affected by stopping your server.

Stopping the server does not reduce or impact your file transfer protocol endpoint billing; you must delete the server to stop being billed.

The state of STOPPING indicates that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of STOP_FAILED can indicate an error condition.

No response is returned from this call.

", "TagResource": "

Attaches a key-value pair to a resource, as identified by its Amazon Resource Name (ARN). Resources are users, servers, roles, and other entities.

There is no response returned from this call.

", - "TestIdentityProvider": "

If the IdentityProviderType of a file transfer protocol-enabled server is AWS_DIRECTORY_SERVICE or API_Gateway, tests whether your identity provider is set up successfully. We highly recommend that you call this operation to test your authentication method as soon as you create your server. By doing so, you can troubleshoot issues with the identity provider integration to ensure that your users can successfully use the service.

The ServerId and UserName parameters are required. The ServerProtocol, SourceIp, and UserPassword are all optional.

You cannot use TestIdentityProvider if the IdentityProviderType of your server is SERVICE_MANAGED.

", + "TestIdentityProvider": "

If the IdentityProviderType of a file transfer protocol-enabled server is AWS_DIRECTORY_SERVICE or API_GATEWAY, tests whether your identity provider is set up successfully. We highly recommend that you call this operation to test your authentication method as soon as you create your server. By doing so, you can troubleshoot issues with the identity provider integration to ensure that your users can successfully use the service.

The ServerId and UserName parameters are required. The ServerProtocol, SourceIp, and UserPassword are all optional.

Note the following:

", "UntagResource": "

Detaches a key-value pair from a resource, as identified by its Amazon Resource Name (ARN). Resources are users, servers, roles, and other entities.

No response is returned from this call.

", "UpdateAccess": "

Allows you to update parameters for the access specified in the ServerID and ExternalID parameters.

", "UpdateAgreement": "

Updates some of the parameters for an existing agreement. Provide the AgreementId and the ServerId for the agreement that you want to update, along with the new values for the parameters to update.

", @@ -360,7 +360,7 @@ "CustomStepTarget": { "base": null, "refs": { - "CustomStepDetails$Target": "

The ARN for the lambda function that is being called.

" + "CustomStepDetails$Target": "

The ARN for the Lambda function that is being called.

" } }, "CustomStepTimeoutSeconds": { @@ -374,7 +374,7 @@ "refs": { "DescribedHostKey$DateImported": "

The date on which the host key was added to the server.

", "ListedHostKey$DateImported": "

The date on which the host key was added to the server.

", - "SshPublicKey$DateImported": "

Specifies the date that the public key was added to the user account.

" + "SshPublicKey$DateImported": "

Specifies the date that the public key was added to the Transfer Family user.

" } }, "DecryptStepDetails": { @@ -606,7 +606,7 @@ "DescribedUser": { "base": "

Describes the properties of a user that was specified.

", "refs": { - "DescribeUserResponse$User": "

An array containing the properties of the user account for the ServerID value that you specified.

" + "DescribeUserResponse$User": "

An array containing the properties of the Transfer Family user for the ServerID value that you specified.

" } }, "DescribedWorkflow": { @@ -784,7 +784,7 @@ "Function": { "base": null, "refs": { - "IdentityProviderDetails$Function": "

The ARN for a lambda function to use for the Identity provider.

" + "IdentityProviderDetails$Function": "

The ARN for a Lambda function to use for the Identity provider.

" } }, "HomeDirectory": { @@ -880,17 +880,17 @@ "IdentityProviderDetails": { "base": "

Returns information related to the type of user authentication that is in use for a file transfer protocol-enabled server's users. A server can have only one method of authentication.

", "refs": { - "CreateServerRequest$IdentityProviderDetails": "

Required when IdentityProviderType is set to AWS_DIRECTORY_SERVICE or API_GATEWAY. Accepts an array containing all of the information required to use a directory in AWS_DIRECTORY_SERVICE or invoke a customer-supplied authentication API, including the API Gateway URL. Not required when IdentityProviderType is set to SERVICE_MANAGED.

", + "CreateServerRequest$IdentityProviderDetails": "

Required when IdentityProviderType is set to AWS_DIRECTORY_SERVICE, AWS_LAMBDA, or API_GATEWAY. Accepts an array containing all of the information required to use a directory in AWS_DIRECTORY_SERVICE or invoke a customer-supplied authentication API, including the API Gateway URL. Not required when IdentityProviderType is set to SERVICE_MANAGED.

", "DescribedServer$IdentityProviderDetails": "

Specifies information to call a customer-supplied authentication API. This field is not populated when the IdentityProviderType of a server is AWS_DIRECTORY_SERVICE or SERVICE_MANAGED.

", "UpdateServerRequest$IdentityProviderDetails": "

An array containing all of the information required to call a customer's authentication API method.

" } }, "IdentityProviderType": { - "base": "

Returns information related to the type of user authentication that is in use for a file transfer protocol-enabled server's users. For AWS_DIRECTORY_SERVICE or SERVICE_MANAGED authentication, the Secure Shell (SSH) public keys are stored with a user on the server instance. For API_GATEWAY authentication, your custom authentication method is implemented by using an API call. The server can have only one method of authentication.

", + "base": "

The mode of authentication for a server. The default value is SERVICE_MANAGED, which allows you to store and access user credentials within the Transfer Family service.

Use AWS_DIRECTORY_SERVICE to provide access to Active Directory groups in Directory Service for Microsoft Active Directory or Microsoft Active Directory in your on-premises environment or in Amazon Web Services using AD Connector. This option also requires you to provide a Directory ID by using the IdentityProviderDetails parameter.

Use the API_GATEWAY value to integrate with an identity provider of your choosing. The API_GATEWAY setting requires you to provide an Amazon API Gateway endpoint URL to call for authentication by using the IdentityProviderDetails parameter.

Use the AWS_LAMBDA value to directly use a Lambda function as your identity provider. If you choose this value, you must specify the ARN for the Lambda function in the Function parameter for the IdentityProviderDetails data type.

", "refs": { - "CreateServerRequest$IdentityProviderType": "

The mode of authentication for a server. The default value is SERVICE_MANAGED, which allows you to store and access user credentials within the Transfer Family service.

Use AWS_DIRECTORY_SERVICE to provide access to Active Directory groups in Directory Service for Microsoft Active Directory or Microsoft Active Directory in your on-premises environment or in Amazon Web Services using AD Connector. This option also requires you to provide a Directory ID by using the IdentityProviderDetails parameter.

Use the API_GATEWAY value to integrate with an identity provider of your choosing. The API_GATEWAY setting requires you to provide an Amazon API Gateway endpoint URL to call for authentication by using the IdentityProviderDetails parameter.

Use the AWS_LAMBDA value to directly use an Lambda function as your identity provider. If you choose this value, you must specify the ARN for the Lambda function in the Function parameter or the IdentityProviderDetails data type.

", - "DescribedServer$IdentityProviderType": "

The mode of authentication for a server. The default value is SERVICE_MANAGED, which allows you to store and access user credentials within the Transfer Family service.

Use AWS_DIRECTORY_SERVICE to provide access to Active Directory groups in Directory Service for Microsoft Active Directory or Microsoft Active Directory in your on-premises environment or in Amazon Web Services using AD Connector. This option also requires you to provide a Directory ID by using the IdentityProviderDetails parameter.

Use the API_GATEWAY value to integrate with an identity provider of your choosing. The API_GATEWAY setting requires you to provide an Amazon API Gateway endpoint URL to call for authentication by using the IdentityProviderDetails parameter.

Use the AWS_LAMBDA value to directly use an Lambda function as your identity provider. If you choose this value, you must specify the ARN for the Lambda function in the Function parameter or the IdentityProviderDetails data type.

", - "ListedServer$IdentityProviderType": "

The mode of authentication for a server. The default value is SERVICE_MANAGED, which allows you to store and access user credentials within the Transfer Family service.

Use AWS_DIRECTORY_SERVICE to provide access to Active Directory groups in Directory Service for Microsoft Active Directory or Microsoft Active Directory in your on-premises environment or in Amazon Web Services using AD Connector. This option also requires you to provide a Directory ID by using the IdentityProviderDetails parameter.

Use the API_GATEWAY value to integrate with an identity provider of your choosing. The API_GATEWAY setting requires you to provide an Amazon API Gateway endpoint URL to call for authentication by using the IdentityProviderDetails parameter.

Use the AWS_LAMBDA value to directly use an Lambda function as your identity provider. If you choose this value, you must specify the ARN for the Lambda function in the Function parameter or the IdentityProviderDetails data type.

" + "CreateServerRequest$IdentityProviderType": "

The mode of authentication for a server. The default value is SERVICE_MANAGED, which allows you to store and access user credentials within the Transfer Family service.

Use AWS_DIRECTORY_SERVICE to provide access to Active Directory groups in Directory Service for Microsoft Active Directory or Microsoft Active Directory in your on-premises environment or in Amazon Web Services using AD Connector. This option also requires you to provide a Directory ID by using the IdentityProviderDetails parameter.

Use the API_GATEWAY value to integrate with an identity provider of your choosing. The API_GATEWAY setting requires you to provide an Amazon API Gateway endpoint URL to call for authentication by using the IdentityProviderDetails parameter.

Use the AWS_LAMBDA value to directly use a Lambda function as your identity provider. If you choose this value, you must specify the ARN for the Lambda function in the Function parameter for the IdentityProviderDetails data type.

", + "DescribedServer$IdentityProviderType": "

The mode of authentication for a server. The default value is SERVICE_MANAGED, which allows you to store and access user credentials within the Transfer Family service.

Use AWS_DIRECTORY_SERVICE to provide access to Active Directory groups in Directory Service for Microsoft Active Directory or Microsoft Active Directory in your on-premises environment or in Amazon Web Services using AD Connector. This option also requires you to provide a Directory ID by using the IdentityProviderDetails parameter.

Use the API_GATEWAY value to integrate with an identity provider of your choosing. The API_GATEWAY setting requires you to provide an Amazon API Gateway endpoint URL to call for authentication by using the IdentityProviderDetails parameter.

Use the AWS_LAMBDA value to directly use a Lambda function as your identity provider. If you choose this value, you must specify the ARN for the Lambda function in the Function parameter for the IdentityProviderDetails data type.

", + "ListedServer$IdentityProviderType": "

The mode of authentication for a server. The default value is SERVICE_MANAGED, which allows you to store and access user credentials within the Transfer Family service.

Use AWS_DIRECTORY_SERVICE to provide access to Active Directory groups in Directory Service for Microsoft Active Directory or Microsoft Active Directory in your on-premises environment or in Amazon Web Services using AD Connector. This option also requires you to provide a Directory ID by using the IdentityProviderDetails parameter.

Use the API_GATEWAY value to integrate with an identity provider of your choosing. The API_GATEWAY setting requires you to provide an Amazon API Gateway endpoint URL to call for authentication by using the IdentityProviderDetails parameter.

Use the AWS_LAMBDA value to directly use a Lambda function as your identity provider. If you choose this value, you must specify the ARN for the Lambda function in the Function parameter for the IdentityProviderDetails data type.

" } }, "ImportCertificateRequest": { @@ -926,8 +926,8 @@ "InputFileLocation": { "base": "

Specifies the location for the file that's being processed.

", "refs": { - "CopyStepDetails$DestinationFileLocation": "

Specifies the location for the file being copied. Use ${Transfer:username} or ${Transfer:UploadDate} in this field to parametrize the destination prefix by username or uploaded date.

", - "DecryptStepDetails$DestinationFileLocation": null + "CopyStepDetails$DestinationFileLocation": "

Specifies the location for the file being copied. Use ${Transfer:UserName} or ${Transfer:UploadDate} in this field to parametrize the destination prefix by username or uploaded date.

", + "DecryptStepDetails$DestinationFileLocation": "

Specifies the location for the file being decrypted. Use ${Transfer:UserName} or ${Transfer:UploadDate} in this field to parametrize the destination prefix by username or uploaded date.

" } }, "InternalServiceError": { @@ -1122,7 +1122,7 @@ "ListedExecutions": { "base": null, "refs": { - "ListExecutionsResponse$Executions": "

Returns the details for each execution.

" + "ListExecutionsResponse$Executions": "

Returns the details for each execution, in a ListedExecution array.

" } }, "ListedHostKey": { @@ -1170,7 +1170,7 @@ "ListedUsers": { "base": null, "refs": { - "ListUsersResponse$Users": "

Returns the user accounts and their properties for the ServerId value that you specify.

" + "ListUsersResponse$Users": "

Returns the Transfer Family users and their properties for the ServerId value that you specify.

" } }, "ListedWorkflow": { @@ -1306,8 +1306,8 @@ "OverwriteExisting": { "base": null, "refs": { - "CopyStepDetails$OverwriteExisting": "

A flag that indicates whether to overwrite an existing file of the same name. The default is FALSE.

", - "DecryptStepDetails$OverwriteExisting": "

A flag that indicates whether to overwrite an existing file of the same name. The default is FALSE.

" + "CopyStepDetails$OverwriteExisting": "

A flag that indicates whether to overwrite an existing file of the same name. The default is FALSE.

If the workflow is processing a file that has the same name as an existing file, the behavior is as follows:

", + "DecryptStepDetails$OverwriteExisting": "

A flag that indicates whether to overwrite an existing file of the same name. The default is FALSE.

If the workflow is processing a file that has the same name as an existing file, the behavior is as follows:

" } }, "PassiveIp": { @@ -1404,7 +1404,7 @@ "base": null, "refs": { "Protocols$member": null, - "TestIdentityProviderRequest$ServerProtocol": "

The type of file transfer protocol to be tested.

The available protocols are:

" + "TestIdentityProviderRequest$ServerProtocol": "

The type of file transfer protocol to be tested.

The available protocols are:

" } }, "ProtocolDetails": { @@ -1450,7 +1450,7 @@ "Response": { "base": null, "refs": { - "TestIdentityProviderResponse$Response": "

The response that is returned from your API Gateway.

" + "TestIdentityProviderResponse$Response": "

The response that is returned from your API Gateway or your Lambda function.

" } }, "RetryAfterSeconds": { @@ -1475,7 +1475,7 @@ "DescribedExecution$ExecutionRole": "

The IAM role associated with the execution.

", "DescribedServer$LoggingRole": "

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging for Amazon S3 or Amazon EFS events. When set, you can view user activity in your CloudWatch logs.

", "DescribedUser$Role": "

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust relationship that allows the server to access your resources when servicing your users' transfer requests.

", - "IdentityProviderDetails$InvocationRole": "

Provides the type of InvocationRole used to authenticate the user account.

", + "IdentityProviderDetails$InvocationRole": "

This parameter is only applicable if your IdentityProviderType is API_GATEWAY. Provides the type of InvocationRole used to authenticate the user account.

", "ListedAccess$Role": "

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust relationship that allows the server to access your resources when servicing your users' transfer requests.

", "ListedServer$LoggingRole": "

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging for Amazon S3 or Amazon EFS events. When set, you can view user activity in your CloudWatch logs.

", "ListedUser$Role": "

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust relationship that allows the server to access your resources when servicing your users' transfer requests.

The IAM role that controls your users' access to your Amazon S3 bucket for servers with Domain=S3, or your EFS file system for servers with Domain=EFS.

The policies attached to this role determine the level of access you want to provide your users when transferring files into and out of your S3 buckets or EFS file systems.

", @@ -1655,10 +1655,10 @@ "UpdateAgreementRequest$ServerId": "

A system-assigned unique identifier for a server instance. This is the specific server that the agreement uses.

", "UpdateHostKeyRequest$ServerId": "

The identifier of the server that contains the host key that you are updating.

", "UpdateHostKeyResponse$ServerId": "

Returns the server identifier for the server that contains the updated host key.

", - "UpdateServerRequest$ServerId": "

A system-assigned unique identifier for a server instance that the user account is assigned to.

", - "UpdateServerResponse$ServerId": "

A system-assigned unique identifier for a server that the user account is assigned to.

", - "UpdateUserRequest$ServerId": "

A system-assigned unique identifier for a server instance that the user account is assigned to.

", - "UpdateUserResponse$ServerId": "

A system-assigned unique identifier for a server instance that the user account is assigned to.

", + "UpdateServerRequest$ServerId": "

A system-assigned unique identifier for a server instance that the Transfer Family user is assigned to.

", + "UpdateServerResponse$ServerId": "

A system-assigned unique identifier for a server that the Transfer Family user is assigned to.

", + "UpdateUserRequest$ServerId": "

A system-assigned unique identifier for a Transfer Family server instance that the user is assigned to.

", + "UpdateUserResponse$ServerId": "

A system-assigned unique identifier for a Transfer Family server instance that the account is assigned to.

", "UserDetails$ServerId": "

The system-assigned unique identifier for a Transfer server instance.

" } }, @@ -1693,6 +1693,12 @@ "ProtocolDetails$SetStatOption": "

Use the SetStatOption to ignore the error that is generated when the client attempts to use SETSTAT on a file you are uploading to an S3 bucket.

Some SFTP file transfer clients can attempt to change the attributes of remote files, including timestamp and permissions, using commands, such as SETSTAT when uploading the file. However, these commands are not compatible with object storage systems, such as Amazon S3. Due to this incompatibility, file uploads from these clients can result in errors even when the file is otherwise successfully uploaded.

Set the value to ENABLE_NO_OP to have the Transfer Family server ignore the SETSTAT command, and upload files without needing to make any changes to your SFTP client. While the SetStatOption ENABLE_NO_OP setting ignores the error, it does generate a log entry in Amazon CloudWatch Logs, so you can determine when the client is making a SETSTAT call.

If you want to preserve the original timestamp for your file, and modify other file attributes using SETSTAT, you can use Amazon EFS as backend storage with Transfer Family.

" } }, + "SftpAuthenticationMethods": { + "base": null, + "refs": { + "IdentityProviderDetails$SftpAuthenticationMethods": "

For SFTP-enabled servers, and for custom identity providers only, you can specify whether to authenticate using a password, SSH key pair, or both.

" + } + }, "SigningAlg": { "base": null, "refs": { @@ -1712,11 +1718,11 @@ "SourceIp": { "base": null, "refs": { - "TestIdentityProviderRequest$SourceIp": "

The source IP address of the user account to be tested.

" + "TestIdentityProviderRequest$SourceIp": "

The source IP address of the account to be tested.

" } }, "SshPublicKey": { - "base": "

Provides information about the public Secure Shell (SSH) key that is associated with a user account for the specific file transfer protocol-enabled server (as identified by ServerId). The information returned includes the date the key was imported, the public key contents, and the public key ID. A user can store more than one SSH public key associated with their user name on a specific server.

", + "base": "

Provides information about the public Secure Shell (SSH) key that is associated with a Transfer Family user for the specific file transfer protocol-enabled server (as identified by ServerId). The information returned includes the date the key was imported, the public key contents, and the public key ID. A user can store more than one SSH public key associated with their user name on a specific server.

", "refs": { "SshPublicKeys$member": null } @@ -1774,7 +1780,7 @@ "StatusCode": { "base": null, "refs": { - "TestIdentityProviderResponse$StatusCode": "

The HTTP status code that is the response from your API Gateway.

" + "TestIdentityProviderResponse$StatusCode": "

The HTTP status code that is the response from your API Gateway or your Lambda function.

" } }, "StepResultOutputsJson": { @@ -1856,7 +1862,7 @@ "ImportCertificateRequest$Tags": "

Key-value pairs that can be used to group and search for certificates.

", "ImportHostKeyRequest$Tags": "

Key-value pairs that can be used to group and search for host keys.

", "ListTagsForResourceResponse$Tags": "

Key-value pairs that are assigned to a resource, usually for the purpose of grouping and searching for items. Tags are metadata that you define.

", - "TagResourceRequest$Tags": "

Key-value pairs assigned to ARNs that you can use to group and search for resources by type. You can attach this metadata to user accounts for any purpose.

" + "TagResourceRequest$Tags": "

Key-value pairs assigned to ARNs that you can use to group and search for resources by type. You can attach this metadata to resources (servers, users, workflows, and so on) for any purpose.

" } }, "TestIdentityProviderRequest": { @@ -1999,24 +2005,24 @@ "base": null, "refs": { "CreateUserRequest$UserName": "

A unique string that identifies a user and is associated with a ServerId. This user name must be a minimum of 3 and a maximum of 100 characters long. The following are valid characters: a-z, A-Z, 0-9, underscore '_', hyphen '-', period '.', and at sign '@'. The user name can't start with a hyphen, period, or at sign.

", - "CreateUserResponse$UserName": "

A unique string that identifies a user account associated with a server.

", + "CreateUserResponse$UserName": "

A unique string that identifies a Transfer Family user.

", "DeleteSshPublicKeyRequest$UserName": "

A unique string that identifies a user whose public key is being deleted.

", "DeleteUserRequest$UserName": "

A unique string that identifies a user that is being deleted from a server.

", "DescribeUserRequest$UserName": "

The name of the user assigned to one or more servers. User names are part of the sign-in credentials to use the Transfer Family service and perform file transfer tasks.

", "DescribedUser$UserName": "

Specifies the name of the user that was requested to be described. User names are used for authentication purposes. This is the string that will be used by your user when they log in to your server.

", - "ImportSshPublicKeyRequest$UserName": "

The name of the user account that is assigned to one or more servers.

", + "ImportSshPublicKeyRequest$UserName": "

The name of the Transfer Family user that is assigned to one or more servers.

", "ImportSshPublicKeyResponse$UserName": "

A user name assigned to the ServerID value that you specified.

", "ListedUser$UserName": "

Specifies the name of the user whose ARN was specified. User names are used for authentication purposes.

", - "TestIdentityProviderRequest$UserName": "

The name of the user account to be tested.

", + "TestIdentityProviderRequest$UserName": "

The name of the account to be tested.

", "UpdateUserRequest$UserName": "

A unique string that identifies a user and is associated with a server as specified by the ServerId. This user name must be a minimum of 3 and a maximum of 100 characters long. The following are valid characters: a-z, A-Z, 0-9, underscore '_', hyphen '-', period '.', and at sign '@'. The user name can't start with a hyphen, period, or at sign.

", "UpdateUserResponse$UserName": "

The unique identifier for a user that is assigned to a server instance that was specified in the request.

", - "UserDetails$UserName": "

A unique string that identifies a user account associated with a server.

" + "UserDetails$UserName": "

A unique string that identifies a Transfer Family user associated with a server.

" } }, "UserPassword": { "base": null, "refs": { - "TestIdentityProviderRequest$UserPassword": "

The password of the user account to be tested.

" + "TestIdentityProviderRequest$UserPassword": "

The password of the account to be tested.

" } }, "VpcEndpointId": { @@ -2040,7 +2046,7 @@ } }, "WorkflowDetail": { - "base": "

Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.

In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when a file is open when the session disconnects.

", + "base": "

Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.

In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when the server session disconnects while the file is still being uploaded.

", "refs": { "OnPartialUploadWorkflowDetails$member": null, "OnUploadWorkflowDetails$member": null @@ -2049,9 +2055,9 @@ "WorkflowDetails": { "base": "

Container for the WorkflowDetail data type. It is used by actions that trigger a workflow to begin execution.

", "refs": { - "CreateServerRequest$WorkflowDetails": "

Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.

In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when a file is open when the session disconnects.

", - "DescribedServer$WorkflowDetails": "

Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.

In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when a file is open when the session disconnects.

", - "UpdateServerRequest$WorkflowDetails": "

Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.

In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when a file is open when the session disconnects.

To remove an associated workflow from a server, you can provide an empty OnUpload object, as in the following example.

aws transfer update-server --server-id s-01234567890abcdef --workflow-details '{\"OnUpload\":[]}'

" + "CreateServerRequest$WorkflowDetails": "

Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.

In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when the server session disconnects while the file is still being uploaded.

", + "DescribedServer$WorkflowDetails": "

Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.

In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when the server session disconnects while the file is still being uploaded.

", + "UpdateServerRequest$WorkflowDetails": "

Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.

In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when the server session disconnects while the file is still being uploaded.

To remove an associated workflow from a server, you can provide an empty OnUpload object, as in the following example.

aws transfer update-server --server-id s-01234567890abcdef --workflow-details '{\"OnUpload\":[]}'

" } }, "WorkflowId": { @@ -2097,7 +2103,7 @@ "base": null, "refs": { "CreateWorkflowRequest$Steps": "

Specifies the details for the steps that are in the specified workflow.

The TYPE specifies which of the following actions is being taken for this step.

Currently, copying and tagging are supported only on S3.

For file location, you specify either the Amazon S3 bucket and key, or the Amazon EFS file system ID and path.

", - "CreateWorkflowRequest$OnExceptionSteps": "

Specifies the steps (actions) to take if errors are encountered during execution of the workflow.

For custom steps, the lambda function needs to send FAILURE to the call back API to kick off the exception steps. Additionally, if the lambda does not send SUCCESS before it times out, the exception steps are executed.

", + "CreateWorkflowRequest$OnExceptionSteps": "

Specifies the steps (actions) to take if errors are encountered during execution of the workflow.

For custom steps, the Lambda function needs to send FAILURE to the callback API to kick off the exception steps. Additionally, if the Lambda does not send SUCCESS before it times out, the exception steps are executed.

", "DescribedWorkflow$Steps": "

Specifies the details for the steps that are in the specified workflow.

", "DescribedWorkflow$OnExceptionSteps": "

Specifies the steps (actions) to take if errors are encountered during execution of the workflow.

" } diff --git a/models/apis/transfer/2018-11-05/endpoint-rule-set-1.json b/models/apis/transfer/2018-11-05/endpoint-rule-set-1.json index 62e04723b35..6f1477c015b 100644 --- a/models/apis/transfer/2018-11-05/endpoint-rule-set-1.json +++ b/models/apis/transfer/2018-11-05/endpoint-rule-set-1.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - 
}, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,154 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://transfer-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://transfer-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], 
- "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://transfer-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://transfer-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://transfer.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -286,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://transfer.{Region}.{PartitionResult#dualStackDnsSuffix}", + "url": "https://transfer.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -295,28 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://transfer.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/models/apis/transfer/2018-11-05/endpoint-tests-1.json b/models/apis/transfer/2018-11-05/endpoint-tests-1.json index 28a47d68408..5dffbbb040e 100644 --- a/models/apis/transfer/2018-11-05/endpoint-tests-1.json +++ b/models/apis/transfer/2018-11-05/endpoint-tests-1.json @@ -9,8 +9,8 @@ }, "params": { "Region": "af-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -22,8 +22,8 @@ }, "params": { "Region": "ap-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -48,8 +48,8 @@ }, "params": { "Region": "ap-northeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,8 +61,8 @@ }, "params": { "Region": 
"ap-northeast-3", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -74,8 +74,8 @@ }, "params": { "Region": "ap-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -100,8 +100,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -113,8 +113,8 @@ }, "params": { "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -126,8 +126,8 @@ }, "params": { "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -139,8 +139,8 @@ }, "params": { "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -152,8 +152,8 @@ }, "params": { "Region": "eu-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -165,8 +165,8 @@ }, "params": { "Region": "eu-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -178,8 +178,8 @@ }, "params": { "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -191,8 +191,8 @@ }, "params": { "Region": "eu-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -204,8 +204,8 @@ }, "params": { "Region": "eu-west-3", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -217,8 +217,8 @@ }, "params": { "Region": "me-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -230,8 +230,8 @@ }, "params": 
{ "Region": "sa-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -243,8 +243,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -256,8 +256,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -269,8 +269,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -282,8 +282,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -295,8 +295,8 @@ }, "params": { "Region": "us-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -308,8 +308,8 @@ }, "params": { "Region": "us-west-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -321,8 +321,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -334,8 +334,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -347,8 +347,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -360,8 +360,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -373,8 +373,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -386,8 +386,8 @@ }, "params": { "Region": "cn-northwest-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -399,8 +399,8 @@ }, "params": { "Region": 
"cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -412,8 +412,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -425,8 +425,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -438,8 +438,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -451,8 +451,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -464,8 +464,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -477,8 +477,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -490,8 +490,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -503,8 +503,19 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -516,8 +527,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": 
{ + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -529,8 +551,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -542,8 +575,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -555,12 +599,12 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -568,8 +612,21 @@ }, "params": { "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -580,8 +637,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -592,10 +649,16 @@ }, "params": 
{ "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index ce59619357a..59da1fba439 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -21990,8 +21990,26 @@ }, "route53resolver" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "route53resolver.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "deprecated" : true, + "hostname" : "route53resolver.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "route53resolver.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "deprecated" : true, + "hostname" : "route53resolver.us-gov-west-1.amazonaws.com" + } } }, "runtime.lex" : { diff --git a/service/athena/api.go b/service/athena/api.go index dfa7eea7f16..f6a8e45fe6d 100644 --- a/service/athena/api.go +++ b/service/athena/api.go @@ -9557,6 +9557,10 @@ type EngineConfiguration struct { // // MaxConcurrentDpus is a required field MaxConcurrentDpus *int64 `min:"2" type:"integer" required:"true"` + + // Specifies custom jar files and Spark properties for use cases like cluster + // encryption, table formats, and general Spark tuning. + SparkProperties map[string]*string `type:"map"` } // String returns the string representation. @@ -9623,6 +9627,12 @@ func (s *EngineConfiguration) SetMaxConcurrentDpus(v int64) *EngineConfiguration return s } +// SetSparkProperties sets the SparkProperties field's value. 
+func (s *EngineConfiguration) SetSparkProperties(v map[string]*string) *EngineConfiguration { + s.SparkProperties = v + return s +} + // The Athena engine version for running queries, or the PySpark engine version // for running sessions. type EngineVersion struct { diff --git a/service/rekognition/api.go b/service/rekognition/api.go index 33a02feab4c..9ffd7fa2809 100644 --- a/service/rekognition/api.go +++ b/service/rekognition/api.go @@ -14435,6 +14435,57 @@ func (s *EvaluationResult) SetSummary(v *Summary) *EvaluationResult { return s } +// Indicates the direction the eyes are gazing in (independent of the head pose) +// as determined by its pitch and yaw. +type EyeDirection struct { + _ struct{} `type:"structure"` + + // The confidence that the service has in its predicted eye direction. + Confidence *float64 `type:"float"` + + // Value representing eye direction on the pitch axis. + Pitch *float64 `type:"float"` + + // Value representing eye direction on the yaw axis. + Yaw *float64 `type:"float"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EyeDirection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EyeDirection) GoString() string { + return s.String() +} + +// SetConfidence sets the Confidence field's value. +func (s *EyeDirection) SetConfidence(v float64) *EyeDirection { + s.Confidence = &v + return s +} + +// SetPitch sets the Pitch field's value. 
+func (s *EyeDirection) SetPitch(v float64) *EyeDirection { + s.Pitch = &v + return s +} + +// SetYaw sets the Yaw field's value. +func (s *EyeDirection) SetYaw(v float64) *EyeDirection { + s.Yaw = &v + return s +} + // Indicates whether or not the eyes on the face are open, and the confidence // level in the determination. type EyeOpen struct { @@ -14647,6 +14698,9 @@ type FaceDetail struct { // For example, a person pretending to have a sad face might not be sad emotionally. Emotions []*Emotion `type:"list"` + // Indicates the direction the eyes are gazing in, as defined by pitch and yaw. + EyeDirection *EyeDirection `type:"structure"` + // Indicates whether or not the face is wearing eye glasses, and the confidence // level in the determination. Eyeglasses *Eyeglasses `type:"structure"` @@ -14741,6 +14795,12 @@ func (s *FaceDetail) SetEmotions(v []*Emotion) *FaceDetail { return s } +// SetEyeDirection sets the EyeDirection field's value. +func (s *FaceDetail) SetEyeDirection(v *EyeDirection) *FaceDetail { + s.EyeDirection = v + return s +} + // SetEyeglasses sets the Eyeglasses field's value. 
func (s *FaceDetail) SetEyeglasses(v *Eyeglasses) *FaceDetail { s.Eyeglasses = v @@ -25920,6 +25980,9 @@ const ( // AttributeEmotions is a Attribute enum value AttributeEmotions = "EMOTIONS" + // AttributeEyeDirection is a Attribute enum value + AttributeEyeDirection = "EYE_DIRECTION" + // AttributeEyeglasses is a Attribute enum value AttributeEyeglasses = "EYEGLASSES" @@ -25953,6 +26016,7 @@ func Attribute_Values() []string { AttributeAgeRange, AttributeBeard, AttributeEmotions, + AttributeEyeDirection, AttributeEyeglasses, AttributeEyesOpen, AttributeGender, diff --git a/service/rolesanywhere/api.go b/service/rolesanywhere/api.go index b58b045d4fd..734f90b5b07 100644 --- a/service/rolesanywhere/api.go +++ b/service/rolesanywhere/api.go @@ -56,9 +56,8 @@ func (c *RolesAnywhere) CreateProfileRequest(input *CreateProfileInput) (req *re // CreateProfile API operation for IAM Roles Anywhere. // -// Creates a profile. A profile is configuration resource to list the roles -// that RolesAnywhere service is trusted to assume. In addition, by applying -// a profile you can intersect permissions with IAM managed policies. +// Creates a profile, a list of the roles that Roles Anywhere service is trusted +// to assume. You use profiles to intersect permissions with IAM managed policies. // // Required permissions: rolesanywhere:CreateProfile. // @@ -142,13 +141,12 @@ func (c *RolesAnywhere) CreateTrustAnchorRequest(input *CreateTrustAnchorInput) // CreateTrustAnchor API operation for IAM Roles Anywhere. // -// Creates a trust anchor. You establish trust between IAM Roles Anywhere and -// your certificate authority (CA) by configuring a trust anchor. A Trust Anchor -// is defined either as a reference to a AWS Certificate Manager Private Certificate -// Authority (ACM PCA), or by uploading a Certificate Authority (CA) certificate. 
-// Your AWS workloads can authenticate with the trust anchor using certificates -// issued by the trusted Certificate Authority (CA) in exchange for temporary -// AWS credentials. +// Creates a trust anchor to establish trust between IAM Roles Anywhere and +// your certificate authority (CA). You can define a trust anchor as a reference +// to an Private Certificate Authority (Private CA) or by uploading a CA certificate. +// Your Amazon Web Services workloads can authenticate with the trust anchor +// using certificates issued by the CA in exchange for temporary Amazon Web +// Services credentials. // // Required permissions: rolesanywhere:CreateTrustAnchor. // @@ -568,8 +566,8 @@ func (c *RolesAnywhere) DisableProfileRequest(input *DisableProfileInput) (req * // DisableProfile API operation for IAM Roles Anywhere. // -// Disables a profile. When disabled, CreateSession (https://docs.aws.amazon.com/rolesanywhere/latest/APIReference/API_CreateSession.html) -// requests with this profile fail. +// Disables a profile. When disabled, temporary credential requests with this +// profile fail. // // Required permissions: rolesanywhere:DisableProfile. // @@ -653,8 +651,8 @@ func (c *RolesAnywhere) DisableTrustAnchorRequest(input *DisableTrustAnchorInput // DisableTrustAnchor API operation for IAM Roles Anywhere. // -// Disables a trust anchor. When disabled, CreateSession (https://docs.aws.amazon.com/rolesanywhere/latest/APIReference/API_CreateSession.html) -// requests specifying this trust anchor are unauthorized. +// Disables a trust anchor. When disabled, temporary credential requests specifying +// this trust anchor are unauthorized. // // Required permissions: rolesanywhere:DisableTrustAnchor. // @@ -823,8 +821,7 @@ func (c *RolesAnywhere) EnableProfileRequest(input *EnableProfileInput) (req *re // EnableProfile API operation for IAM Roles Anywhere. 
// -// Enables the roles in a profile to receive session credentials in CreateSession -// (https://docs.aws.amazon.com/rolesanywhere/latest/APIReference/API_CreateSession.html). +// Enables temporary credential requests for a profile. // // Required permissions: rolesanywhere:EnableProfile. // @@ -1157,10 +1154,10 @@ func (c *RolesAnywhere) GetSubjectRequest(input *GetSubjectInput) (req *request. // GetSubject API operation for IAM Roles Anywhere. // -// Gets a Subject. A Subject associates a certificate identity with authentication -// attempts by CreateSession. The Subject resources stores audit information -// such as status of the last authentication attempt, the certificate data used -// in the attempt, and the last time the associated identity attempted authentication. +// Gets a subject, which associates a certificate identity with authentication +// attempts. The subject stores auditing information such as the status of the +// last authentication attempt, the certificate data used in the attempt, and +// the last time the associated identity attempted authentication. // // Required permissions: rolesanywhere:GetSubject. // @@ -1331,9 +1328,9 @@ func (c *RolesAnywhere) ImportCrlRequest(input *ImportCrlInput) (req *request.Re // ImportCrl API operation for IAM Roles Anywhere. // -// Imports the certificate revocation list (CRL). CRl is a list of certificates +// Imports the certificate revocation list (CRL). A CRL is a list of certificates // that have been revoked by the issuing certificate Authority (CA). IAM Roles -// Anywhere validates against the crl list before issuing credentials. +// Anywhere validates against the CRL before issuing credentials. // // Required permissions: rolesanywhere:ImportCrl. // @@ -1423,7 +1420,8 @@ func (c *RolesAnywhere) ListCrlsRequest(input *ListCrlsInput) (req *request.Requ // ListCrls API operation for IAM Roles Anywhere. // -// Lists all Crls in the authenticated account and Amazon Web Services Region. 
+// Lists all certificate revocation lists (CRL) in the authenticated account +// and Amazon Web Services Region. // // Required permissions: rolesanywhere:ListCrls. // @@ -2026,6 +2024,183 @@ func (c *RolesAnywhere) ListTrustAnchorsPagesWithContext(ctx aws.Context, input return p.Err() } +const opPutNotificationSettings = "PutNotificationSettings" + +// PutNotificationSettingsRequest generates a "aws/request.Request" representing the +// client's request for the PutNotificationSettings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutNotificationSettings for more information on using the PutNotificationSettings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutNotificationSettingsRequest method. +// req, resp := client.PutNotificationSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rolesanywhere-2018-05-10/PutNotificationSettings +func (c *RolesAnywhere) PutNotificationSettingsRequest(input *PutNotificationSettingsInput) (req *request.Request, output *PutNotificationSettingsOutput) { + op := &request.Operation{ + Name: opPutNotificationSettings, + HTTPMethod: "PATCH", + HTTPPath: "/put-notifications-settings", + } + + if input == nil { + input = &PutNotificationSettingsInput{} + } + + output = &PutNotificationSettingsOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutNotificationSettings API operation for IAM Roles Anywhere. 
+// +// Attaches a list of notification settings to a trust anchor. +// +// A notification setting includes information such as event name, threshold, +// status of the notification setting, and the channel to notify. +// +// Required permissions: rolesanywhere:PutNotificationSettings. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for IAM Roles Anywhere's +// API operation PutNotificationSettings for usage and error information. +// +// Returned Error Types: +// +// - ValidationException +// Validation exception error. +// +// - ResourceNotFoundException +// The resource could not be found. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rolesanywhere-2018-05-10/PutNotificationSettings +func (c *RolesAnywhere) PutNotificationSettings(input *PutNotificationSettingsInput) (*PutNotificationSettingsOutput, error) { + req, out := c.PutNotificationSettingsRequest(input) + return out, req.Send() +} + +// PutNotificationSettingsWithContext is the same as PutNotificationSettings with the addition of +// the ability to pass a context and additional request options. +// +// See PutNotificationSettings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RolesAnywhere) PutNotificationSettingsWithContext(ctx aws.Context, input *PutNotificationSettingsInput, opts ...request.Option) (*PutNotificationSettingsOutput, error) { + req, out := c.PutNotificationSettingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opResetNotificationSettings = "ResetNotificationSettings" + +// ResetNotificationSettingsRequest generates a "aws/request.Request" representing the +// client's request for the ResetNotificationSettings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResetNotificationSettings for more information on using the ResetNotificationSettings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ResetNotificationSettingsRequest method. +// req, resp := client.ResetNotificationSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rolesanywhere-2018-05-10/ResetNotificationSettings +func (c *RolesAnywhere) ResetNotificationSettingsRequest(input *ResetNotificationSettingsInput) (req *request.Request, output *ResetNotificationSettingsOutput) { + op := &request.Operation{ + Name: opResetNotificationSettings, + HTTPMethod: "PATCH", + HTTPPath: "/reset-notifications-settings", + } + + if input == nil { + input = &ResetNotificationSettingsInput{} + } + + output = &ResetNotificationSettingsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ResetNotificationSettings API operation for IAM Roles Anywhere. +// +// Resets the custom notification setting to IAM Roles Anywhere default setting. +// +// Required permissions: rolesanywhere:ResetNotificationSettings. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for IAM Roles Anywhere's +// API operation ResetNotificationSettings for usage and error information. +// +// Returned Error Types: +// +// - ValidationException +// Validation exception error. +// +// - ResourceNotFoundException +// The resource could not be found. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rolesanywhere-2018-05-10/ResetNotificationSettings +func (c *RolesAnywhere) ResetNotificationSettings(input *ResetNotificationSettingsInput) (*ResetNotificationSettingsOutput, error) { + req, out := c.ResetNotificationSettingsRequest(input) + return out, req.Send() +} + +// ResetNotificationSettingsWithContext is the same as ResetNotificationSettings with the addition of +// the ability to pass a context and additional request options. +// +// See ResetNotificationSettings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RolesAnywhere) ResetNotificationSettingsWithContext(ctx aws.Context, input *ResetNotificationSettingsInput, opts ...request.Option) (*ResetNotificationSettingsOutput, error) { + req, out := c.ResetNotificationSettingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opTagResource = "TagResource" // TagResourceRequest generates a "aws/request.Request" representing the @@ -2248,9 +2423,9 @@ func (c *RolesAnywhere) UpdateCrlRequest(input *UpdateCrlInput) (req *request.Re // UpdateCrl API operation for IAM Roles Anywhere. 
// -// Updates the certificate revocation list (CRL). CRl is a list of certificates -// that have been revoked by the issuing certificate Authority (CA). IAM Roles -// Anywhere validates against the crl list before issuing credentials. +// Updates the certificate revocation list (CRL). A CRL is a list of certificates +// that have been revoked by the issuing certificate authority (CA). IAM Roles +// Anywhere validates against the CRL before issuing credentials. // // Required permissions: rolesanywhere:UpdateCrl. // @@ -2337,9 +2512,9 @@ func (c *RolesAnywhere) UpdateProfileRequest(input *UpdateProfileInput) (req *re // UpdateProfile API operation for IAM Roles Anywhere. // -// Updates the profile. A profile is configuration resource to list the roles -// that RolesAnywhere service is trusted to assume. In addition, by applying -// a profile you can scope-down permissions with IAM managed policies. +// Updates a profile, a list of the roles that IAM Roles Anywhere service is +// trusted to assume. You use profiles to intersect permissions with IAM managed +// policies. // // Required permissions: rolesanywhere:UpdateProfile. // @@ -2426,13 +2601,12 @@ func (c *RolesAnywhere) UpdateTrustAnchorRequest(input *UpdateTrustAnchorInput) // UpdateTrustAnchor API operation for IAM Roles Anywhere. // -// Updates the trust anchor.You establish trust between IAM Roles Anywhere and -// your certificate authority (CA) by configuring a trust anchor. A Trust Anchor -// is defined either as a reference to a AWS Certificate Manager Private Certificate -// Authority (ACM PCA), or by uploading a Certificate Authority (CA) certificate. -// Your AWS workloads can authenticate with the trust anchor using certificates -// issued by the trusted Certificate Authority (CA) in exchange for temporary -// AWS credentials. +// Updates a trust anchor. You establish trust between IAM Roles Anywhere and +// your certificate authority (CA) by configuring a trust anchor. 
You can define +// a trust anchor as a reference to an Private Certificate Authority (Private +// CA) or by uploading a CA certificate. Your Amazon Web Services workloads +// can authenticate with the trust anchor using certificates issued by the CA +// in exchange for temporary Amazon Web Services credentials. // // Required permissions: rolesanywhere:UpdateTrustAnchor. // @@ -2557,12 +2731,12 @@ type CreateProfileInput struct { // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` - // Specifies whether instance properties are required in CreateSession (https://docs.aws.amazon.com/rolesanywhere/latest/APIReference/API_CreateSession.html) + // Specifies whether instance properties are required in temporary credential // requests with this profile. RequireInstanceProperties *bool `locationName:"requireInstanceProperties" type:"boolean"` - // A list of IAM roles that this profile can assume in a CreateSession (https://docs.aws.amazon.com/rolesanywhere/latest/APIReference/API_CreateSession.html) - // operation. + // A list of IAM roles that this profile can assume in a temporary credential + // request. // // RoleArns is a required field RoleArns []*string `locationName:"roleArns" type:"list" required:"true"` @@ -2715,6 +2889,9 @@ type CreateTrustAnchorInput struct { // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` + // A list of notification settings to be associated to the trust anchor. + NotificationSettings []*NotificationSetting `locationName:"notificationSettings" type:"list"` + // The trust anchor type and its related certificate data. 
// // Source is a required field @@ -2754,6 +2931,21 @@ func (s *CreateTrustAnchorInput) Validate() error { if s.Source == nil { invalidParams.Add(request.NewErrParamRequired("Source")) } + if s.NotificationSettings != nil { + for i, v := range s.NotificationSettings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NotificationSettings", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Source != nil { + if err := s.Source.Validate(); err != nil { + invalidParams.AddNested("Source", err.(request.ErrInvalidParams)) + } + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -2783,6 +2975,12 @@ func (s *CreateTrustAnchorInput) SetName(v string) *CreateTrustAnchorInput { return s } +// SetNotificationSettings sets the NotificationSettings field's value. +func (s *CreateTrustAnchorInput) SetNotificationSettings(v []*NotificationSetting) *CreateTrustAnchorInput { + s.NotificationSettings = v + return s +} + // SetSource sets the Source field's value. func (s *CreateTrustAnchorInput) SetSource(v *Source) *CreateTrustAnchorInput { s.Source = v @@ -2828,24 +3026,22 @@ func (s *CreateTrustAnchorOutput) SetTrustAnchor(v *TrustAnchorDetail) *CreateTr return s } -// A record of a presented X509 credential to CreateSession (https://docs.aws.amazon.com/rolesanywhere/latest/APIReference/API_CreateSession.html). +// A record of a presented X509 credential from a temporary credential request. type CredentialSummary struct { _ struct{} `type:"structure"` // Indicates whether the credential is enabled. Enabled *bool `locationName:"enabled" type:"boolean"` - // Indicates whether the CreateSession (https://docs.aws.amazon.com/rolesanywhere/latest/APIReference/API_CreateSession.html) - // operation was successful. + // Indicates whether the temporary credential request was successful. 
Failed *bool `locationName:"failed" type:"boolean"` // The fully qualified domain name of the issuing certificate for the presented // end-entity certificate. Issuer *string `locationName:"issuer" type:"string"` - // The ISO-8601 time stamp of when the certificate was last used in a CreateSession - // (https://docs.aws.amazon.com/rolesanywhere/latest/APIReference/API_CreateSession.html) - // operation. + // The ISO-8601 time stamp of when the certificate was last used in a temporary + // credential request. SeenAt *time.Time `locationName:"seenAt" type:"timestamp" timestampFormat:"iso8601"` // The serial number of the certificate. @@ -4072,7 +4268,7 @@ func (s *GetTrustAnchorOutput) SetTrustAnchor(v *TrustAnchorDetail) *GetTrustAnc type ImportCrlInput struct { _ struct{} `type:"structure"` - // The x509 v3 specified certificate revocation list + // The x509 v3 specified certificate revocation list (CRL). // CrlData is automatically base64 encoded/decoded by the SDK. // // CrlData is a required field @@ -4221,16 +4417,14 @@ func (s *ImportCrlOutput) SetCrl(v *CrlDetail) *ImportCrlOutput { type InstanceProperty struct { _ struct{} `type:"structure"` - // Indicates whether the CreateSession (https://docs.aws.amazon.com/rolesanywhere/latest/APIReference/API_CreateSession.html) - // operation was successful. + // Indicates whether the temporary credential request was successful. Failed *bool `locationName:"failed" type:"boolean"` // A list of instanceProperty objects. Properties map[string]*string `locationName:"properties" type:"map"` - // The ISO-8601 time stamp of when the certificate was last used in a CreateSession - // (https://docs.aws.amazon.com/rolesanywhere/latest/APIReference/API_CreateSession.html) - // operation. + // The ISO-8601 time stamp of when the certificate was last used in a temporary + // credential request. 
SeenAt *time.Time `locationName:"seenAt" type:"timestamp" timestampFormat:"iso8601"` } @@ -4274,7 +4468,7 @@ type ListCrlsInput struct { _ struct{} `type:"structure" nopayload:"true"` // A token that indicates where the output should continue from, if a previous - // operation did not show all results. To get the next results, call the operation + // request did not show all results. To get the next results, make the request // again with this value. NextToken *string `location:"querystring" locationName:"nextToken" min:"1" type:"string"` @@ -4332,7 +4526,7 @@ type ListCrlsOutput struct { Crls []*CrlDetail `locationName:"crls" type:"list"` // A token that indicates where the output should continue from, if a previous - // operation did not show all results. To get the next results, call the operation + // request did not show all results. To get the next results, make the request // again with this value. NextToken *string `locationName:"nextToken" type:"string"` } @@ -4371,7 +4565,7 @@ type ListProfilesInput struct { _ struct{} `type:"structure" nopayload:"true"` // A token that indicates where the output should continue from, if a previous - // operation did not show all results. To get the next results, call the operation + // request did not show all results. To get the next results, make the request // again with this value. NextToken *string `location:"querystring" locationName:"nextToken" min:"1" type:"string"` @@ -4426,7 +4620,7 @@ type ListProfilesOutput struct { _ struct{} `type:"structure"` // A token that indicates where the output should continue from, if a previous - // operation did not show all results. To get the next results, call the operation + // request did not show all results. To get the next results, make the request // again with this value. 
NextToken *string `locationName:"nextToken" type:"string"` @@ -4468,7 +4662,7 @@ type ListSubjectsInput struct { _ struct{} `type:"structure" nopayload:"true"` // A token that indicates where the output should continue from, if a previous - // operation did not show all results. To get the next results, call the operation + // request did not show all results. To get the next results, make the request // again with this value. NextToken *string `location:"querystring" locationName:"nextToken" min:"1" type:"string"` @@ -4523,7 +4717,7 @@ type ListSubjectsOutput struct { _ struct{} `type:"structure"` // A token that indicates where the output should continue from, if a previous - // operation did not show all results. To get the next results, call the operation + // request did not show all results. To get the next results, make the request // again with this value. NextToken *string `locationName:"nextToken" type:"string"` @@ -4645,7 +4839,7 @@ type ListTrustAnchorsInput struct { _ struct{} `type:"structure" nopayload:"true"` // A token that indicates where the output should continue from, if a previous - // operation did not show all results. To get the next results, call the operation + // request did not show all results. To get the next results, make the request // again with this value. NextToken *string `location:"querystring" locationName:"nextToken" min:"1" type:"string"` @@ -4700,7 +4894,7 @@ type ListTrustAnchorsOutput struct { _ struct{} `type:"structure"` // A token that indicates where the output should continue from, if a previous - // operation did not show all results. To get the next results, call the operation + // request did not show all results. To get the next results, make the request // again with this value. 
NextToken *string `locationName:"nextToken" type:"string"` @@ -4738,6 +4932,233 @@ func (s *ListTrustAnchorsOutput) SetTrustAnchors(v []*TrustAnchorDetail) *ListTr return s } +// Customizable notification settings that will be applied to notification events. +// IAM Roles Anywhere consumes these settings while notifying across multiple +// channels - CloudWatch metrics, EventBridge, and Health Dashboard. +type NotificationSetting struct { + _ struct{} `type:"structure"` + + // The specified channel of notification. IAM Roles Anywhere uses CloudWatch + // metrics, EventBridge, and Health Dashboard to notify for an event. + // + // In the absence of a specific channel, IAM Roles Anywhere applies this setting + // to 'ALL' channels. + Channel *string `locationName:"channel" type:"string" enum:"NotificationChannel"` + + // Indicates whether the notification setting is enabled. + // + // Enabled is a required field + Enabled *bool `locationName:"enabled" type:"boolean" required:"true"` + + // The event to which this notification setting is applied. + // + // Event is a required field + Event *string `locationName:"event" type:"string" required:"true" enum:"NotificationEvent"` + + // The number of days before a notification event. This value is required for + // a notification setting that is enabled. + Threshold *int64 `locationName:"threshold" min:"1" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NotificationSetting) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s NotificationSetting) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NotificationSetting) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NotificationSetting"} + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.Event == nil { + invalidParams.Add(request.NewErrParamRequired("Event")) + } + if s.Threshold != nil && *s.Threshold < 1 { + invalidParams.Add(request.NewErrParamMinValue("Threshold", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChannel sets the Channel field's value. +func (s *NotificationSetting) SetChannel(v string) *NotificationSetting { + s.Channel = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *NotificationSetting) SetEnabled(v bool) *NotificationSetting { + s.Enabled = &v + return s +} + +// SetEvent sets the Event field's value. +func (s *NotificationSetting) SetEvent(v string) *NotificationSetting { + s.Event = &v + return s +} + +// SetThreshold sets the Threshold field's value. +func (s *NotificationSetting) SetThreshold(v int64) *NotificationSetting { + s.Threshold = &v + return s +} + +// The state of a notification setting. +// +// A notification setting includes information such as event name, threshold, +// status of the notification setting, and the channel to notify. +type NotificationSettingDetail struct { + _ struct{} `type:"structure"` + + // The specified channel of notification. IAM Roles Anywhere uses CloudWatch + // metrics, EventBridge, and Health Dashboard to notify for an event. + // + // In the absence of a specific channel, IAM Roles Anywhere applies this setting + // to 'ALL' channels. + Channel *string `locationName:"channel" type:"string" enum:"NotificationChannel"` + + // The principal that configured the notification setting. 
For default settings
+	// configured by IAM Roles Anywhere, the value is rolesanywhere.amazonaws.com,
+	// and for customized notification settings, it is the respective account ID.
+	ConfiguredBy *string `locationName:"configuredBy" min:"1" type:"string"`
+
+	// Indicates whether the notification setting is enabled.
+	//
+	// Enabled is a required field
+	Enabled *bool `locationName:"enabled" type:"boolean" required:"true"`
+
+	// The event to which this notification setting is applied.
+	//
+	// Event is a required field
+	Event *string `locationName:"event" type:"string" required:"true" enum:"NotificationEvent"`
+
+	// The number of days before a notification event.
+	Threshold *int64 `locationName:"threshold" min:"1" type:"integer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s NotificationSettingDetail) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s NotificationSettingDetail) GoString() string {
+	return s.String()
+}
+
+// SetChannel sets the Channel field's value.
+func (s *NotificationSettingDetail) SetChannel(v string) *NotificationSettingDetail {
+	s.Channel = &v
+	return s
+}
+
+// SetConfiguredBy sets the ConfiguredBy field's value.
+func (s *NotificationSettingDetail) SetConfiguredBy(v string) *NotificationSettingDetail {
+	s.ConfiguredBy = &v
+	return s
+}
+
+// SetEnabled sets the Enabled field's value.
+func (s *NotificationSettingDetail) SetEnabled(v bool) *NotificationSettingDetail {
+	s.Enabled = &v
+	return s
+}
+
+// SetEvent sets the Event field's value.
+func (s *NotificationSettingDetail) SetEvent(v string) *NotificationSettingDetail { + s.Event = &v + return s +} + +// SetThreshold sets the Threshold field's value. +func (s *NotificationSettingDetail) SetThreshold(v int64) *NotificationSettingDetail { + s.Threshold = &v + return s +} + +// A notification setting key to reset. A notification setting key includes +// the event and the channel. +type NotificationSettingKey struct { + _ struct{} `type:"structure"` + + // The specified channel of notification. + Channel *string `locationName:"channel" type:"string" enum:"NotificationChannel"` + + // The notification setting event to reset. + // + // Event is a required field + Event *string `locationName:"event" type:"string" required:"true" enum:"NotificationEvent"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NotificationSettingKey) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NotificationSettingKey) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NotificationSettingKey) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NotificationSettingKey"} + if s.Event == nil { + invalidParams.Add(request.NewErrParamRequired("Event")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChannel sets the Channel field's value. 
+func (s *NotificationSettingKey) SetChannel(v string) *NotificationSettingKey { + s.Channel = &v + return s +} + +// SetEvent sets the Event field's value. +func (s *NotificationSettingKey) SetEvent(v string) *NotificationSettingKey { + s.Event = &v + return s +} + // The state of the profile after a read or write operation. type ProfileDetail struct { _ struct{} `type:"structure"` @@ -4766,12 +5187,12 @@ type ProfileDetail struct { // The unique identifier of the profile. ProfileId *string `locationName:"profileId" min:"36" type:"string"` - // Specifies whether instance properties are required in CreateSession (https://docs.aws.amazon.com/rolesanywhere/latest/APIReference/API_CreateSession.html) + // Specifies whether instance properties are required in temporary credential // requests with this profile. RequireInstanceProperties *bool `locationName:"requireInstanceProperties" type:"boolean"` - // A list of IAM roles that this profile can assume in a CreateSession (https://docs.aws.amazon.com/rolesanywhere/latest/APIReference/API_CreateSession.html) - // operation. + // A list of IAM roles that this profile can assume in a temporary credential + // request. RoleArns []*string `locationName:"roleArns" type:"list"` // A session policy that applies to the trust boundary of the vended session @@ -4872,6 +5293,219 @@ func (s *ProfileDetail) SetUpdatedAt(v time.Time) *ProfileDetail { return s } +type PutNotificationSettingsInput struct { + _ struct{} `type:"structure"` + + // A list of notification settings to be associated to the trust anchor. + // + // NotificationSettings is a required field + NotificationSettings []*NotificationSetting `locationName:"notificationSettings" type:"list" required:"true"` + + // The unique identifier of the trust anchor. + // + // TrustAnchorId is a required field + TrustAnchorId *string `locationName:"trustAnchorId" min:"36" type:"string" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutNotificationSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutNotificationSettingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutNotificationSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutNotificationSettingsInput"} + if s.NotificationSettings == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationSettings")) + } + if s.TrustAnchorId == nil { + invalidParams.Add(request.NewErrParamRequired("TrustAnchorId")) + } + if s.TrustAnchorId != nil && len(*s.TrustAnchorId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("TrustAnchorId", 36)) + } + if s.NotificationSettings != nil { + for i, v := range s.NotificationSettings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NotificationSettings", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNotificationSettings sets the NotificationSettings field's value. +func (s *PutNotificationSettingsInput) SetNotificationSettings(v []*NotificationSetting) *PutNotificationSettingsInput { + s.NotificationSettings = v + return s +} + +// SetTrustAnchorId sets the TrustAnchorId field's value. 
+func (s *PutNotificationSettingsInput) SetTrustAnchorId(v string) *PutNotificationSettingsInput { + s.TrustAnchorId = &v + return s +} + +type PutNotificationSettingsOutput struct { + _ struct{} `type:"structure"` + + // The state of the trust anchor after a read or write operation. + // + // TrustAnchor is a required field + TrustAnchor *TrustAnchorDetail `locationName:"trustAnchor" type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutNotificationSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutNotificationSettingsOutput) GoString() string { + return s.String() +} + +// SetTrustAnchor sets the TrustAnchor field's value. +func (s *PutNotificationSettingsOutput) SetTrustAnchor(v *TrustAnchorDetail) *PutNotificationSettingsOutput { + s.TrustAnchor = v + return s +} + +type ResetNotificationSettingsInput struct { + _ struct{} `type:"structure"` + + // A list of notification setting keys to reset. A notification setting key + // includes the event and the channel. + // + // NotificationSettingKeys is a required field + NotificationSettingKeys []*NotificationSettingKey `locationName:"notificationSettingKeys" type:"list" required:"true"` + + // The unique identifier of the trust anchor. + // + // TrustAnchorId is a required field + TrustAnchorId *string `locationName:"trustAnchorId" min:"36" type:"string" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResetNotificationSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResetNotificationSettingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResetNotificationSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResetNotificationSettingsInput"} + if s.NotificationSettingKeys == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationSettingKeys")) + } + if s.TrustAnchorId == nil { + invalidParams.Add(request.NewErrParamRequired("TrustAnchorId")) + } + if s.TrustAnchorId != nil && len(*s.TrustAnchorId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("TrustAnchorId", 36)) + } + if s.NotificationSettingKeys != nil { + for i, v := range s.NotificationSettingKeys { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NotificationSettingKeys", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNotificationSettingKeys sets the NotificationSettingKeys field's value. +func (s *ResetNotificationSettingsInput) SetNotificationSettingKeys(v []*NotificationSettingKey) *ResetNotificationSettingsInput { + s.NotificationSettingKeys = v + return s +} + +// SetTrustAnchorId sets the TrustAnchorId field's value. 
+func (s *ResetNotificationSettingsInput) SetTrustAnchorId(v string) *ResetNotificationSettingsInput { + s.TrustAnchorId = &v + return s +} + +type ResetNotificationSettingsOutput struct { + _ struct{} `type:"structure"` + + // The state of the trust anchor after a read or write operation. + // + // TrustAnchor is a required field + TrustAnchor *TrustAnchorDetail `locationName:"trustAnchor" type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResetNotificationSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResetNotificationSettingsOutput) GoString() string { + return s.String() +} + +// SetTrustAnchor sets the TrustAnchor field's value. +func (s *ResetNotificationSettingsOutput) SetTrustAnchor(v *TrustAnchorDetail) *ResetNotificationSettingsOutput { + s.TrustAnchor = v + return s +} + // The resource could not be found. type ResourceNotFoundException struct { _ struct{} `type:"structure"` @@ -4965,6 +5599,21 @@ func (s Source) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *Source) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Source"} + if s.SourceData != nil { + if err := s.SourceData.Validate(); err != nil { + invalidParams.AddNested("SourceData", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetSourceData sets the SourceData field's value. 
func (s *Source) SetSourceData(v *SourceData) *Source { s.SourceData = v @@ -4981,14 +5630,14 @@ func (s *Source) SetSourceType(v string) *Source { type SourceData struct { _ struct{} `type:"structure"` - // The root certificate of the Certificate Manager Private Certificate Authority - // specified by this ARN is used in trust validation for CreateSession (https://docs.aws.amazon.com/rolesanywhere/latest/APIReference/API_CreateSession.html) - // operations. Included for trust anchors of type AWS_ACM_PCA. + // The root certificate of the Private Certificate Authority specified by this + // ARN is used in trust validation for temporary credential requests. Included + // for trust anchors of type AWS_ACM_PCA. AcmPcaArn *string `locationName:"acmPcaArn" type:"string"` // The PEM-encoded data for the certificate anchor. Included for trust anchors // of type CERTIFICATE_BUNDLE. - X509CertificateData *string `locationName:"x509CertificateData" type:"string"` + X509CertificateData *string `locationName:"x509CertificateData" min:"1" type:"string"` } // String returns the string representation. @@ -5009,6 +5658,19 @@ func (s SourceData) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *SourceData) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SourceData"} + if s.X509CertificateData != nil && len(*s.X509CertificateData) < 1 { + invalidParams.Add(request.NewErrParamMinLen("X509CertificateData", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetAcmPcaArn sets the AcmPcaArn field's value. func (s *SourceData) SetAcmPcaArn(v string) *SourceData { s.AcmPcaArn = &v @@ -5029,7 +5691,7 @@ type SubjectDetail struct { CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"iso8601"` // The temporary session credentials vended at the last authenticating call - // with this Subject. + // with this subject. 
Credentials []*CredentialSummary `locationName:"credentials" type:"list"` // The enabled status of the subject. @@ -5038,7 +5700,7 @@ type SubjectDetail struct { // The specified instance properties associated with the request. InstanceProperties []*InstanceProperty `locationName:"instanceProperties" type:"list"` - // The ISO-8601 timestamp of the last time this Subject requested temporary + // The ISO-8601 timestamp of the last time this subject requested temporary // session credentials. LastSeenAt *time.Time `locationName:"lastSeenAt" type:"timestamp" timestampFormat:"iso8601"` @@ -5127,22 +5789,19 @@ func (s *SubjectDetail) SetX509Subject(v string) *SubjectDetail { return s } -// A summary representation of Subject resources returned in read operations; -// primarily ListSubjects. +// A summary representation of subjects. type SubjectSummary struct { _ struct{} `type:"structure"` - // The ISO-8601 time stamp of when the certificate was first used in a CreateSession - // (https://docs.aws.amazon.com/rolesanywhere/latest/APIReference/API_CreateSession.html) - // operation. + // The ISO-8601 time stamp of when the certificate was first used in a temporary + // credential request. CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"iso8601"` - // The enabled status of the Subject. + // The enabled status of the subject. Enabled *bool `locationName:"enabled" type:"boolean"` - // The ISO-8601 time stamp of when the certificate was last used in a CreateSession - // (https://docs.aws.amazon.com/rolesanywhere/latest/APIReference/API_CreateSession.html) - // operation. + // The ISO-8601 time stamp of when the certificate was last used in a temporary + // credential request. LastSeenAt *time.Time `locationName:"lastSeenAt" type:"timestamp" timestampFormat:"iso8601"` // The ARN of the resource. @@ -5462,6 +6121,9 @@ type TrustAnchorDetail struct { // The name of the trust anchor. 
Name *string `locationName:"name" min:"1" type:"string"` + // A list of notification settings to be associated to the trust anchor. + NotificationSettings []*NotificationSettingDetail `locationName:"notificationSettings" type:"list"` + // The trust anchor type and its related certificate data. Source *Source `locationName:"source" type:"structure"` @@ -5511,6 +6173,12 @@ func (s *TrustAnchorDetail) SetName(v string) *TrustAnchorDetail { return s } +// SetNotificationSettings sets the NotificationSettings field's value. +func (s *TrustAnchorDetail) SetNotificationSettings(v []*NotificationSettingDetail) *TrustAnchorDetail { + s.NotificationSettings = v + return s +} + // SetSource sets the Source field's value. func (s *TrustAnchorDetail) SetSource(v *Source) *TrustAnchorDetail { s.Source = v @@ -5623,7 +6291,7 @@ func (s UntagResourceOutput) GoString() string { type UpdateCrlInput struct { _ struct{} `type:"structure"` - // The x509 v3 specified certificate revocation list + // The x509 v3 specified certificate revocation list (CRL). // CrlData is automatically base64 encoded/decoded by the SDK. CrlData []byte `locationName:"crlData" min:"1" type:"blob"` @@ -5745,8 +6413,8 @@ type UpdateProfileInput struct { // ProfileId is a required field ProfileId *string `location:"uri" locationName:"profileId" min:"36" type:"string" required:"true"` - // A list of IAM roles that this profile can assume in a CreateSession (https://docs.aws.amazon.com/rolesanywhere/latest/APIReference/API_CreateSession.html) - // operation. + // A list of IAM roles that this profile can assume in a temporary credential + // request. 
RoleArns []*string `locationName:"roleArns" type:"list"` // A session policy that applies to the trust boundary of the vended session @@ -5909,6 +6577,11 @@ func (s *UpdateTrustAnchorInput) Validate() error { if s.TrustAnchorId != nil && len(*s.TrustAnchorId) < 36 { invalidParams.Add(request.NewErrParamMinLen("TrustAnchorId", 36)) } + if s.Source != nil { + if err := s.Source.Validate(); err != nil { + invalidParams.AddNested("Source", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -6031,6 +6704,34 @@ func (s *ValidationException) RequestID() string { return s.RespMetadata.RequestID } +const ( + // NotificationChannelAll is a NotificationChannel enum value + NotificationChannelAll = "ALL" +) + +// NotificationChannel_Values returns all elements of the NotificationChannel enum +func NotificationChannel_Values() []string { + return []string{ + NotificationChannelAll, + } +} + +const ( + // NotificationEventCaCertificateExpiry is a NotificationEvent enum value + NotificationEventCaCertificateExpiry = "CA_CERTIFICATE_EXPIRY" + + // NotificationEventEndEntityCertificateExpiry is a NotificationEvent enum value + NotificationEventEndEntityCertificateExpiry = "END_ENTITY_CERTIFICATE_EXPIRY" +) + +// NotificationEvent_Values returns all elements of the NotificationEvent enum +func NotificationEvent_Values() []string { + return []string{ + NotificationEventCaCertificateExpiry, + NotificationEventEndEntityCertificateExpiry, + } +} + const ( // TrustAnchorTypeAwsAcmPca is a TrustAnchorType enum value TrustAnchorTypeAwsAcmPca = "AWS_ACM_PCA" diff --git a/service/rolesanywhere/doc.go b/service/rolesanywhere/doc.go index b04369d5646..816b353d7cb 100644 --- a/service/rolesanywhere/doc.go +++ b/service/rolesanywhere/doc.go @@ -3,24 +3,24 @@ // Package rolesanywhere provides the client and types for making API // requests to IAM Roles Anywhere. 
// -// AWS Identity and Access Management Roles Anywhere provides a secure way for -// your workloads such as servers, containers, and applications running outside -// of AWS to obtain Temporary AWS credentials. Your workloads can use the same -// IAM policies and roles that you have configured with native AWS applications -// to access AWS resources. Using IAM Roles Anywhere will eliminate the need -// to manage long term credentials for workloads running outside of AWS. -// -// To use IAM Roles Anywhere customer workloads will need to use X.509 certificates -// issued by their Certificate Authority (CA) . The Certificate Authority (CA) -// needs to be registered with IAM Roles Anywhere as a trust anchor to establish -// trust between customer PKI and IAM Roles Anywhere. Customers who do not manage -// their own PKI system can use AWS Certificate Manager Private Certificate -// Authority (ACM PCA) to create a Certificate Authority and use that to establish -// trust with IAM Roles Anywhere -// -// This guide describes the IAM rolesanywhere operations that you can call programmatically. -// For general information about IAM Roles Anywhere see https://docs.aws.amazon.com/ -// (https://docs.aws.amazon.com/) +// Identity and Access Management Roles Anywhere provides a secure way for your +// workloads such as servers, containers, and applications that run outside +// of Amazon Web Services to obtain temporary Amazon Web Services credentials. +// Your workloads can use the same IAM policies and roles you have for native +// Amazon Web Services applications to access Amazon Web Services resources. +// Using IAM Roles Anywhere eliminates the need to manage long-term credentials +// for workloads running outside of Amazon Web Services. +// +// To use IAM Roles Anywhere, your workloads must use X.509 certificates issued +// by their certificate authority (CA). 
You register the CA with IAM Roles Anywhere +// as a trust anchor to establish trust between your public key infrastructure +// (PKI) and IAM Roles Anywhere. If you don't manage your own PKI system, you +// can use Private Certificate Authority to create a CA and then use that to +// establish trust with IAM Roles Anywhere. +// +// This guide describes the IAM Roles Anywhere operations that you can call +// programmatically. For more information about IAM Roles Anywhere, see the +// IAM Roles Anywhere User Guide (https://docs.aws.amazon.com/rolesanywhere/latest/userguide/introduction.html). // // See https://docs.aws.amazon.com/goto/WebAPI/rolesanywhere-2018-05-10 for more information on this service. // diff --git a/service/rolesanywhere/rolesanywhereiface/interface.go b/service/rolesanywhere/rolesanywhereiface/interface.go index 1ba4cc8bd11..0b16a7d4757 100644 --- a/service/rolesanywhere/rolesanywhereiface/interface.go +++ b/service/rolesanywhere/rolesanywhereiface/interface.go @@ -156,6 +156,14 @@ type RolesAnywhereAPI interface { ListTrustAnchorsPages(*rolesanywhere.ListTrustAnchorsInput, func(*rolesanywhere.ListTrustAnchorsOutput, bool) bool) error ListTrustAnchorsPagesWithContext(aws.Context, *rolesanywhere.ListTrustAnchorsInput, func(*rolesanywhere.ListTrustAnchorsOutput, bool) bool, ...request.Option) error + PutNotificationSettings(*rolesanywhere.PutNotificationSettingsInput) (*rolesanywhere.PutNotificationSettingsOutput, error) + PutNotificationSettingsWithContext(aws.Context, *rolesanywhere.PutNotificationSettingsInput, ...request.Option) (*rolesanywhere.PutNotificationSettingsOutput, error) + PutNotificationSettingsRequest(*rolesanywhere.PutNotificationSettingsInput) (*request.Request, *rolesanywhere.PutNotificationSettingsOutput) + + ResetNotificationSettings(*rolesanywhere.ResetNotificationSettingsInput) (*rolesanywhere.ResetNotificationSettingsOutput, error) + ResetNotificationSettingsWithContext(aws.Context, 
*rolesanywhere.ResetNotificationSettingsInput, ...request.Option) (*rolesanywhere.ResetNotificationSettingsOutput, error) + ResetNotificationSettingsRequest(*rolesanywhere.ResetNotificationSettingsInput) (*request.Request, *rolesanywhere.ResetNotificationSettingsOutput) + TagResource(*rolesanywhere.TagResourceInput) (*rolesanywhere.TagResourceOutput, error) TagResourceWithContext(aws.Context, *rolesanywhere.TagResourceInput, ...request.Option) (*rolesanywhere.TagResourceOutput, error) TagResourceRequest(*rolesanywhere.TagResourceInput) (*request.Request, *rolesanywhere.TagResourceOutput) diff --git a/service/transfer/api.go b/service/transfer/api.go index 6f364250119..0d02fa7cf90 100644 --- a/service/transfer/api.go +++ b/service/transfer/api.go @@ -1125,7 +1125,7 @@ func (c *Transfer) DeleteHostKeyRequest(input *DeleteHostKeyInput) (req *request // DeleteHostKey API operation for AWS Transfer Family. // -// Deletes the host key that's specified in the HoskKeyId parameter. +// Deletes the host key that's specified in the HostKeyId parameter. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2066,6 +2066,12 @@ func (c *Transfer) DescribeExecutionRequest(input *DescribeExecutionInput) (req // You can use DescribeExecution to check the details of the execution of the // specified workflow. // +// This API call only returns details for in-progress workflows. +// +// If you provide an ID for an execution that is not in progress, or if the +// execution doesn't match the specified workflow ID, you receive a ResourceNotFound +// exception. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
@@ -2902,9 +2908,9 @@ func (c *Transfer) ImportSshPublicKeyRequest(input *ImportSshPublicKeyInput) (re // ImportSshPublicKey API operation for AWS Transfer Family. // -// Adds a Secure Shell (SSH) public key to a user account identified by a UserName -// value assigned to the specific file transfer protocol-enabled server, identified -// by ServerId. +// Adds a Secure Shell (SSH) public key to a Transfer Family user identified +// by a UserName value assigned to the specific file transfer protocol-enabled +// server, identified by ServerId. // // The response returns the UserName value, the ServerId value, and the name // of the SshPublicKeyId. @@ -3622,7 +3628,10 @@ func (c *Transfer) ListExecutionsRequest(input *ListExecutionsInput) (req *reque // ListExecutions API operation for AWS Transfer Family. // -// Lists all executions for the specified workflow. +// Lists all in-progress executions for the specified workflow. +// +// If the specified workflow ID cannot be found, ListExecutions returns a ResourceNotFound +// exception. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4618,7 +4627,8 @@ func (c *Transfer) ListWorkflowsRequest(input *ListWorkflowsInput) (req *request // ListWorkflows API operation for AWS Transfer Family. // -// Lists all of your workflows. +// Lists all workflows associated with your Amazon Web Services account for +// your current region. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5271,8 +5281,15 @@ func (c *Transfer) TestIdentityProviderRequest(input *TestIdentityProviderInput) // The ServerId and UserName parameters are required. The ServerProtocol, SourceIp, // and UserPassword are all optional. 
// -// You cannot use TestIdentityProvider if the IdentityProviderType of your server -// is SERVICE_MANAGED. +// Note the following: +// +// - You cannot use TestIdentityProvider if the IdentityProviderType of your +// server is SERVICE_MANAGED. +// +// - TestIdentityProvider does not work with keys: it only accepts passwords. +// +// - TestIdentityProvider can test the password operation for a custom Identity +// Provider that handles keys and passwords. // // - If you provide any incorrect values for any parameters, the Response // field is empty. @@ -5285,7 +5302,9 @@ func (c *Transfer) TestIdentityProviderRequest(input *TestIdentityProviderInput) // - If you enter a Server ID for the --server-id parameter that does not // identify an actual Transfer server, you receive the following error: An // error occurred (ResourceNotFoundException) when calling the TestIdentityProvider -// operation: Unknown server +// operation: Unknown server. It is possible your sever is in a different +// region. You can specify a region by adding the following: --region region-code, +// such as --region us-east-2 to specify a server in US East (Ohio). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6479,18 +6498,18 @@ func (s *ConflictException) RequestID() string { type CopyStepDetails struct { _ struct{} `type:"structure"` - // Specifies the location for the file being copied. Use ${Transfer:username} + // Specifies the location for the file being copied. Use ${Transfer:UserName} // or ${Transfer:UploadDate} in this field to parametrize the destination prefix // by username or uploaded date. 
// - // * Set the value of DestinationFileLocation to ${Transfer:username} to + // * Set the value of DestinationFileLocation to ${Transfer:UserName} to // copy uploaded files to an Amazon S3 bucket that is prefixed with the name // of the Transfer Family user that uploaded the file. // // * Set the value of DestinationFileLocation to ${Transfer:UploadDate} to // copy uploaded files to an Amazon S3 bucket that is prefixed with the date // of the upload. The system resolves UploadDate to a date format of YYYY-MM-DD, - // based on the date the file is uploaded. + // based on the date the file is uploaded in UTC. DestinationFileLocation *InputFileLocation `type:"structure"` // The name of the step, used as an identifier. @@ -6498,6 +6517,15 @@ type CopyStepDetails struct { // A flag that indicates whether to overwrite an existing file of the same name. // The default is FALSE. + // + // If the workflow is processing a file that has the same name as an existing + // file, the behavior is as follows: + // + // * If OverwriteExisting is TRUE, the existing file is replaced with the + // file being processed. + // + // * If OverwriteExisting is FALSE, nothing happens, and the workflow processing + // stops. OverwriteExisting *string `type:"string" enum:"OverwriteExisting"` // Specifies which file to use as input to the workflow step: either the output @@ -7436,11 +7464,11 @@ type CreateServerInput struct { // String and GoString methods. HostKey *string `type:"string" sensitive:"true"` - // Required when IdentityProviderType is set to AWS_DIRECTORY_SERVICE or API_GATEWAY. - // Accepts an array containing all of the information required to use a directory - // in AWS_DIRECTORY_SERVICE or invoke a customer-supplied authentication API, - // including the API Gateway URL. Not required when IdentityProviderType is - // set to SERVICE_MANAGED. + // Required when IdentityProviderType is set to AWS_DIRECTORY_SERVICE, Amazon + // Web Services_LAMBDA or API_GATEWAY. 
Accepts an array containing all of the + // information required to use a directory in AWS_DIRECTORY_SERVICE or invoke + // a customer-supplied authentication API, including the API Gateway URL. Not + // required when IdentityProviderType is set to SERVICE_MANAGED. IdentityProviderDetails *IdentityProviderDetails `type:"structure"` // The mode of authentication for a server. The default value is SERVICE_MANAGED, @@ -7460,7 +7488,7 @@ type CreateServerInput struct { // // Use the AWS_LAMBDA value to directly use an Lambda function as your identity // provider. If you choose this value, you must specify the ARN for the Lambda - // function in the Function parameter or the IdentityProviderDetails data type. + // function in the Function parameter for the IdentityProviderDetails data type. IdentityProviderType *string `type:"string" enum:"IdentityProviderType"` // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) @@ -7551,8 +7579,8 @@ type CreateServerInput struct { // // In addition to a workflow to execute when a file is uploaded completely, // WorkflowDetails can also contain a workflow ID (and execution role) for a - // workflow to execute on partial upload. A partial upload occurs when a file - // is open when the session disconnects. + // workflow to execute on partial upload. A partial upload occurs when the server + // session disconnects while the file is still being uploaded. WorkflowDetails *WorkflowDetails `type:"structure"` } @@ -8002,7 +8030,7 @@ type CreateUserOutput struct { // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` - // A unique string that identifies a user account associated with a server. + // A unique string that identifies a Transfer Family user. 
// // UserName is a required field UserName *string `min:"3" type:"string" required:"true"` @@ -8047,8 +8075,8 @@ type CreateWorkflowInput struct { // Specifies the steps (actions) to take if errors are encountered during execution // of the workflow. // - // For custom steps, the lambda function needs to send FAILURE to the call back - // API to kick off the exception steps. Additionally, if the lambda does not + // For custom steps, the Lambda function needs to send FAILURE to the call back + // API to kick off the exception steps. Additionally, if the Lambda does not // send SUCCESS before it times out, the exception steps are executed. OnExceptionSteps []*WorkflowStep `type:"list"` @@ -8219,7 +8247,7 @@ type CustomStepDetails struct { // enter ${original.file}. SourceFileLocation *string `type:"string"` - // The ARN for the lambda function that is being called. + // The ARN for the Lambda function that is being called. Target *string `type:"string"` // Timeout, in seconds, for the step. @@ -8285,7 +8313,18 @@ func (s *CustomStepDetails) SetTimeoutSeconds(v int64) *CustomStepDetails { type DecryptStepDetails struct { _ struct{} `type:"structure"` - // Specifies the location for the file that's being processed. + // Specifies the location for the file being decrypted. Use ${Transfer:UserName} + // or ${Transfer:UploadDate} in this field to parametrize the destination prefix + // by username or uploaded date. + // + // * Set the value of DestinationFileLocation to ${Transfer:UserName} to + // decrypt uploaded files to an Amazon S3 bucket that is prefixed with the + // name of the Transfer Family user that uploaded the file. + // + // * Set the value of DestinationFileLocation to ${Transfer:UploadDate} to + // decrypt uploaded files to an Amazon S3 bucket that is prefixed with the + // date of the upload. The system resolves UploadDate to a date format of + // YYYY-MM-DD, based on the date the file is uploaded in UTC. 
// // DestinationFileLocation is a required field DestinationFileLocation *InputFileLocation `type:"structure" required:"true"` @@ -8295,6 +8334,15 @@ type DecryptStepDetails struct { // A flag that indicates whether to overwrite an existing file of the same name. // The default is FALSE. + // + // If the workflow is processing a file that has the same name as an existing + // file, the behavior is as follows: + // + // * If OverwriteExisting is TRUE, the existing file is replaced with the + // file being processed. + // + // * If OverwriteExisting is FALSE, nothing happens, and the workflow processing + // stops. OverwriteExisting *string `type:"string" enum:"OverwriteExisting"` // Specifies which file to use as input to the workflow step: either the output @@ -10179,8 +10227,8 @@ type DescribeUserOutput struct { // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` - // An array containing the properties of the user account for the ServerID value - // that you specified. + // An array containing the properties of the Transfer Family user for the ServerID + // value that you specified. // // User is a required field User *DescribedUser `type:"structure" required:"true"` @@ -11251,7 +11299,7 @@ type DescribedServer struct { // // Use the AWS_LAMBDA value to directly use an Lambda function as your identity // provider. If you choose this value, you must specify the ARN for the Lambda - // function in the Function parameter or the IdentityProviderDetails data type. + // function in the Function parameter for the IdentityProviderDetails data type. 
IdentityProviderType *string `type:"string" enum:"IdentityProviderType"` // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) @@ -11359,8 +11407,8 @@ type DescribedServer struct { // // In addition to a workflow to execute when a file is uploaded completely, // WorkflowDetails can also contain a workflow ID (and execution role) for a - // workflow to execute on partial upload. A partial upload occurs when a file - // is open when the session disconnects. + // workflow to execute on partial upload. A partial upload occurs when the server + // session disconnects while the file is still being uploaded. WorkflowDetails *WorkflowDetails `type:"structure"` } @@ -12193,12 +12241,30 @@ type IdentityProviderDetails struct { // The identifier of the Directory Service directory that you want to stop sharing. DirectoryId *string `min:"12" type:"string"` - // The ARN for a lambda function to use for the Identity provider. + // The ARN for a Lambda function to use for the Identity provider. Function *string `min:"1" type:"string"` + // This parameter is only applicable if your IdentityProviderType is API_GATEWAY. // Provides the type of InvocationRole used to authenticate the user account. InvocationRole *string `min:"20" type:"string"` + // For SFTP-enabled servers, and for custom identity providers only, you can + // specify whether to authenticate using a password, SSH key pair, or both. + // + // * PASSWORD - users must provide their password to connect. + // + // * PUBLIC_KEY - users must provide their private key to connect. + // + // * PUBLIC_KEY_OR_PASSWORD - users can authenticate with either their password + // or their key. This is the default value. + // + // * PUBLIC_KEY_AND_PASSWORD - users must provide both their private key + // and their password to connect. The server checks the key first, and then + // if the key is valid, the system prompts for a password. 
If the private + // key provided does not match the public key that is stored, authentication + // fails. + SftpAuthenticationMethods *string `type:"string" enum:"SftpAuthenticationMethods"` + // Provides the location of the service endpoint used to authenticate users. Url *string `type:"string"` } @@ -12258,6 +12324,12 @@ func (s *IdentityProviderDetails) SetInvocationRole(v string) *IdentityProviderD return s } +// SetSftpAuthenticationMethods sets the SftpAuthenticationMethods field's value. +func (s *IdentityProviderDetails) SetSftpAuthenticationMethods(v string) *IdentityProviderDetails { + s.SftpAuthenticationMethods = &v + return s +} + // SetUrl sets the Url field's value. func (s *IdentityProviderDetails) SetUrl(v string) *IdentityProviderDetails { s.Url = &v @@ -12619,7 +12691,7 @@ type ImportSshPublicKeyInput struct { // SshPublicKeyBody is a required field SshPublicKeyBody *string `type:"string" required:"true"` - // The name of the user account that is assigned to one or more servers. + // The name of the Transfer Family user that is assigned to one or more servers. // // UserName is a required field UserName *string `min:"3" type:"string" required:"true"` @@ -13546,18 +13618,7 @@ func (s *ListExecutionsInput) SetWorkflowId(v string) *ListExecutionsInput { type ListExecutionsOutput struct { _ struct{} `type:"structure"` - // Returns the details for each execution. - // - // * NextToken: returned from a call to several APIs, you can use pass it - // to a subsequent command to continue listing additional executions. - // - // * StartTime: timestamp indicating when the execution began. - // - // * Executions: details of the execution, including the execution ID, initial - // file location, and Service metadata. - // - // * Status: one of the following values: IN_PROGRESS, COMPLETED, EXCEPTION, - // HANDLING_EXEPTION. + // Returns the details for each execution, in a ListedExecution array. 
// // Executions is a required field Executions []*ListedExecution `type:"list" required:"true"` @@ -14277,8 +14338,8 @@ type ListUsersOutput struct { // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` - // Returns the user accounts and their properties for the ServerId value that - // you specify. + // Returns the Transfer Family users and their properties for the ServerId value + // that you specify. // // Users is a required field Users []*ListedUser `type:"list" required:"true"` @@ -14999,7 +15060,7 @@ type ListedServer struct { // // Use the AWS_LAMBDA value to directly use an Lambda function as your identity // provider. If you choose this value, you must specify the ARN for the Lambda - // function in the Function parameter or the IdentityProviderDetails data type. + // function in the Function parameter for the IdentityProviderDetails data type. IdentityProviderType *string `type:"string" enum:"IdentityProviderType"` // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) @@ -16062,15 +16123,15 @@ func (s *ServiceUnavailableException) RequestID() string { } // Provides information about the public Secure Shell (SSH) key that is associated -// with a user account for the specific file transfer protocol-enabled server -// (as identified by ServerId). The information returned includes the date the -// key was imported, the public key contents, and the public key ID. A user -// can store more than one SSH public key associated with their user name on -// a specific server. +// with a Transfer Family user for the specific file transfer protocol-enabled +// server (as identified by ServerId). The information returned includes the +// date the key was imported, the public key contents, and the public key ID. +// A user can store more than one SSH public key associated with their user +// name on a specific server. 
type SshPublicKey struct { _ struct{} `type:"structure"` - // Specifies the date that the public key was added to the user account. + // Specifies the date that the public key was added to the Transfer Family user. // // DateImported is a required field DateImported *time.Time `type:"timestamp" required:"true"` @@ -16443,8 +16504,8 @@ type TagResourceInput struct { Arn *string `min:"20" type:"string" required:"true"` // Key-value pairs assigned to ARNs that you can use to group and search for - // resources by type. You can attach this metadata to user accounts for any - // purpose. + // resources by type. You can attach this metadata to resources (servers, users, + // workflows, and so on) for any purpose. // // Tags is a required field Tags []*Tag `min:"1" type:"list" required:"true"` @@ -16636,17 +16697,19 @@ type TestIdentityProviderInput struct { // * File Transfer Protocol Secure (FTPS) // // * File Transfer Protocol (FTP) + // + // * Applicability Statement 2 (AS2) ServerProtocol *string `type:"string" enum:"Protocol"` - // The source IP address of the user account to be tested. + // The source IP address of the account to be tested. SourceIp *string `type:"string"` - // The name of the user account to be tested. + // The name of the account to be tested. // // UserName is a required field UserName *string `min:"3" type:"string" required:"true"` - // The password of the user account to be tested. + // The password of the account to be tested. // // UserPassword is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by TestIdentityProviderInput's @@ -16733,10 +16796,11 @@ type TestIdentityProviderOutput struct { // failed due to an incorrect username or password. Message *string `type:"string"` - // The response that is returned from your API Gateway. + // The response that is returned from your API Gateway or your Lambda function. 
Response *string `type:"string"` - // The HTTP status code that is the response from your API Gateway. + // The HTTP status code that is the response from your API Gateway or your Lambda + // function. // // StatusCode is a required field StatusCode *int64 `type:"integer" required:"true"` @@ -18036,8 +18100,8 @@ type UpdateServerInput struct { // Specifies the name of the security policy that is attached to the server. SecurityPolicyName *string `type:"string"` - // A system-assigned unique identifier for a server instance that the user account - // is assigned to. + // A system-assigned unique identifier for a server instance that the Transfer + // Family user is assigned to. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -18047,8 +18111,8 @@ type UpdateServerInput struct { // // In addition to a workflow to execute when a file is uploaded completely, // WorkflowDetails can also contain a workflow ID (and execution role) for a - // workflow to execute on partial upload. A partial upload occurs when a file - // is open when the session disconnects. + // workflow to execute on partial upload. A partial upload occurs when the server + // session disconnects while the file is still being uploaded. // // To remove an associated workflow from a server, you can provide an empty // OnUpload object, as in the following example. @@ -18196,8 +18260,8 @@ func (s *UpdateServerInput) SetWorkflowDetails(v *WorkflowDetails) *UpdateServer type UpdateServerOutput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for a server that the user account is - // assigned to. + // A system-assigned unique identifier for a server that the Transfer Family + // user is assigned to. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -18301,8 +18365,8 @@ type UpdateUserInput struct { // when servicing your users' transfer requests. 
Role *string `min:"20" type:"string"` - // A system-assigned unique identifier for a server instance that the user account - // is assigned to. + // A system-assigned unique identifier for a Transfer Family server instance + // that the user is assigned to. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -18431,8 +18495,8 @@ func (s *UpdateUserInput) SetUserName(v string) *UpdateUserInput { type UpdateUserOutput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for a server instance that the user account - // is assigned to. + // A system-assigned unique identifier for a Transfer Family server instance + // that the account is assigned to. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -18487,7 +18551,8 @@ type UserDetails struct { // workflow. SessionId *string `min:"3" type:"string"` - // A unique string that identifies a user account associated with a server. + // A unique string that identifies a Transfer Family user associated with a + // server. // // UserName is a required field UserName *string `min:"3" type:"string" required:"true"` @@ -18534,8 +18599,8 @@ func (s *UserDetails) SetUserName(v string) *UserDetails { // // In addition to a workflow to execute when a file is uploaded completely, // WorkflowDetails can also contain a workflow ID (and execution role) for a -// workflow to execute on partial upload. A partial upload occurs when a file -// is open when the session disconnects. +// workflow to execute on partial upload. A partial upload occurs when the server +// session disconnects while the file is still being uploaded. type WorkflowDetail struct { _ struct{} `type:"structure"` @@ -19096,12 +19161,24 @@ func HomeDirectoryType_Values() []string { } } -// Returns information related to the type of user authentication that is in -// use for a file transfer protocol-enabled server's users. 
For AWS_DIRECTORY_SERVICE -// or SERVICE_MANAGED authentication, the Secure Shell (SSH) public keys are -// stored with a user on the server instance. For API_GATEWAY authentication, -// your custom authentication method is implemented by using an API call. The -// server can have only one method of authentication. +// The mode of authentication for a server. The default value is SERVICE_MANAGED, +// which allows you to store and access user credentials within the Transfer +// Family service. +// +// Use AWS_DIRECTORY_SERVICE to provide access to Active Directory groups in +// Directory Service for Microsoft Active Directory or Microsoft Active Directory +// in your on-premises environment or in Amazon Web Services using AD Connector. +// This option also requires you to provide a Directory ID by using the IdentityProviderDetails +// parameter. +// +// Use the API_GATEWAY value to integrate with an identity provider of your +// choosing. The API_GATEWAY setting requires you to provide an Amazon API Gateway +// endpoint URL to call for authentication by using the IdentityProviderDetails +// parameter. +// +// Use the AWS_LAMBDA value to directly use an Lambda function as your identity +// provider. If you choose this value, you must specify the ARN for the Lambda +// function in the Function parameter for the IdentityProviderDetails data type. 
const ( // IdentityProviderTypeServiceManaged is a IdentityProviderType enum value IdentityProviderTypeServiceManaged = "SERVICE_MANAGED" @@ -19246,6 +19323,30 @@ func SetStatOption_Values() []string { } } +const ( + // SftpAuthenticationMethodsPassword is a SftpAuthenticationMethods enum value + SftpAuthenticationMethodsPassword = "PASSWORD" + + // SftpAuthenticationMethodsPublicKey is a SftpAuthenticationMethods enum value + SftpAuthenticationMethodsPublicKey = "PUBLIC_KEY" + + // SftpAuthenticationMethodsPublicKeyOrPassword is a SftpAuthenticationMethods enum value + SftpAuthenticationMethodsPublicKeyOrPassword = "PUBLIC_KEY_OR_PASSWORD" + + // SftpAuthenticationMethodsPublicKeyAndPassword is a SftpAuthenticationMethods enum value + SftpAuthenticationMethodsPublicKeyAndPassword = "PUBLIC_KEY_AND_PASSWORD" +) + +// SftpAuthenticationMethods_Values returns all elements of the SftpAuthenticationMethods enum +func SftpAuthenticationMethods_Values() []string { + return []string{ + SftpAuthenticationMethodsPassword, + SftpAuthenticationMethodsPublicKey, + SftpAuthenticationMethodsPublicKeyOrPassword, + SftpAuthenticationMethodsPublicKeyAndPassword, + } +} + const ( // SigningAlgSha256 is a SigningAlg enum value SigningAlgSha256 = "SHA256" From df8a5856b1df974207218d84ce47e4bbdcb28563 Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com> Date: Tue, 16 May 2023 11:22:41 -0700 Subject: [PATCH 7/7] Release v1.44.264 (2023-05-16) (#4841) Release v1.44.264 (2023-05-16) === ### Service Client Updates * `service/detective`: Updates service API * `service/directconnect`: Updates service documentation * This release includes an update to the mtu value for CreateTransitVirtualInterface from 9001 mtu to 8500 mtu. 
* `service/glue`: Updates service API and documentation * Add Support for Tags for Custom Entity Types * `service/secretsmanager`: Updates service documentation * Documentation updates for Secrets Manager * `service/wafv2`: Updates service API and documentation --- CHANGELOG.md | 13 + aws/endpoints/defaults.go | 89 ++ aws/version.go | 2 +- models/apis/detective/2018-10-26/api-2.json | 3 +- .../2018-10-26/endpoint-rule-set-1.json | 392 ++--- .../2018-10-26/endpoint-tests-1.json | 923 +++--------- .../apis/directconnect/2012-10-25/docs-2.json | 4 +- models/apis/glue/2017-03-31/api-2.json | 6 +- models/apis/glue/2017-03-31/docs-2.json | 2 + .../secretsmanager/2017-10-17/docs-2.json | 6 +- models/apis/wafv2/2019-07-29/api-2.json | 108 +- models/apis/wafv2/2019-07-29/docs-2.json | 111 +- models/endpoints/endpoints.json | 76 +- service/detective/api.go | 4 + service/directconnect/api.go | 6 +- service/glue/api.go | 18 + service/secretsmanager/api.go | 13 +- service/wafv2/api.go | 1339 ++++++++++++++--- service/wafv2/errors.go | 9 + 19 files changed, 1954 insertions(+), 1170 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 18a7c9e0e4f..bc44a823fd9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,16 @@ +Release v1.44.264 (2023-05-16) +=== + +### Service Client Updates +* `service/detective`: Updates service API +* `service/directconnect`: Updates service documentation + * This release includes an update to the mtu value for CreateTransitVirtualInterface from 9001 mtu to 8500 mtu. 
+* `service/glue`: Updates service API and documentation + * Add Support for Tags for Custom Entity Types +* `service/secretsmanager`: Updates service documentation + * Documentation updates for Secrets Manager +* `service/wafv2`: Updates service API and documentation + Release v1.44.263 (2023-05-15) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 2d665d61ba8..22d4d9f3f80 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -20401,18 +20401,63 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "profile-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "profile-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "profile-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "profile-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "profile-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "profile-fips.us-west-2.amazonaws.com", + }, }, }, "projects.iot1click": service{ @@ -38807,6 +38852,28 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + 
Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov", + }, + }, + }, "rds": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -39468,6 +39535,28 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov", + }, + }, + }, "rds": service{ Endpoints: serviceEndpoints{ endpointKey{ diff --git a/aws/version.go b/aws/version.go index 9e1a41ed169..d3f98bd1747 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.263" +const SDKVersion = "1.44.264" diff --git a/models/apis/detective/2018-10-26/api-2.json b/models/apis/detective/2018-10-26/api-2.json index ff239e33292..892b41d32f9 100644 --- a/models/apis/detective/2018-10-26/api-2.json +++ b/models/apis/detective/2018-10-26/api-2.json @@ -517,7 +517,8 @@ "type":"string", "enum":[ "DETECTIVE_CORE", - "EKS_AUDIT" + "EKS_AUDIT", + "ASFF_SECURITYHUB_FINDING" ] }, "DatasourcePackageIngestDetail":{ diff --git a/models/apis/detective/2018-10-26/endpoint-rule-set-1.json b/models/apis/detective/2018-10-26/endpoint-rule-set-1.json index c6e55e4bfe2..7227bac202c 100644 --- a/models/apis/detective/2018-10-26/endpoint-rule-set-1.json +++ 
b/models/apis/detective/2018-10-26/endpoint-rule-set-1.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, 
{ - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] }, - "supportsFIPS" + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://api.detective-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://api.detective-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } ] - } - ], - 
"type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://api.detective-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://api.detective.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -222,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://api.detective-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://api.detective.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -231,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - 
"rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://api.detective.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://api.detective.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/models/apis/detective/2018-10-26/endpoint-tests-1.json b/models/apis/detective/2018-10-26/endpoint-tests-1.json index 7d8cee9a3f5..2c6f75a9e02 100644 --- a/models/apis/detective/2018-10-26/endpoint-tests-1.json +++ b/models/apis/detective/2018-10-26/endpoint-tests-1.json @@ -1,276 +1,276 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.ap-south-1.api.aws" + "url": "https://api.detective.af-south-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": true + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.ap-south-1.amazonaws.com" + "url": "https://api.detective.ap-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-south-1", + "Region": "ap-east-1", + 
"UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.ap-south-1.api.aws" + "url": "https://api.detective.ap-northeast-1.amazonaws.com" } }, "params": { + "Region": "ap-northeast-1", "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.ap-south-1.amazonaws.com" + "url": "https://api.detective.ap-northeast-2.amazonaws.com" } }, "params": { + "Region": "ap-northeast-2", "UseFIPS": false, - "Region": "ap-south-1", "UseDualStack": false } }, { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.eu-south-1.api.aws" + "url": "https://api.detective.ap-south-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": true + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.eu-south-1.amazonaws.com" + "url": "https://api.detective.ap-southeast-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "eu-south-1", + "Region": "ap-southeast-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For 
region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.eu-south-1.api.aws" + "url": "https://api.detective.ap-southeast-2.amazonaws.com" } }, "params": { + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.eu-south-1.amazonaws.com" + "url": "https://api.detective.ca-central-1.amazonaws.com" } }, "params": { + "Region": "ca-central-1", "UseFIPS": false, - "Region": "eu-south-1", "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.us-gov-east-1.api.aws" + "url": "https://api.detective.eu-central-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": true + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.us-gov-east-1.amazonaws.com" + "url": "https://api.detective.eu-north-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "us-gov-east-1", + "Region": "eu-north-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.us-gov-east-1.api.aws" + 
"url": "https://api.detective.eu-south-1.amazonaws.com" } }, "params": { + "Region": "eu-south-1", "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.us-gov-east-1.amazonaws.com" + "url": "https://api.detective.eu-west-1.amazonaws.com" } }, "params": { + "Region": "eu-west-1", "UseFIPS": false, - "Region": "us-gov-east-1", "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.ca-central-1.api.aws" + "url": "https://api.detective.eu-west-2.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": true + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.ca-central-1.amazonaws.com" + "url": "https://api.detective.eu-west-3.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ca-central-1", + "Region": "eu-west-3", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.ca-central-1.api.aws" + "url": "https://api.detective.me-south-1.amazonaws.com" } }, "params": { + "Region": "me-south-1", "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": true + 
"UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.ca-central-1.amazonaws.com" + "url": "https://api.detective.sa-east-1.amazonaws.com" } }, "params": { + "Region": "sa-east-1", "UseFIPS": false, - "Region": "ca-central-1", "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.eu-central-1.api.aws" + "url": "https://api.detective.us-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.eu-central-1.amazonaws.com" + "url": "https://api.detective-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "eu-central-1", "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.eu-central-1.api.aws" + "url": "https://api.detective.us-east-2.amazonaws.com" } }, "params": { + "Region": "us-east-2", "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack 
disabled", "expect": { "endpoint": { - "url": "https://api.detective.eu-central-1.amazonaws.com" + "url": "https://api.detective-fips.us-east-2.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "eu-central-1", + "Region": "us-east-2", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.us-west-1.api.aws" + "url": "https://api.detective.us-west-1.amazonaws.com" } }, "params": { - "UseFIPS": true, "Region": "us-west-1", - "UseDualStack": true + "UseFIPS": false, + "UseDualStack": false } }, { @@ -281,879 +281,312 @@ } }, "params": { - "UseFIPS": true, "Region": "us-west-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.us-west-1.api.aws" + "url": "https://api.detective.us-west-2.amazonaws.com" } }, "params": { + "Region": "us-west-2", "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.us-west-1.amazonaws.com" + "url": "https://api.detective-fips.us-west-2.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "us-west-1", + "Region": "us-west-2", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": 
"https://api.detective-fips.us-west-2.api.aws" + "url": "https://api.detective-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-west-2", "UseDualStack": true } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.us-west-2.amazonaws.com" + "url": "https://api.detective.us-east-1.api.aws" } }, "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://api.detective.us-west-2.api.aws" + "url": "https://api.detective-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseFIPS": false, - "Region": "us-west-2", + "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.us-west-2.amazonaws.com" + "url": "https://api.detective-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseFIPS": false, - "Region": "us-west-2", + "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.af-south-1.api.aws" + "url": "https://api.detective.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseFIPS": true, - "Region": "af-south-1", + "Region": 
"cn-north-1", + "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.af-south-1.amazonaws.com" + "url": "https://api.detective.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseFIPS": true, - "Region": "af-south-1", + "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.af-south-1.api.aws" + "url": "https://api.detective.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.af-south-1.amazonaws.com" + "url": "https://api.detective-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "af-south-1", + "Region": "us-gov-east-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.eu-north-1.api.aws" + "url": "https://api.detective.us-gov-west-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": true + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack 
disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.eu-north-1.amazonaws.com" + "url": "https://api.detective-fips.us-gov-west-1.amazonaws.com" } }, "params": { + "Region": "us-gov-west-1", "UseFIPS": true, - "Region": "eu-north-1", "UseDualStack": false } }, { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://api.detective.eu-north-1.api.aws" + "url": "https://api.detective-fips.us-gov-east-1.api.aws" } }, "params": { - "UseFIPS": false, - "Region": "eu-north-1", + "Region": "us-gov-east-1", + "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://api.detective.eu-north-1.amazonaws.com" + "url": "https://api.detective.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://api.detective-fips.eu-west-3.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "eu-west-3", "UseDualStack": true } }, { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://api.detective-fips.eu-west-3.amazonaws.com" + "url": "https://api.detective-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "eu-west-3", "UseDualStack": false } }, { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://api.detective.eu-west-3.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "eu-west-3", "UseDualStack": true } }, { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.eu-west-3.amazonaws.com" + "url": "https://api.detective.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "eu-west-3", "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://api.detective-fips.eu-west-2.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "eu-west-2", "UseDualStack": true } }, { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.eu-west-2.amazonaws.com" + "url": "https://api.detective-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "eu-west-2", 
"UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://api.detective.eu-west-2.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "eu-west-2", "UseDualStack": true } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.eu-west-2.amazonaws.com" + "url": "https://api.detective.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "eu-west-2", "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.eu-west-1.api.aws" + "url": "https://example.com" } }, "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, 
- "Region": "eu-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - 
"UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective-fips.me-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective.me-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.me-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": 
"sa-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "sa-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective.sa-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective-fips.ap-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective-fips.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective.ap-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For 
region us-gov-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective-fips.us-gov-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective.us-gov-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-gov-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.us-gov-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-gov-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective-fips.ap-southeast-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-southeast-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective-fips.ap-southeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-southeast-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective.ap-southeast-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-southeast-1", - "UseDualStack": true - } - }, - { - 
"documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.ap-southeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-southeast-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective-fips.ap-southeast-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-southeast-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective-fips.ap-southeast-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-southeast-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective.ap-southeast-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-southeast-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.ap-southeast-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-southeast-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective-fips.us-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective-fips.us-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-east-1", - "UseDualStack": false - } - }, - { 
- "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective.us-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.us-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective-fips.us-east-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-east-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective-fips.us-east-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-east-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://api.detective.us-east-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-east-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.us-east-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-east-2", - "UseDualStack": false - } - }, - { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", - "expect": { - "endpoint": { - "url": "https://example.com" + "url": "https://example.com" } }, "params": { "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1164,8 +597,8 @@ "error": "Invalid Configuration: FIPS 
and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1176,11 +609,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/models/apis/directconnect/2012-10-25/docs-2.json b/models/apis/directconnect/2012-10-25/docs-2.json index a6144268dd8..214eba20f29 100644 --- a/models/apis/directconnect/2012-10-25/docs-2.json +++ b/models/apis/directconnect/2012-10-25/docs-2.json @@ -1163,9 +1163,9 @@ "NewPrivateVirtualInterface$mtu": "

The maximum transmission unit (MTU), in bytes. The supported values are 1500 and 9001. The default value is 1500.

", "NewPrivateVirtualInterfaceAllocation$mtu": "

The maximum transmission unit (MTU), in bytes. The supported values are 1500 and 9001. The default value is 1500.

", "NewTransitVirtualInterface$mtu": "

The maximum transmission unit (MTU), in bytes. The supported values are 1500 and 8500. The default value is 1500.

", - "NewTransitVirtualInterfaceAllocation$mtu": "

The maximum transmission unit (MTU), in bytes. The supported values are 1500 and 9001. The default value is 1500.

", + "NewTransitVirtualInterfaceAllocation$mtu": "

The maximum transmission unit (MTU), in bytes. The supported values are 1500 and 8500. The default value is 1500.

", "UpdateVirtualInterfaceAttributesRequest$mtu": "

The maximum transmission unit (MTU), in bytes. The supported values are 1500 and 9001. The default value is 1500.

", - "VirtualInterface$mtu": "

The maximum transmission unit (MTU), in bytes. The supported values are 1500 and 9001. The default value is 1500.

" + "VirtualInterface$mtu": "

The maximum transmission unit (MTU), in bytes. The supported values are 1500 and 8500. The default value is 1500.

" } }, "MacSecCapable": { diff --git a/models/apis/glue/2017-03-31/api-2.json b/models/apis/glue/2017-03-31/api-2.json index d9cdd7a67fa..1b7363b81e4 100644 --- a/models/apis/glue/2017-03-31/api-2.json +++ b/models/apis/glue/2017-03-31/api-2.json @@ -4926,7 +4926,8 @@ "members":{ "Name":{"shape":"NameString"}, "RegexString":{"shape":"NameString"}, - "ContextWords":{"shape":"ContextWords"} + "ContextWords":{"shape":"ContextWords"}, + "Tags":{"shape":"TagsMap"} } }, "CreateCustomEntityTypeResponse":{ @@ -8772,7 +8773,8 @@ "type":"structure", "members":{ "NextToken":{"shape":"PaginationToken"}, - "MaxResults":{"shape":"PageSize"} + "MaxResults":{"shape":"PageSize"}, + "Tags":{"shape":"TagsMap"} } }, "ListCustomEntityTypesResponse":{ diff --git a/models/apis/glue/2017-03-31/docs-2.json b/models/apis/glue/2017-03-31/docs-2.json index 3f819883df8..1a74935eecb 100644 --- a/models/apis/glue/2017-03-31/docs-2.json +++ b/models/apis/glue/2017-03-31/docs-2.json @@ -7593,6 +7593,7 @@ "CreateBlueprintRequest$Tags": "

The tags to be applied to this blueprint.

", "CreateConnectionRequest$Tags": "

The tags you assign to the connection.

", "CreateCrawlerRequest$Tags": "

The tags to use with this crawler request. You may use tags to limit access to the crawler. For more information about tags in Glue, see Amazon Web Services Tags in Glue in the developer guide.

", + "CreateCustomEntityTypeRequest$Tags": "

A list of tags applied to the custom entity type.

", "CreateDataQualityRulesetRequest$Tags": "

A list of tags applied to the data quality ruleset.

", "CreateDatabaseRequest$Tags": "

The tags you assign to the database.

", "CreateDevEndpointRequest$Tags": "

The tags to use with this DevEndpoint. You may use tags to limit access to the DevEndpoint. For more information about tags in Glue, see Amazon Web Services Tags in Glue in the developer guide.

", @@ -7608,6 +7609,7 @@ "GetTagsResponse$Tags": "

The requested tags.

", "ListBlueprintsRequest$Tags": "

Filters the list by an Amazon Web Services resource tag.

", "ListCrawlersRequest$Tags": "

Specifies to return only these tagged resources.

", + "ListCustomEntityTypesRequest$Tags": "

A list of key-value pair tags.

", "ListDataQualityRulesetsRequest$Tags": "

A list of key-value pair tags.

", "ListDevEndpointsRequest$Tags": "

Specifies to return only these tagged resources.

", "ListJobsRequest$Tags": "

Specifies to return only these tagged resources.

", diff --git a/models/apis/secretsmanager/2017-10-17/docs-2.json b/models/apis/secretsmanager/2017-10-17/docs-2.json index 5b9e7fe955f..03a9d9cf30d 100644 --- a/models/apis/secretsmanager/2017-10-17/docs-2.json +++ b/models/apis/secretsmanager/2017-10-17/docs-2.json @@ -43,7 +43,7 @@ "base": null, "refs": { "CreateSecretRequest$ForceOverwriteReplicaSecret": "

Specifies whether to overwrite a secret with the same name in the destination Region. By default, secrets aren't overwritten.

", - "DeleteSecretRequest$ForceDeleteWithoutRecovery": "

Specifies whether to delete the secret without any recovery window. You can't use both this parameter and RecoveryWindowInDays in the same call. If you don't use either, then by default Secrets Manager uses a 30 day recovery window.

Secrets Manager performs the actual deletion with an asynchronous background process, so there might be a short delay before the secret is permanently deleted. If you delete a secret and then immediately create a secret with the same name, use appropriate back off and retry logic.

Use this parameter with caution. This parameter causes the operation to skip the normal recovery window before the permanent deletion that Secrets Manager would normally impose with the RecoveryWindowInDays parameter. If you delete a secret with the ForceDeleteWithoutRecovery parameter, then you have no opportunity to recover the secret. You lose the secret permanently.

", + "DeleteSecretRequest$ForceDeleteWithoutRecovery": "

Specifies whether to delete the secret without any recovery window. You can't use both this parameter and RecoveryWindowInDays in the same call. If you don't use either, then by default Secrets Manager uses a 30 day recovery window.

Secrets Manager performs the actual deletion with an asynchronous background process, so there might be a short delay before the secret is permanently deleted. If you delete a secret and then immediately create a secret with the same name, use appropriate back off and retry logic.

If you forcibly delete an already deleted or nonexistent secret, the operation does not return ResourceNotFoundException.

Use this parameter with caution. This parameter causes the operation to skip the normal recovery window before the permanent deletion that Secrets Manager would normally impose with the RecoveryWindowInDays parameter. If you delete a secret with the ForceDeleteWithoutRecovery parameter, then you have no opportunity to recover the secret. You lose the secret permanently.

", "ListSecretVersionIdsRequest$IncludeDeprecated": "

Specifies whether to include versions of secrets that don't have any staging labels attached to them. Versions without staging labels are considered deprecated and are subject to deletion by Secrets Manager. By default, versions without staging labels aren't included.

", "ListSecretsRequest$IncludePlannedDeletion": "

Specifies whether to include secrets scheduled for deletion. By default, secrets scheduled for deletion aren't included.

", "PutResourcePolicyRequest$BlockPublicPolicy": "

Specifies whether to block resource-based policies that allow broad access to the secret, for example those that use a wildcard for the principal. By default, public policies aren't blocked.

", @@ -381,8 +381,8 @@ "NextRotationDateType": { "base": null, "refs": { - "DescribeSecretResponse$NextRotationDate": "

The next date and time that Secrets Manager will rotate the secret, rounded to the nearest hour. If the secret isn't configured for rotation, Secrets Manager returns null.

", - "SecretListEntry$NextRotationDate": "

The next date and time that Secrets Manager will attempt to rotate the secret, rounded to the nearest hour. This value is null if the secret is not set up for rotation.

" + "DescribeSecretResponse$NextRotationDate": "

The next rotation is scheduled to occur on or before this date. If the secret isn't configured for rotation, Secrets Manager returns null.

", + "SecretListEntry$NextRotationDate": "

The next rotation is scheduled to occur on or before this date. If the secret isn't configured for rotation, Secrets Manager returns null.

" } }, "NextTokenType": { diff --git a/models/apis/wafv2/2019-07-29/api-2.json b/models/apis/wafv2/2019-07-29/api-2.json index db13de515ac..5938d7ea6f2 100755 --- a/models/apis/wafv2/2019-07-29/api-2.json +++ b/models/apis/wafv2/2019-07-29/api-2.json @@ -419,7 +419,8 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidParameterException"}, {"shape":"WAFNonexistentItemException"}, - {"shape":"WAFInvalidOperationException"} + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFUnsupportedAggregateKeyTypeException"} ] }, "GetRegexPatternSet":{ @@ -2362,6 +2363,12 @@ "LabelName":{"shape":"LabelName"} } }, + "LabelNamespace":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^[0-9A-Za-z_\\-:]+:$" + }, "LabelSummaries":{ "type":"list", "member":{"shape":"LabelSummary"} @@ -2883,7 +2890,9 @@ "CHALLENGE_CONFIG", "TOKEN_DOMAIN", "ATP_RULE_SET_RESPONSE_INSPECTION", - "ASSOCIATED_RESOURCE_TYPE" + "ASSOCIATED_RESOURCE_TYPE", + "SCOPE_DOWN", + "CUSTOM_KEYS" ] }, "ParameterExceptionParameter":{ @@ -3000,16 +3009,38 @@ "Limit":{"shape":"RateLimit"}, "AggregateKeyType":{"shape":"RateBasedStatementAggregateKeyType"}, "ScopeDownStatement":{"shape":"Statement"}, - "ForwardedIPConfig":{"shape":"ForwardedIPConfig"} + "ForwardedIPConfig":{"shape":"ForwardedIPConfig"}, + "CustomKeys":{"shape":"RateBasedStatementCustomKeys"} } }, "RateBasedStatementAggregateKeyType":{ "type":"string", "enum":[ "IP", - "FORWARDED_IP" + "FORWARDED_IP", + "CUSTOM_KEYS", + "CONSTANT" ] }, + "RateBasedStatementCustomKey":{ + "type":"structure", + "members":{ + "Header":{"shape":"RateLimitHeader"}, + "Cookie":{"shape":"RateLimitCookie"}, + "QueryArgument":{"shape":"RateLimitQueryArgument"}, + "QueryString":{"shape":"RateLimitQueryString"}, + "HTTPMethod":{"shape":"RateLimitHTTPMethod"}, + "ForwardedIP":{"shape":"RateLimitForwardedIP"}, + "IP":{"shape":"RateLimitIP"}, + "LabelNamespace":{"shape":"RateLimitLabelNamespace"} + } + }, + "RateBasedStatementCustomKeys":{ + "type":"list", + 
"member":{"shape":"RateBasedStatementCustomKey"}, + "max":5, + "min":1 + }, "RateBasedStatementManagedKeysIPSet":{ "type":"structure", "members":{ @@ -3022,6 +3053,68 @@ "max":2000000000, "min":100 }, + "RateLimitCookie":{ + "type":"structure", + "required":[ + "Name", + "TextTransformations" + ], + "members":{ + "Name":{"shape":"FieldToMatchData"}, + "TextTransformations":{"shape":"TextTransformations"} + } + }, + "RateLimitForwardedIP":{ + "type":"structure", + "members":{ + } + }, + "RateLimitHTTPMethod":{ + "type":"structure", + "members":{ + } + }, + "RateLimitHeader":{ + "type":"structure", + "required":[ + "Name", + "TextTransformations" + ], + "members":{ + "Name":{"shape":"FieldToMatchData"}, + "TextTransformations":{"shape":"TextTransformations"} + } + }, + "RateLimitIP":{ + "type":"structure", + "members":{ + } + }, + "RateLimitLabelNamespace":{ + "type":"structure", + "required":["Namespace"], + "members":{ + "Namespace":{"shape":"LabelNamespace"} + } + }, + "RateLimitQueryArgument":{ + "type":"structure", + "required":[ + "Name", + "TextTransformations" + ], + "members":{ + "Name":{"shape":"FieldToMatchData"}, + "TextTransformations":{"shape":"TextTransformations"} + } + }, + "RateLimitQueryString":{ + "type":"structure", + "required":["TextTransformations"], + "members":{ + "TextTransformations":{"shape":"TextTransformations"} + } + }, "RedactedFields":{ "type":"list", "member":{"shape":"FieldToMatch"}, @@ -3998,6 +4091,13 @@ }, "exception":true }, + "WAFUnsupportedAggregateKeyTypeException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, "WebACL":{ "type":"structure", "required":[ diff --git a/models/apis/wafv2/2019-07-29/docs-2.json b/models/apis/wafv2/2019-07-29/docs-2.json index 81d0f343fca..8c94b05b6f4 100755 --- a/models/apis/wafv2/2019-07-29/docs-2.json +++ b/models/apis/wafv2/2019-07-29/docs-2.json @@ -25,7 +25,7 @@ "GetManagedRuleSet": "

Retrieves the specified managed rule set.

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

", "GetMobileSdkRelease": "

Retrieves information for the specified mobile SDK release, including release notes and tags.

The mobile SDK is not generally available. Customers who have access to the mobile SDK can use it to establish and manage WAF tokens for use in HTTP(S) requests from a mobile device to WAF. For more information, see WAF client application integration in the WAF Developer Guide.

", "GetPermissionPolicy": "

Returns the IAM policy that is attached to the specified rule group.

You must be the owner of the rule group to perform this operation.

", - "GetRateBasedStatementManagedKeys": "

Retrieves the keys that are currently blocked by a rate-based rule instance. The maximum number of managed keys that can be blocked for a single rate-based rule instance is 10,000. If more than 10,000 addresses exceed the rate limit, those with the highest rates are blocked.

For a rate-based rule that you've defined inside a rule group, provide the name of the rule group reference statement in your request, in addition to the rate-based rule name and the web ACL name.

WAF monitors web requests and manages keys independently for each unique combination of web ACL, optional rule group, and rate-based rule. For example, if you define a rate-based rule inside a rule group, and then use the rule group in a web ACL, WAF monitors web requests and manages keys for that web ACL, rule group reference statement, and rate-based rule instance. If you use the same rule group in a second web ACL, WAF monitors web requests and manages keys for this second usage completely independent of your first.

", + "GetRateBasedStatementManagedKeys": "

Retrieves the IP addresses that are currently blocked by a rate-based rule instance. This is only available for rate-based rules that aggregate solely on the IP address or on the forwarded IP address.

The maximum number of addresses that can be blocked for a single rate-based rule instance is 10,000. If more than 10,000 addresses exceed the rate limit, those with the highest rates are blocked.

For a rate-based rule that you've defined inside a rule group, provide the name of the rule group reference statement in your request, in addition to the rate-based rule name and the web ACL name.

WAF monitors web requests and manages keys independently for each unique combination of web ACL, optional rule group, and rate-based rule. For example, if you define a rate-based rule inside a rule group, and then use the rule group in a web ACL, WAF monitors web requests and manages keys for that web ACL, rule group reference statement, and rate-based rule instance. If you use the same rule group in a second web ACL, WAF monitors web requests and manages keys for this second usage completely independent of your first.

", "GetRegexPatternSet": "

Retrieves the specified RegexPatternSet.

", "GetRuleGroup": "

Retrieves the specified RuleGroup.

", "GetSampledRequests": "

Gets detailed information about a specified number of requests--a sample--that WAF randomly selects from among the first 5,000 requests that your Amazon Web Services resource received during a time range that you choose. You can specify a sample size of up to 500 requests, and you can specify any time range in the previous three hours.

GetSampledRequests returns a time range, which is usually the time range that you specified. However, if your resource (such as a CloudFront distribution) received 5,000 requests before the specified time range elapsed, GetSampledRequests returns an updated time range. This new time range indicates the actual period during which WAF selected the requests in the sample.

", @@ -669,7 +669,8 @@ "WAFSubscriptionNotFoundException$Message": null, "WAFTagOperationException$Message": null, "WAFTagOperationInternalErrorException$Message": null, - "WAFUnavailableEntityException$Message": null + "WAFUnavailableEntityException$Message": null, + "WAFUnsupportedAggregateKeyTypeException$Message": null } }, "ErrorReason": { @@ -743,6 +744,9 @@ "base": null, "refs": { "HeaderNames$member": null, + "RateLimitCookie$Name": "

The name of the cookie to use.

", + "RateLimitHeader$Name": "

The name of the header to use.

", + "RateLimitQueryArgument$Name": "

The name of the query argument to use.

", "SingleHeader$Name": "

The name of the query header to inspect.

", "SingleQueryArgument$Name": "

The name of the query argument to inspect.

" } @@ -795,7 +799,7 @@ "base": "

The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

If the specified header isn't present in the request, WAF doesn't apply the rule to the web request at all.

This configuration is used for GeoMatchStatement and RateBasedStatement. For IPSetReferenceStatement, use IPSetForwardedIPConfig instead.

WAF only evaluates the first IP address found in the specified HTTP header.

", "refs": { "GeoMatchStatement$ForwardedIPConfig": "

The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

If the specified header isn't present in the request, WAF doesn't apply the rule to the web request at all.

", - "RateBasedStatement$ForwardedIPConfig": "

The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

If the specified header isn't present in the request, WAF doesn't apply the rule to the web request at all.

This is required if AggregateKeyType is set to FORWARDED_IP.

" + "RateBasedStatement$ForwardedIPConfig": "

The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

If the specified header isn't present in the request, WAF doesn't apply the rule to the web request at all.

This is required if you specify a forwarded IP in the rule's aggregate key settings.

" } }, "ForwardedIPHeaderName": { @@ -1155,6 +1159,12 @@ "Condition$LabelNameCondition": "

A single label name condition. This is the fully qualified label name that a log record must contain in order to meet the condition. Fully qualified labels have a prefix, optional namespaces, and label name. The prefix identifies the rule group or web ACL context of the rule that added the label.

" } }, + "LabelNamespace": { + "base": null, + "refs": { + "RateLimitLabelNamespace$Namespace": "

The namespace to use for aggregation.

" + } + }, "LabelSummaries": { "base": null, "refs": { @@ -1642,19 +1652,31 @@ } }, "RateBasedStatement": { - "base": "

A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.

WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by WAF. If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by WAF.

When the rule action triggers, WAF blocks additional requests from the IP address until the request rate falls below the limit.

You can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:

In this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet the criteria of both of the nested statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet the criteria of both of the nested statements are not counted towards the rate limit and are not affected by this rule.

You cannot nest a RateBasedStatement inside another statement, for example inside a NotStatement or OrStatement. You can define a RateBasedStatement inside a web ACL and inside a rule group.

", + "base": "

A rate-based rule counts incoming requests and rate limits requests when they are coming at too fast a rate. The rule categorizes requests according to your aggregation criteria, collects them into aggregation instances, and counts and rate limits the requests for each instance.

You can specify individual aggregation keys, like IP address or HTTP method. You can also specify aggregation key combinations, like IP address and HTTP method, or HTTP method, query argument, and cookie.

Each unique set of values for the aggregation keys that you specify is a separate aggregation instance, with the value from each key contributing to the aggregation instance definition.

For example, assume the rule evaluates web requests with the following IP address and HTTP method values:

The rule would create different aggregation instances according to your aggregation criteria, for example:

For any n-tuple of aggregation keys, each unique combination of values for the keys defines a separate aggregation instance, which WAF counts and rate-limits individually.

You can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts and rate limits requests that match the nested statement. You can use this nested scope-down statement in conjunction with your aggregation key specifications or you can just count and rate limit all requests that match the scope-down statement, without additional aggregation. When you choose to just manage all requests that match a scope-down statement, the aggregation instance is singular for the rule.

You cannot nest a RateBasedStatement inside another statement, for example inside a NotStatement or OrStatement. You can define a RateBasedStatement inside a web ACL and inside a rule group.

For additional information about the options, see Rate limiting web requests using rate-based rules in the WAF Developer Guide.

If you only aggregate on the individual IP address or forwarded IP address, you can retrieve the list of IP addresses that WAF is currently rate limiting for a rule through the API call GetRateBasedStatementManagedKeys. This option is not available for other aggregation configurations.

WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by WAF. If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by WAF.

", "refs": { - "Statement$RateBasedStatement": "

A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.

WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by WAF. If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by WAF.

When the rule action triggers, WAF blocks additional requests from the IP address until the request rate falls below the limit.

You can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:

In this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet the criteria of both of the nested statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet the criteria of both of the nested statements are not counted towards the rate limit and are not affected by this rule.

You cannot nest a RateBasedStatement inside another statement, for example inside a NotStatement or OrStatement. You can define a RateBasedStatement inside a web ACL and inside a rule group.

" + "Statement$RateBasedStatement": "

A rate-based rule counts incoming requests and rate limits requests when they are coming at too fast a rate. The rule categorizes requests according to your aggregation criteria, collects them into aggregation instances, and counts and rate limits the requests for each instance.

You can specify individual aggregation keys, like IP address or HTTP method. You can also specify aggregation key combinations, like IP address and HTTP method, or HTTP method, query argument, and cookie.

Each unique set of values for the aggregation keys that you specify is a separate aggregation instance, with the value from each key contributing to the aggregation instance definition.

For example, assume the rule evaluates web requests with the following IP address and HTTP method values:

The rule would create different aggregation instances according to your aggregation criteria, for example:

For any n-tuple of aggregation keys, each unique combination of values for the keys defines a separate aggregation instance, which WAF counts and rate-limits individually.

You can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts and rate limits requests that match the nested statement. You can use this nested scope-down statement in conjunction with your aggregation key specifications or you can just count and rate limit all requests that match the scope-down statement, without additional aggregation. When you choose to just manage all requests that match a scope-down statement, the aggregation instance is singular for the rule.

You cannot nest a RateBasedStatement inside another statement, for example inside a NotStatement or OrStatement. You can define a RateBasedStatement inside a web ACL and inside a rule group.

For additional information about the options, see Rate limiting web requests using rate-based rules in the WAF Developer Guide.

If you only aggregate on the individual IP address or forwarded IP address, you can retrieve the list of IP addresses that WAF is currently rate limiting for a rule through the API call GetRateBasedStatementManagedKeys. This option is not available for other aggregation configurations.

WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by WAF. If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by WAF.

" } }, "RateBasedStatementAggregateKeyType": { "base": null, "refs": { - "RateBasedStatement$AggregateKeyType": "

Setting that indicates how to aggregate the request counts. The options are the following:

" + "RateBasedStatement$AggregateKeyType": "

Setting that indicates how to aggregate the request counts.

Web requests that are missing any of the components specified in the aggregation keys are omitted from the rate-based rule evaluation and handling.

" + } + }, + "RateBasedStatementCustomKey": { + "base": "

Specifies a single custom aggregate key for a rate-based rule.

Web requests that are missing any of the components specified in the aggregation keys are omitted from the rate-based rule evaluation and handling.

", + "refs": { + "RateBasedStatementCustomKeys$member": null + } + }, + "RateBasedStatementCustomKeys": { + "base": null, + "refs": { + "RateBasedStatement$CustomKeys": "

Specifies the aggregate keys to use in a rate-based rule.

" } }, "RateBasedStatementManagedKeysIPSet": { - "base": "

The set of IP addresses that are currently blocked for a RateBasedStatement.

", + "base": "

The set of IP addresses that are currently blocked for a RateBasedStatement. This is only available for rate-based rules that aggregate on just the IP address, with the AggregateKeyType set to IP or FORWARDED_IP.

A rate-based rule applies its rule action to requests from IP addresses that are in the rule's managed keys list and that match the rule's scope-down statement. When a rule has no scope-down statement, it applies the action to all requests from the IP addresses that are in the list. The rule applies its rule action to rate limit the matching requests. The action is usually Block but it can be any valid rule action except for Allow.

The maximum number of IP addresses that can be rate limited by a single rate-based rule instance is 10,000. If more than 10,000 addresses exceed the rate limit, WAF limits those with the highest rates.

", "refs": { "GetRateBasedStatementManagedKeysResponse$ManagedKeysIPV4": "

The keys that are of Internet Protocol version 4 (IPv4).

", "GetRateBasedStatementManagedKeysResponse$ManagedKeysIPV6": "

The keys that are of Internet Protocol version 6 (IPv6).

" @@ -1663,7 +1685,55 @@ "RateLimit": { "base": null, "refs": { - "RateBasedStatement$Limit": "

The limit on requests per 5-minute period for a single originating IP address. If the statement includes a ScopeDownStatement, this limit is applied only to the requests that match the statement.

" + "RateBasedStatement$Limit": "

The limit on requests per 5-minute period for a single aggregation instance for the rate-based rule. If the rate-based statement includes a ScopeDownStatement, this limit is applied only to the requests that match the statement.

Examples:

" + } + }, + "RateLimitCookie": { + "base": "

Specifies a cookie as an aggregate key for a rate-based rule. Each distinct value in the cookie contributes to the aggregation instance. If you use a single cookie as your custom key, then each value fully defines an aggregation instance.

", + "refs": { + "RateBasedStatementCustomKey$Cookie": "

Use the value of a cookie in the request as an aggregate key. Each distinct value in the cookie contributes to the aggregation instance. If you use a single cookie as your custom key, then each value fully defines an aggregation instance.

" + } + }, + "RateLimitForwardedIP": { + "base": "

Specifies the first IP address in an HTTP header as an aggregate key for a rate-based rule. Each distinct forwarded IP address contributes to the aggregation instance.

This setting is used only in the RateBasedStatementCustomKey specification of a rate-based rule statement. When you specify an IP or forwarded IP in the custom key settings, you must also specify at least one other key to use. You can aggregate on only the forwarded IP address by specifying FORWARDED_IP in your rate-based statement's AggregateKeyType.

This data type supports using the forwarded IP address in the web request aggregation for a rate-based rule, in RateBasedStatementCustomKey. The JSON specification for using the forwarded IP address doesn't explicitly use this data type.

JSON specification: \"ForwardedIP\": {}

When you use this specification, you must also configure the forwarded IP address in the rate-based statement's ForwardedIPConfig.

", + "refs": { + "RateBasedStatementCustomKey$ForwardedIP": "

Use the first IP address in an HTTP header as an aggregate key. Each distinct forwarded IP address contributes to the aggregation instance.

When you specify an IP or forwarded IP in the custom key settings, you must also specify at least one other key to use. You can aggregate on only the forwarded IP address by specifying FORWARDED_IP in your rate-based statement's AggregateKeyType.

With this option, you must specify the header to use in the rate-based rule's ForwardedIPConfig property.

" + } + }, + "RateLimitHTTPMethod": { + "base": "

Specifies the request's HTTP method as an aggregate key for a rate-based rule. Each distinct HTTP method contributes to the aggregation instance. If you use just the HTTP method as your custom key, then each method fully defines an aggregation instance.

JSON specification: \"RateLimitHTTPMethod\": {}

", + "refs": { + "RateBasedStatementCustomKey$HTTPMethod": "

Use the request's HTTP method as an aggregate key. Each distinct HTTP method contributes to the aggregation instance. If you use just the HTTP method as your custom key, then each method fully defines an aggregation instance.

" + } + }, + "RateLimitHeader": { + "base": "

Specifies a header as an aggregate key for a rate-based rule. Each distinct value in the header contributes to the aggregation instance. If you use a single header as your custom key, then each value fully defines an aggregation instance.

", + "refs": { + "RateBasedStatementCustomKey$Header": "

Use the value of a header in the request as an aggregate key. Each distinct value in the header contributes to the aggregation instance. If you use a single header as your custom key, then each value fully defines an aggregation instance.

" + } + }, + "RateLimitIP": { + "base": "

Specifies the IP address in the web request as an aggregate key for a rate-based rule. Each distinct IP address contributes to the aggregation instance.

This setting is used only in the RateBasedStatementCustomKey specification of a rate-based rule statement. To use this in the custom key settings, you must specify at least one other key to use, along with the IP address. To aggregate on only the IP address, in your rate-based statement's AggregateKeyType, specify IP.

JSON specification: \"RateLimitIP\": {}

", + "refs": { + "RateBasedStatementCustomKey$IP": "

Use the request's originating IP address as an aggregate key. Each distinct IP address contributes to the aggregation instance.

When you specify an IP or forwarded IP in the custom key settings, you must also specify at least one other key to use. You can aggregate on only the IP address by specifying IP in your rate-based statement's AggregateKeyType.

" + } + }, + "RateLimitLabelNamespace": { + "base": "

Specifies a label namespace to use as an aggregate key for a rate-based rule. Each distinct fully qualified label name that has the specified label namespace contributes to the aggregation instance. If you use just one label namespace as your custom key, then each label name fully defines an aggregation instance.

This uses only labels that have been added to the request by rules that are evaluated before this rate-based rule in the web ACL.

For information about label namespaces and names, see Label syntax and naming requirements in the WAF Developer Guide.

", + "refs": { + "RateBasedStatementCustomKey$LabelNamespace": "

Use the specified label namespace as an aggregate key. Each distinct fully qualified label name that has the specified label namespace contributes to the aggregation instance. If you use just one label namespace as your custom key, then each label name fully defines an aggregation instance.

This uses only labels that have been added to the request by rules that are evaluated before this rate-based rule in the web ACL.

For information about label namespaces and names, see Label syntax and naming requirements in the WAF Developer Guide.

" + } + }, + "RateLimitQueryArgument": { + "base": "

Specifies a query argument in the request as an aggregate key for a rate-based rule. Each distinct value for the named query argument contributes to the aggregation instance. If you use a single query argument as your custom key, then each value fully defines an aggregation instance.

", + "refs": { + "RateBasedStatementCustomKey$QueryArgument": "

Use the specified query argument as an aggregate key. Each distinct value for the named query argument contributes to the aggregation instance. If you use a single query argument as your custom key, then each value fully defines an aggregation instance.

" + } + }, + "RateLimitQueryString": { + "base": "

Specifies the request's query string as an aggregate key for a rate-based rule. Each distinct string contributes to the aggregation instance. If you use just the query string as your custom key, then each string fully defines an aggregation instance.

", + "refs": { + "RateBasedStatementCustomKey$QueryString": "

Use the request's query string as an aggregate key. Each distinct string contributes to the aggregation instance. If you use just the query string as your custom key, then each string fully defines an aggregation instance.

" } }, "RedactedFields": { @@ -2130,7 +2200,7 @@ "refs": { "ManagedRuleGroupStatement$ScopeDownStatement": "

An optional nested statement that narrows the scope of the web requests that are evaluated by the managed rule group. Requests are only evaluated by the rule group if they match the scope-down statement. You can use any nestable Statement in the scope-down statement, and you can nest statements at any level, the same as you can for a rule statement.

", "NotStatement$Statement": "

The statement to negate. You can use any statement that can be nested.

", - "RateBasedStatement$ScopeDownStatement": "

An optional nested statement that narrows the scope of the web requests that are evaluated by the rate-based statement. Requests are only tracked by the rate-based statement if they match the scope-down statement. You can use any nestable Statement in the scope-down statement, and you can nest statements at any level, the same as you can for a rule statement.

", + "RateBasedStatement$ScopeDownStatement": "

An optional nested statement that narrows the scope of the web requests that are evaluated and managed by the rate-based statement. When you use a scope-down statement, the rate-based rule only tracks and rate limits requests that match the scope-down statement. You can use any nestable Statement in the scope-down statement, and you can nest statements at any level, the same as you can for a rule statement.

", "Rule$Statement": "

The WAF processing statement for the rule, for example ByteMatchStatement or SizeConstraintStatement.

", "Statements$member": null } @@ -2224,7 +2294,7 @@ "TextTransformationPriority": { "base": null, "refs": { - "TextTransformation$Priority": "

Sets the relative processing order for multiple transformations that are defined for a rule statement. WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don't need to be consecutive, but they must all be different.

" + "TextTransformation$Priority": "

Sets the relative processing order for multiple transformations. WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don't need to be consecutive, but they must all be different.

" } }, "TextTransformationType": { @@ -2236,12 +2306,16 @@ "TextTransformations": { "base": null, "refs": { - "ByteMatchStatement$TextTransformations": "

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, WAF performs all transformations on the content of the request component identified by FieldToMatch, starting from the lowest priority setting, before inspecting the content for a match.

", - "RegexMatchStatement$TextTransformations": "

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, WAF performs all transformations on the content of the request component identified by FieldToMatch, starting from the lowest priority setting, before inspecting the content for a match.

", - "RegexPatternSetReferenceStatement$TextTransformations": "

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, WAF performs all transformations on the content of the request component identified by FieldToMatch, starting from the lowest priority setting, before inspecting the content for a match.

", - "SizeConstraintStatement$TextTransformations": "

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, WAF performs all transformations on the content of the request component identified by FieldToMatch, starting from the lowest priority setting, before inspecting the content for a match.

", - "SqliMatchStatement$TextTransformations": "

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, WAF performs all transformations on the content of the request component identified by FieldToMatch, starting from the lowest priority setting, before inspecting the content for a match.

", - "XssMatchStatement$TextTransformations": "

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, WAF performs all transformations on the content of the request component identified by FieldToMatch, starting from the lowest priority setting, before inspecting the content for a match.

" + "ByteMatchStatement$TextTransformations": "

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.

", + "RateLimitCookie$TextTransformations": "

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.

", + "RateLimitHeader$TextTransformations": "

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.

", + "RateLimitQueryArgument$TextTransformations": "

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.

", + "RateLimitQueryString$TextTransformations": "

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.

", + "RegexMatchStatement$TextTransformations": "

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.

", + "RegexPatternSetReferenceStatement$TextTransformations": "

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.

", + "SizeConstraintStatement$TextTransformations": "

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.

", + "SqliMatchStatement$TextTransformations": "

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.

", + "XssMatchStatement$TextTransformations": "

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.

" } }, "TimeWindow": { @@ -2522,6 +2596,11 @@ "refs": { } }, + "WAFUnsupportedAggregateKeyTypeException": { + "base": "

The rule that you've named doesn't aggregate solely on the IP address or solely on the forwarded IP address. This call is only available for rate-based rules with an AggregateKeyType setting of IP or FORWARDED_IP.

", + "refs": { + } + }, "WebACL": { "base": "

A web ACL defines a collection of rules to use to inspect and control web requests. Each rule has an action defined (allow, block, or count) for requests that match the statement of the rule. In the web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a web ACL with one or more Amazon Web Services resources to protect. The resources can be an Amazon CloudFront distribution, an Amazon API Gateway REST API, an Application Load Balancer, an AppSync GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon Web Services Verified Access instance.

", "refs": { diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 59da1fba439..6b9002532d2 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -11777,11 +11777,47 @@ "ap-northeast-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, - "ca-central-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "profile-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-west-2" : { }, - "us-east-1" : { }, - "us-west-2" : { } + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "profile-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "profile-fips.us-east-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "profile-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "profile-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "profile-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } } }, "projects.iot1click" : { @@ -23522,6 +23558,23 @@ "us-iso-west-1" : { } } }, + "rbin" : { + "endpoints" : { + "fips-us-iso-east-1" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "deprecated" : true, + "hostname" : "rbin-fips.us-iso-east-1.c2s.ic.gov" + }, + "us-iso-east-1" : { + "variants" : [ { + "hostname" : "rbin-fips.us-iso-east-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] + } + } + }, "rds" : { "endpoints" : { "us-iso-east-1" : { }, @@ -23981,6 +24034,23 @@ "us-isob-east-1" : { } } }, + "rbin" : { + "endpoints" : { + "fips-us-isob-east-1" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "deprecated" : true, + "hostname" : "rbin-fips.us-isob-east-1.sc2s.sgov.gov" + }, + 
"us-isob-east-1" : { + "variants" : [ { + "hostname" : "rbin-fips.us-isob-east-1.sc2s.sgov.gov", + "tags" : [ "fips" ] + } ] + } + } + }, "rds" : { "endpoints" : { "us-isob-east-1" : { } diff --git a/service/detective/api.go b/service/detective/api.go index 5f2bbbeb545..a16562bca18 100644 --- a/service/detective/api.go +++ b/service/detective/api.go @@ -5962,6 +5962,9 @@ const ( // DatasourcePackageEksAudit is a DatasourcePackage enum value DatasourcePackageEksAudit = "EKS_AUDIT" + + // DatasourcePackageAsffSecurityhubFinding is a DatasourcePackage enum value + DatasourcePackageAsffSecurityhubFinding = "ASFF_SECURITYHUB_FINDING" ) // DatasourcePackage_Values returns all elements of the DatasourcePackage enum @@ -5969,6 +5972,7 @@ func DatasourcePackage_Values() []string { return []string{ DatasourcePackageDetectiveCore, DatasourcePackageEksAudit, + DatasourcePackageAsffSecurityhubFinding, } } diff --git a/service/directconnect/api.go b/service/directconnect/api.go index 06ecbfbe9cc..c20adbce31e 100644 --- a/service/directconnect/api.go +++ b/service/directconnect/api.go @@ -12938,7 +12938,7 @@ type NewTransitVirtualInterfaceAllocation struct { CustomerAddress *string `locationName:"customerAddress" type:"string"` // The maximum transmission unit (MTU), in bytes. The supported values are 1500 - // and 9001. The default value is 1500. + // and 8500. The default value is 1500 Mtu *int64 `locationName:"mtu" type:"integer"` // The tags associated with the transitive virtual interface. @@ -14435,7 +14435,7 @@ type UpdateVirtualInterfaceAttributesOutput struct { Location *string `locationName:"location" type:"string"` // The maximum transmission unit (MTU), in bytes. The supported values are 1500 - // and 9001. The default value is 1500. + // and 8500. The default value is 1500 Mtu *int64 `locationName:"mtu" type:"integer"` // The ID of the Amazon Web Services account that owns the virtual interface. 
@@ -14780,7 +14780,7 @@ type VirtualInterface struct { Location *string `locationName:"location" type:"string"` // The maximum transmission unit (MTU), in bytes. The supported values are 1500 - // and 9001. The default value is 1500. + // and 8500. The default value is 1500 Mtu *int64 `locationName:"mtu" type:"integer"` // The ID of the Amazon Web Services account that owns the virtual interface. diff --git a/service/glue/api.go b/service/glue/api.go index 2a95c0e6cdd..a0deffb2410 100644 --- a/service/glue/api.go +++ b/service/glue/api.go @@ -29921,6 +29921,9 @@ type CreateCustomEntityTypeInput struct { // // RegexString is a required field RegexString *string `min:"1" type:"string" required:"true"` + + // A list of tags applied to the custom entity type. + Tags map[string]*string `type:"map"` } // String returns the string representation. @@ -29984,6 +29987,12 @@ func (s *CreateCustomEntityTypeInput) SetRegexString(v string) *CreateCustomEnti return s } +// SetTags sets the Tags field's value. +func (s *CreateCustomEntityTypeInput) SetTags(v map[string]*string) *CreateCustomEntityTypeInput { + s.Tags = v + return s +} + type CreateCustomEntityTypeOutput struct { _ struct{} `type:"structure"` @@ -52849,6 +52858,9 @@ type ListCustomEntityTypesInput struct { // A paginated token to offset the results. NextToken *string `type:"string"` + + // A list of key-value pair tags. + Tags map[string]*string `type:"map"` } // String returns the string representation. @@ -52894,6 +52906,12 @@ func (s *ListCustomEntityTypesInput) SetNextToken(v string) *ListCustomEntityTyp return s } +// SetTags sets the Tags field's value. 
+func (s *ListCustomEntityTypesInput) SetTags(v map[string]*string) *ListCustomEntityTypesInput { + s.Tags = v + return s +} + type ListCustomEntityTypesOutput struct { _ struct{} `type:"structure"` diff --git a/service/secretsmanager/api.go b/service/secretsmanager/api.go index 4380a5c3236..44b2f36afcd 100644 --- a/service/secretsmanager/api.go +++ b/service/secretsmanager/api.go @@ -3427,6 +3427,9 @@ type DeleteSecretInput struct { // deleted. If you delete a secret and then immediately create a secret with // the same name, use appropriate back off and retry logic. // + // If you forcibly delete an already deleted or nonexistent secret, the operation + // does not return ResourceNotFoundException. + // // Use this parameter with caution. This parameter causes the operation to skip // the normal recovery window before the permanent deletion that Secrets Manager // would normally impose with the RecoveryWindowInDays parameter. If you delete @@ -3647,9 +3650,8 @@ type DescribeSecretOutput struct { // The name of the secret. Name *string `min:"1" type:"string"` - // The next date and time that Secrets Manager will rotate the secret, rounded - // to the nearest hour. If the secret isn't configured for rotation, Secrets - // Manager returns null. + // The next rotation is scheduled to occur on or before this date. If the secret + // isn't configured for rotation, Secrets Manager returns null. NextRotationDate *time.Time `type:"timestamp"` // The ID of the service that created this secret. For more information, see @@ -6500,9 +6502,8 @@ type SecretListEntry struct { // in the folder prod. Name *string `min:"1" type:"string"` - // The next date and time that Secrets Manager will attempt to rotate the secret, - // rounded to the nearest hour. This value is null if the secret is not set - // up for rotation. + // The next rotation is scheduled to occur on or before this date. If the secret + // isn't configured for rotation, Secrets Manager returns null. 
NextRotationDate *time.Time `type:"timestamp"` // Returns the name of the service that created the secret. diff --git a/service/wafv2/api.go b/service/wafv2/api.go index 7c55ad895f3..f8f980dc312 100644 --- a/service/wafv2/api.go +++ b/service/wafv2/api.go @@ -2814,8 +2814,11 @@ func (c *WAFV2) GetRateBasedStatementManagedKeysRequest(input *GetRateBasedState // GetRateBasedStatementManagedKeys API operation for AWS WAFV2. // -// Retrieves the keys that are currently blocked by a rate-based rule instance. -// The maximum number of managed keys that can be blocked for a single rate-based +// Retrieves the IP addresses that are currently blocked by a rate-based rule +// instance. This is only available for rate-based rules that aggregate solely +// on the IP address or on the forwarded IP address. +// +// The maximum number of addresses that can be blocked for a single rate-based // rule instance is 10,000. If more than 10,000 addresses exceed the rate limit, // those with the highest rates are blocked. // @@ -2868,6 +2871,11 @@ func (c *WAFV2) GetRateBasedStatementManagedKeysRequest(input *GetRateBasedState // - WAFInvalidOperationException // The operation isn't valid. // +// - WAFUnsupportedAggregateKeyTypeException +// The rule that you've named doesn't aggregate solely on the IP address or +// solely on the forwarded IP address. This call is only available for rate-based +// rules with an AggregateKeyType setting of IP or FORWARDED_IP. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/GetRateBasedStatementManagedKeys func (c *WAFV2) GetRateBasedStatementManagedKeys(input *GetRateBasedStatementManagedKeysInput) (*GetRateBasedStatementManagedKeysOutput, error) { req, out := c.GetRateBasedStatementManagedKeysRequest(input) @@ -6910,10 +6918,13 @@ type ByteMatchStatement struct { SearchString []byte `type:"blob" required:"true"` // Text transformations eliminate some of the unusual formatting that attackers - // use in web requests in an effort to bypass detection. If you specify one - // or more transformations in a rule statement, WAF performs all transformations - // on the content of the request component identified by FieldToMatch, starting - // from the lowest priority setting, before inspecting the content for a match. + // use in web requests in an effort to bypass detection. Text transformations + // are used in rule match statements, to transform the FieldToMatch request + // component before inspecting it, and they're used in rate-based rule statements, + // to transform request components before using them as custom aggregation keys. + // If you specify one or more transformations to apply, WAF performs all transformations + // on the specified content, starting from the lowest priority setting, and + // then uses the component contents. // // TextTransformations is a required field TextTransformations []*TextTransformation `min:"1" type:"list" required:"true"` @@ -17314,60 +17325,121 @@ func (s QueryString) GoString() string { return s.String() } -// A rate-based rule tracks the rate of requests for each originating IP address, -// and triggers the rule action when the rate exceeds a limit that you specify -// on the number of requests in any 5-minute time span. You can use this to -// put a temporary block on requests from an IP address that is sending excessive -// requests. 
+// A rate-based rule counts incoming requests and rate limits requests when +// they are coming at too fast a rate. The rule categorizes requests according +// to your aggregation criteria, collects them into aggregation instances, and +// counts and rate limits the requests for each instance. // -// WAF tracks and manages web requests separately for each instance of a rate-based -// rule that you use. For example, if you provide the same rate-based rule settings -// in two web ACLs, each of the two rule statements represents a separate instance -// of the rate-based rule and gets its own tracking and management by WAF. If -// you define a rate-based rule inside a rule group, and then use that rule -// group in multiple places, each use creates a separate instance of the rate-based -// rule that gets its own tracking and management by WAF. +// You can specify individual aggregation keys, like IP address or HTTP method. +// You can also specify aggregation key combinations, like IP address and HTTP +// method, or HTTP method, query argument, and cookie. // -// When the rule action triggers, WAF blocks additional requests from the IP -// address until the request rate falls below the limit. +// Each unique set of values for the aggregation keys that you specify is a +// separate aggregation instance, with the value from each key contributing +// to the aggregation instance definition. // -// You can optionally nest another statement inside the rate-based statement, -// to narrow the scope of the rule so that it only counts requests that match -// the nested statement. 
For example, based on recent requests that you have -// seen from an attacker, you might create a rate-based rule with a nested AND -// rule statement that contains the following nested statements: +// For example, assume the rule evaluates web requests with the following IP +// address and HTTP method values: +// +// - IP address 10.1.1.1, HTTP method POST +// +// - IP address 10.1.1.1, HTTP method GET +// +// - IP address 127.0.0.0, HTTP method POST +// +// - IP address 10.1.1.1, HTTP method GET +// +// The rule would create different aggregation instances according to your aggregation +// criteria, for example: +// +// - If the aggregation criteria is just the IP address, then each individual +// address is an aggregation instance, and WAF counts requests separately +// for each. The aggregation instances and request counts for our example +// would be the following: IP address 10.1.1.1: count 3 IP address 127.0.0.0: +// count 1 // -// - An IP match statement with an IP set that specifies the address 192.0.2.44. +// - If the aggregation criteria is HTTP method, then each individual HTTP +// method is an aggregation instance. The aggregation instances and request +// counts for our example would be the following: HTTP method POST: count +// 2 HTTP method GET: count 2 // -// - A string match statement that searches in the User-Agent header for -// the string BadBot. +// - If the aggregation criteria is IP address and HTTP method, then each +// IP address and each HTTP method would contribute to the combined aggregation +// instance. The aggregation instances and request counts for our example +// would be the following: IP address 10.1.1.1, HTTP method POST: count 1 +// IP address 10.1.1.1, HTTP method GET: count 2 IP address 127.0.0.0, HTTP +// method POST: count 1 // -// In this rate-based rule, you also define a rate limit. For this example, -// the rate limit is 1,000. Requests that meet the criteria of both of the nested -// statements are counted. 
If the count exceeds 1,000 requests per five minutes, -// the rule action triggers. Requests that do not meet the criteria of both -// of the nested statements are not counted towards the rate limit and are not -// affected by this rule. +// For any n-tuple of aggregation keys, each unique combination of values for +// the keys defines a separate aggregation instance, which WAF counts and rate-limits +// individually. +// +// You can optionally nest another statement inside the rate-based statement, +// to narrow the scope of the rule so that it only counts and rate limits requests +// that match the nested statement. You can use this nested scope-down statement +// in conjunction with your aggregation key specifications or you can just count +// and rate limit all requests that match the scope-down statement, without +// additional aggregation. When you choose to just manage all requests that +// match a scope-down statement, the aggregation instance is singular for the +// rule. // // You cannot nest a RateBasedStatement inside another statement, for example // inside a NotStatement or OrStatement. You can define a RateBasedStatement // inside a web ACL and inside a rule group. +// +// For additional information about the options, see Rate limiting web requests +// using rate-based rules (https://docs.aws.amazon.com/waf/latest/developerguide/waf-rate-based-rules.html) +// in the WAF Developer Guide. +// +// If you only aggregate on the individual IP address or forwarded IP address, +// you can retrieve the list of IP addresses that WAF is currently rate limiting +// for a rule through the API call GetRateBasedStatementManagedKeys. This option +// is not available for other aggregation configurations. +// +// WAF tracks and manages web requests separately for each instance of a rate-based +// rule that you use. 
For example, if you provide the same rate-based rule settings +// in two web ACLs, each of the two rule statements represents a separate instance +// of the rate-based rule and gets its own tracking and management by WAF. If +// you define a rate-based rule inside a rule group, and then use that rule +// group in multiple places, each use creates a separate instance of the rate-based +// rule that gets its own tracking and management by WAF. type RateBasedStatement struct { _ struct{} `type:"structure"` - // Setting that indicates how to aggregate the request counts. The options are - // the following: + // Setting that indicates how to aggregate the request counts. // - // * IP - Aggregate the request counts on the IP address from the web request - // origin. + // Web requests that are missing any of the components specified in the aggregation + // keys are omitted from the rate-based rule evaluation and handling. + // + // * CONSTANT - Count and limit the requests that match the rate-based rule's + // scope-down statement. With this option, the counted requests aren't further + // aggregated. The scope-down statement is the only specification used. When + // the count of all requests that satisfy the scope-down statement goes over + // the limit, WAF applies the rule action to all requests that satisfy the + // scope-down statement. With this option, you must configure the ScopeDownStatement + // property. + // + // * CUSTOM_KEYS - Aggregate the request counts using one or more web request + // components as the aggregate keys. With this option, you must specify the + // aggregate keys in the CustomKeys property. To aggregate on only the IP + // address or only the forwarded IP address, don't use custom keys. Instead, + // set the aggregate key type to IP or FORWARDED_IP. // // * FORWARDED_IP - Aggregate the request counts on the first IP address - // in an HTTP header. If you use this, configure the ForwardedIPConfig, to - // specify the header to use. 
+ // in an HTTP header. With this option, you must specify the header to use + // in the ForwardedIPConfig property. To aggregate on a combination of the + // forwarded IP address with other aggregate keys, use CUSTOM_KEYS. + // + // * IP - Aggregate the request counts on the IP address from the web request + // origin. To aggregate on a combination of the IP address with other aggregate + // keys, use CUSTOM_KEYS. // // AggregateKeyType is a required field AggregateKeyType *string `type:"string" required:"true" enum:"RateBasedStatementAggregateKeyType"` + // Specifies the aggregate keys to use in a rate-base rule. + CustomKeys []*RateBasedStatementCustomKey `min:"1" type:"list"` + // The configuration for inspecting IP addresses in an HTTP header that you // specify, instead of using the IP address that's reported by the web request // origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify @@ -17376,21 +17448,31 @@ type RateBasedStatement struct { // If the specified header isn't present in the request, WAF doesn't apply the // rule to the web request at all. // - // This is required if AggregateKeyType is set to FORWARDED_IP. + // This is required if you specify a forwarded IP in the rule's aggregate key + // settings. ForwardedIPConfig *ForwardedIPConfig `type:"structure"` - // The limit on requests per 5-minute period for a single originating IP address. - // If the statement includes a ScopeDownStatement, this limit is applied only - // to the requests that match the statement. + // The limit on requests per 5-minute period for a single aggregation instance + // for the rate-based rule. If the rate-based statement includes a ScopeDownStatement, + // this limit is applied only to the requests that match the statement. + // + // Examples: + // + // * If you aggregate on just the IP address, this is the limit on requests + // from any single IP address. 
+ // + // * If you aggregate on the HTTP method and the query argument name "city", + // then this is the limit on requests for any single method, city pair. // // Limit is a required field Limit *int64 `min:"100" type:"long" required:"true"` // An optional nested statement that narrows the scope of the web requests that - // are evaluated by the rate-based statement. Requests are only tracked by the - // rate-based statement if they match the scope-down statement. You can use - // any nestable Statement in the scope-down statement, and you can nest statements - // at any level, the same as you can for a rule statement. + // are evaluated and managed by the rate-based statement. When you use a scope-down + // statement, the rate-based rule only tracks and rate limits requests that + // match the scope-down statement. You can use any nestable Statement in the + // scope-down statement, and you can nest statements at any level, the same + // as you can for a rule statement. ScopeDownStatement *Statement `type:"structure"` } @@ -17418,12 +17500,25 @@ func (s *RateBasedStatement) Validate() error { if s.AggregateKeyType == nil { invalidParams.Add(request.NewErrParamRequired("AggregateKeyType")) } + if s.CustomKeys != nil && len(s.CustomKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CustomKeys", 1)) + } if s.Limit == nil { invalidParams.Add(request.NewErrParamRequired("Limit")) } if s.Limit != nil && *s.Limit < 100 { invalidParams.Add(request.NewErrParamMinValue("Limit", 100)) } + if s.CustomKeys != nil { + for i, v := range s.CustomKeys { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CustomKeys", i), err.(request.ErrInvalidParams)) + } + } + } if s.ForwardedIPConfig != nil { if err := s.ForwardedIPConfig.Validate(); err != nil { invalidParams.AddNested("ForwardedIPConfig", err.(request.ErrInvalidParams)) @@ -17447,6 +17542,12 @@ func (s *RateBasedStatement) SetAggregateKeyType(v 
string) *RateBasedStatement { return s } +// SetCustomKeys sets the CustomKeys field's value. +func (s *RateBasedStatement) SetCustomKeys(v []*RateBasedStatementCustomKey) *RateBasedStatement { + s.CustomKeys = v + return s +} + // SetForwardedIPConfig sets the ForwardedIPConfig field's value. func (s *RateBasedStatement) SetForwardedIPConfig(v *ForwardedIPConfig) *RateBasedStatement { s.ForwardedIPConfig = v @@ -17465,15 +17566,73 @@ func (s *RateBasedStatement) SetScopeDownStatement(v *Statement) *RateBasedState return s } -// The set of IP addresses that are currently blocked for a RateBasedStatement. -type RateBasedStatementManagedKeysIPSet struct { +// Specifies a single custom aggregate key for a rate-base rule. +// +// Web requests that are missing any of the components specified in the aggregation +// keys are omitted from the rate-based rule evaluation and handling. +type RateBasedStatementCustomKey struct { _ struct{} `type:"structure"` - // The IP addresses that are currently blocked. - Addresses []*string `type:"list"` + // Use the value of a cookie in the request as an aggregate key. Each distinct + // value in the cookie contributes to the aggregation instance. If you use a + // single cookie as your custom key, then each value fully defines an aggregation + // instance. + Cookie *RateLimitCookie `type:"structure"` + + // Use the first IP address in an HTTP header as an aggregate key. Each distinct + // forwarded IP address contributes to the aggregation instance. + // + // When you specify an IP or forwarded IP in the custom key settings, you must + // also specify at least one other key to use. You can aggregate on only the + // forwarded IP address by specifying FORWARDED_IP in your rate-based statement's + // AggregateKeyType. + // + // With this option, you must specify the header to use in the rate-based rule's + // ForwardedIPConfig property. 
+ ForwardedIP *RateLimitForwardedIP `type:"structure"` + + // Use the request's HTTP method as an aggregate key. Each distinct HTTP method + // contributes to the aggregation instance. If you use just the HTTP method + // as your custom key, then each method fully defines an aggregation instance. + HTTPMethod *RateLimitHTTPMethod `type:"structure"` + + // Use the value of a header in the request as an aggregate key. Each distinct + // value in the header contributes to the aggregation instance. If you use a + // single header as your custom key, then each value fully defines an aggregation + // instance. + Header *RateLimitHeader `type:"structure"` + + // Use the request's originating IP address as an aggregate key. Each distinct + // IP address contributes to the aggregation instance. + // + // When you specify an IP or forwarded IP in the custom key settings, you must + // also specify at least one other key to use. You can aggregate on only the + // IP address by specifying IP in your rate-based statement's AggregateKeyType. + IP *RateLimitIP `type:"structure"` + + // Use the specified label namespace as an aggregate key. Each distinct fully + // qualified label name that has the specified label namespace contributes to + // the aggregation instance. If you use just one label namespace as your custom + // key, then each label name fully defines an aggregation instance. + // + // This uses only labels that have been added to the request by rules that are + // evaluated before this rate-based rule in the web ACL. + // + // For information about label namespaces and names, see Label syntax and naming + // requirements (https://docs.aws.amazon.com/waf/latest/developerguide/waf-rule-label-requirements.html) + // in the WAF Developer Guide. + LabelNamespace *RateLimitLabelNamespace `type:"structure"` - // The version of the IP addresses, either IPV4 or IPV6. 
- IPAddressVersion *string `type:"string" enum:"IPAddressVersion"` + // Use the specified query argument as an aggregate key. Each distinct value + // for the named query argument contributes to the aggregation instance. If + // you use a single query argument as your custom key, then each value fully + // defines an aggregation instance. + QueryArgument *RateLimitQueryArgument `type:"structure"` + + // Use the request's query string as an aggregate key. Each distinct string + // contributes to the aggregation instance. If you use just the query string + // as your custom key, then each string fully defines an aggregation instance. + QueryString *RateLimitQueryString `type:"structure"` } // String returns the string representation. @@ -17481,7 +17640,7 @@ type RateBasedStatementManagedKeysIPSet struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s RateBasedStatementManagedKeysIPSet) String() string { +func (s RateBasedStatementCustomKey) String() string { return awsutil.Prettify(s) } @@ -17490,28 +17649,115 @@ func (s RateBasedStatementManagedKeysIPSet) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s RateBasedStatementManagedKeysIPSet) GoString() string { +func (s RateBasedStatementCustomKey) GoString() string { return s.String() } -// SetAddresses sets the Addresses field's value. -func (s *RateBasedStatementManagedKeysIPSet) SetAddresses(v []*string) *RateBasedStatementManagedKeysIPSet { - s.Addresses = v +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RateBasedStatementCustomKey) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RateBasedStatementCustomKey"} + if s.Cookie != nil { + if err := s.Cookie.Validate(); err != nil { + invalidParams.AddNested("Cookie", err.(request.ErrInvalidParams)) + } + } + if s.Header != nil { + if err := s.Header.Validate(); err != nil { + invalidParams.AddNested("Header", err.(request.ErrInvalidParams)) + } + } + if s.LabelNamespace != nil { + if err := s.LabelNamespace.Validate(); err != nil { + invalidParams.AddNested("LabelNamespace", err.(request.ErrInvalidParams)) + } + } + if s.QueryArgument != nil { + if err := s.QueryArgument.Validate(); err != nil { + invalidParams.AddNested("QueryArgument", err.(request.ErrInvalidParams)) + } + } + if s.QueryString != nil { + if err := s.QueryString.Validate(); err != nil { + invalidParams.AddNested("QueryString", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCookie sets the Cookie field's value. +func (s *RateBasedStatementCustomKey) SetCookie(v *RateLimitCookie) *RateBasedStatementCustomKey { + s.Cookie = v return s } -// SetIPAddressVersion sets the IPAddressVersion field's value. -func (s *RateBasedStatementManagedKeysIPSet) SetIPAddressVersion(v string) *RateBasedStatementManagedKeysIPSet { - s.IPAddressVersion = &v +// SetForwardedIP sets the ForwardedIP field's value. +func (s *RateBasedStatementCustomKey) SetForwardedIP(v *RateLimitForwardedIP) *RateBasedStatementCustomKey { + s.ForwardedIP = v return s } -// A single regular expression. This is used in a RegexPatternSet. -type Regex struct { +// SetHTTPMethod sets the HTTPMethod field's value. +func (s *RateBasedStatementCustomKey) SetHTTPMethod(v *RateLimitHTTPMethod) *RateBasedStatementCustomKey { + s.HTTPMethod = v + return s +} + +// SetHeader sets the Header field's value. 
+func (s *RateBasedStatementCustomKey) SetHeader(v *RateLimitHeader) *RateBasedStatementCustomKey { + s.Header = v + return s +} + +// SetIP sets the IP field's value. +func (s *RateBasedStatementCustomKey) SetIP(v *RateLimitIP) *RateBasedStatementCustomKey { + s.IP = v + return s +} + +// SetLabelNamespace sets the LabelNamespace field's value. +func (s *RateBasedStatementCustomKey) SetLabelNamespace(v *RateLimitLabelNamespace) *RateBasedStatementCustomKey { + s.LabelNamespace = v + return s +} + +// SetQueryArgument sets the QueryArgument field's value. +func (s *RateBasedStatementCustomKey) SetQueryArgument(v *RateLimitQueryArgument) *RateBasedStatementCustomKey { + s.QueryArgument = v + return s +} + +// SetQueryString sets the QueryString field's value. +func (s *RateBasedStatementCustomKey) SetQueryString(v *RateLimitQueryString) *RateBasedStatementCustomKey { + s.QueryString = v + return s +} + +// The set of IP addresses that are currently blocked for a RateBasedStatement. +// This is only available for rate-based rules that aggregate on just the IP +// address, with the AggregateKeyType set to IP or FORWARDED_IP. +// +// A rate-based rule applies its rule action to requests from IP addresses that +// are in the rule's managed keys list and that match the rule's scope-down +// statement. When a rule has no scope-down statement, it applies the action +// to all requests from the IP addresses that are in the list. The rule applies +// its rule action to rate limit the matching requests. The action is usually +// Block but it can be any valid rule action except for Allow. +// +// The maximum number of IP addresses that can be rate limited by a single rate-based +// rule instance is 10,000. If more than 10,000 addresses exceed the rate limit, +// WAF limits those with the highest rates. +type RateBasedStatementManagedKeysIPSet struct { _ struct{} `type:"structure"` - // The string representing the regular expression. 
- RegexString *string `min:"1" type:"string"` + // The IP addresses that are currently blocked. + Addresses []*string `type:"list"` + + // The version of the IP addresses, either IPV4 or IPV6. + IPAddressVersion *string `type:"string" enum:"IPAddressVersion"` } // String returns the string representation. @@ -17519,7 +17765,7 @@ type Regex struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s Regex) String() string { +func (s RateBasedStatementManagedKeysIPSet) String() string { return awsutil.Prettify(s) } @@ -17528,49 +17774,42 @@ func (s Regex) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s Regex) GoString() string { +func (s RateBasedStatementManagedKeysIPSet) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *Regex) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Regex"} - if s.RegexString != nil && len(*s.RegexString) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RegexString", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetAddresses sets the Addresses field's value. +func (s *RateBasedStatementManagedKeysIPSet) SetAddresses(v []*string) *RateBasedStatementManagedKeysIPSet { + s.Addresses = v + return s } -// SetRegexString sets the RegexString field's value. -func (s *Regex) SetRegexString(v string) *Regex { - s.RegexString = &v +// SetIPAddressVersion sets the IPAddressVersion field's value. 
+func (s *RateBasedStatementManagedKeysIPSet) SetIPAddressVersion(v string) *RateBasedStatementManagedKeysIPSet { + s.IPAddressVersion = &v return s } -// A rule statement used to search web request components for a match against -// a single regular expression. -type RegexMatchStatement struct { +// Specifies a cookie as an aggregate key for a rate-based rule. Each distinct +// value in the cookie contributes to the aggregation instance. If you use a +// single cookie as your custom key, then each value fully defines an aggregation +// instance. +type RateLimitCookie struct { _ struct{} `type:"structure"` - // The part of the web request that you want WAF to inspect. - // - // FieldToMatch is a required field - FieldToMatch *FieldToMatch `type:"structure" required:"true"` - - // The string representing the regular expression. + // The name of the cookie to use. // - // RegexString is a required field - RegexString *string `min:"1" type:"string" required:"true"` + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` // Text transformations eliminate some of the unusual formatting that attackers - // use in web requests in an effort to bypass detection. If you specify one - // or more transformations in a rule statement, WAF performs all transformations - // on the content of the request component identified by FieldToMatch, starting - // from the lowest priority setting, before inspecting the content for a match. + // use in web requests in an effort to bypass detection. Text transformations + // are used in rule match statements, to transform the FieldToMatch request + // component before inspecting it, and they're used in rate-based rule statements, + // to transform request components before using them as custom aggregation keys. 
+ // If you specify one or more transformations to apply, WAF performs all transformations + // on the specified content, starting from the lowest priority setting, and + // then uses the component contents. // // TextTransformations is a required field TextTransformations []*TextTransformation `min:"1" type:"list" required:"true"` @@ -17581,7 +17820,7 @@ type RegexMatchStatement struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s RegexMatchStatement) String() string { +func (s RateLimitCookie) String() string { return awsutil.Prettify(s) } @@ -17590,21 +17829,18 @@ func (s RegexMatchStatement) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s RegexMatchStatement) GoString() string { +func (s RateLimitCookie) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *RegexMatchStatement) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RegexMatchStatement"} - if s.FieldToMatch == nil { - invalidParams.Add(request.NewErrParamRequired("FieldToMatch")) - } - if s.RegexString == nil { - invalidParams.Add(request.NewErrParamRequired("RegexString")) +func (s *RateLimitCookie) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RateLimitCookie"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.RegexString != nil && len(*s.RegexString) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RegexString", 1)) + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } if s.TextTransformations == nil { invalidParams.Add(request.NewErrParamRequired("TextTransformations")) @@ -17612,11 +17848,6 @@ func (s *RegexMatchStatement) Validate() error { if s.TextTransformations != nil && len(s.TextTransformations) < 1 { invalidParams.Add(request.NewErrParamMinLen("TextTransformations", 1)) } - if s.FieldToMatch != nil { - if err := s.FieldToMatch.Validate(); err != nil { - invalidParams.AddNested("FieldToMatch", err.(request.ErrInvalidParams)) - } - } if s.TextTransformations != nil { for i, v := range s.TextTransformations { if v == nil { @@ -17634,46 +17865,39 @@ func (s *RegexMatchStatement) Validate() error { return nil } -// SetFieldToMatch sets the FieldToMatch field's value. -func (s *RegexMatchStatement) SetFieldToMatch(v *FieldToMatch) *RegexMatchStatement { - s.FieldToMatch = v - return s -} - -// SetRegexString sets the RegexString field's value. -func (s *RegexMatchStatement) SetRegexString(v string) *RegexMatchStatement { - s.RegexString = &v +// SetName sets the Name field's value. +func (s *RateLimitCookie) SetName(v string) *RateLimitCookie { + s.Name = &v return s } // SetTextTransformations sets the TextTransformations field's value. 
-func (s *RegexMatchStatement) SetTextTransformations(v []*TextTransformation) *RegexMatchStatement { +func (s *RateLimitCookie) SetTextTransformations(v []*TextTransformation) *RateLimitCookie { s.TextTransformations = v return s } -// Contains one or more regular expressions. +// Specifies the first IP address in an HTTP header as an aggregate key for +// a rate-based rule. Each distinct forwarded IP address contributes to the +// aggregation instance. // -// WAF assigns an ARN to each RegexPatternSet that you create. To use a set -// in a rule, you provide the ARN to the Rule statement RegexPatternSetReferenceStatement. -type RegexPatternSet struct { +// This setting is used only in the RateBasedStatementCustomKey specification +// of a rate-based rule statement. When you specify an IP or forwarded IP in +// the custom key settings, you must also specify at least one other key to +// use. You can aggregate on only the forwarded IP address by specifying FORWARDED_IP +// in your rate-based statement's AggregateKeyType. +// +// This data type supports using the forwarded IP address in the web request +// aggregation for a rate-based rule, in RateBasedStatementCustomKey. The JSON +// specification for using the forwarded IP address doesn't explicitly use this +// data type. +// +// JSON specification: "ForwardedIP": {} +// +// When you use this specification, you must also configure the forwarded IP +// address in the rate-based statement's ForwardedIPConfig. +type RateLimitForwardedIP struct { _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the entity. - ARN *string `min:"20" type:"string"` - - // A description of the set that helps with identification. - Description *string `min:"1" type:"string"` - - // A unique identifier for the set. This ID is returned in the responses to - // create and list commands. You provide it to operations like update and delete. - Id *string `min:"1" type:"string"` - - // The name of the set. 
You cannot change the name after you create the set. - Name *string `min:"1" type:"string"` - - // The regular expression patterns in the set. - RegularExpressionList []*Regex `type:"list"` } // String returns the string representation. @@ -17681,7 +17905,7 @@ type RegexPatternSet struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s RegexPatternSet) String() string { +func (s RateLimitForwardedIP) String() string { return awsutil.Prettify(s) } @@ -17690,37 +17914,592 @@ func (s RegexPatternSet) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s RegexPatternSet) GoString() string { +func (s RateLimitForwardedIP) GoString() string { return s.String() } -// SetARN sets the ARN field's value. -func (s *RegexPatternSet) SetARN(v string) *RegexPatternSet { - s.ARN = &v - return s +// Specifies the request's HTTP method as an aggregate key for a rate-based +// rule. Each distinct HTTP method contributes to the aggregation instance. +// If you use just the HTTP method as your custom key, then each method fully +// defines an aggregation instance. +// +// JSON specification: "RateLimitHTTPMethod": {} +type RateLimitHTTPMethod struct { + _ struct{} `type:"structure"` } -// SetDescription sets the Description field's value. -func (s *RegexPatternSet) SetDescription(v string) *RegexPatternSet { - s.Description = &v - return s +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s RateLimitHTTPMethod) String() string { + return awsutil.Prettify(s) } -// SetId sets the Id field's value. -func (s *RegexPatternSet) SetId(v string) *RegexPatternSet { - s.Id = &v - return s +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RateLimitHTTPMethod) GoString() string { + return s.String() } -// SetName sets the Name field's value. -func (s *RegexPatternSet) SetName(v string) *RegexPatternSet { - s.Name = &v - return s -} +// Specifies a header as an aggregate key for a rate-based rule. Each distinct +// value in the header contributes to the aggregation instance. If you use a +// single header as your custom key, then each value fully defines an aggregation +// instance. +type RateLimitHeader struct { + _ struct{} `type:"structure"` -// SetRegularExpressionList sets the RegularExpressionList field's value. -func (s *RegexPatternSet) SetRegularExpressionList(v []*Regex) *RegexPatternSet { - s.RegularExpressionList = v + // The name of the header to use. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // Text transformations eliminate some of the unusual formatting that attackers + // use in web requests in an effort to bypass detection. Text transformations + // are used in rule match statements, to transform the FieldToMatch request + // component before inspecting it, and they're used in rate-based rule statements, + // to transform request components before using them as custom aggregation keys. + // If you specify one or more transformations to apply, WAF performs all transformations + // on the specified content, starting from the lowest priority setting, and + // then uses the component contents. 
+ // + // TextTransformations is a required field + TextTransformations []*TextTransformation `min:"1" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RateLimitHeader) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RateLimitHeader) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RateLimitHeader) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RateLimitHeader"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.TextTransformations == nil { + invalidParams.Add(request.NewErrParamRequired("TextTransformations")) + } + if s.TextTransformations != nil && len(s.TextTransformations) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TextTransformations", 1)) + } + if s.TextTransformations != nil { + for i, v := range s.TextTransformations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TextTransformations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *RateLimitHeader) SetName(v string) *RateLimitHeader { + s.Name = &v + return s +} + +// SetTextTransformations sets the TextTransformations field's value. 
+func (s *RateLimitHeader) SetTextTransformations(v []*TextTransformation) *RateLimitHeader { + s.TextTransformations = v + return s +} + +// Specifies the IP address in the web request as an aggregate key for a rate-based +// rule. Each distinct IP address contributes to the aggregation instance. +// +// This setting is used only in the RateBasedStatementCustomKey specification +// of a rate-based rule statement. To use this in the custom key settings, you +// must specify at least one other key to use, along with the IP address. To +// aggregate on only the IP address, in your rate-based statement's AggregateKeyType, +// specify IP. +// +// JSON specification: "RateLimitIP": {} +type RateLimitIP struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RateLimitIP) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RateLimitIP) GoString() string { + return s.String() +} + +// Specifies a label namespace to use as an aggregate key for a rate-based rule. +// Each distinct fully qualified label name that has the specified label namespace +// contributes to the aggregation instance. If you use just one label namespace +// as your custom key, then each label name fully defines an aggregation instance. +// +// This uses only labels that have been added to the request by rules that are +// evaluated before this rate-based rule in the web ACL. 
+// +// For information about label namespaces and names, see Label syntax and naming +// requirements (https://docs.aws.amazon.com/waf/latest/developerguide/waf-rule-label-requirements.html) +// in the WAF Developer Guide. +type RateLimitLabelNamespace struct { + _ struct{} `type:"structure"` + + // The namespace to use for aggregation. + // + // Namespace is a required field + Namespace *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RateLimitLabelNamespace) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RateLimitLabelNamespace) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RateLimitLabelNamespace) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RateLimitLabelNamespace"} + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNamespace sets the Namespace field's value. +func (s *RateLimitLabelNamespace) SetNamespace(v string) *RateLimitLabelNamespace { + s.Namespace = &v + return s +} + +// Specifies a query argument in the request as an aggregate key for a rate-based +// rule. Each distinct value for the named query argument contributes to the +// aggregation instance. 
If you use a single query argument as your custom key, +// then each value fully defines an aggregation instance. +type RateLimitQueryArgument struct { + _ struct{} `type:"structure"` + + // The name of the query argument to use. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // Text transformations eliminate some of the unusual formatting that attackers + // use in web requests in an effort to bypass detection. Text transformations + // are used in rule match statements, to transform the FieldToMatch request + // component before inspecting it, and they're used in rate-based rule statements, + // to transform request components before using them as custom aggregation keys. + // If you specify one or more transformations to apply, WAF performs all transformations + // on the specified content, starting from the lowest priority setting, and + // then uses the component contents. + // + // TextTransformations is a required field + TextTransformations []*TextTransformation `min:"1" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RateLimitQueryArgument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RateLimitQueryArgument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RateLimitQueryArgument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RateLimitQueryArgument"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.TextTransformations == nil { + invalidParams.Add(request.NewErrParamRequired("TextTransformations")) + } + if s.TextTransformations != nil && len(s.TextTransformations) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TextTransformations", 1)) + } + if s.TextTransformations != nil { + for i, v := range s.TextTransformations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TextTransformations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *RateLimitQueryArgument) SetName(v string) *RateLimitQueryArgument { + s.Name = &v + return s +} + +// SetTextTransformations sets the TextTransformations field's value. +func (s *RateLimitQueryArgument) SetTextTransformations(v []*TextTransformation) *RateLimitQueryArgument { + s.TextTransformations = v + return s +} + +// Specifies the request's query string as an aggregate key for a rate-based +// rule. Each distinct string contributes to the aggregation instance. If you +// use just the query string as your custom key, then each string fully defines +// an aggregation instance. +type RateLimitQueryString struct { + _ struct{} `type:"structure"` + + // Text transformations eliminate some of the unusual formatting that attackers + // use in web requests in an effort to bypass detection. 
Text transformations + // are used in rule match statements, to transform the FieldToMatch request + // component before inspecting it, and they're used in rate-based rule statements, + // to transform request components before using them as custom aggregation keys. + // If you specify one or more transformations to apply, WAF performs all transformations + // on the specified content, starting from the lowest priority setting, and + // then uses the component contents. + // + // TextTransformations is a required field + TextTransformations []*TextTransformation `min:"1" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RateLimitQueryString) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RateLimitQueryString) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RateLimitQueryString) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RateLimitQueryString"} + if s.TextTransformations == nil { + invalidParams.Add(request.NewErrParamRequired("TextTransformations")) + } + if s.TextTransformations != nil && len(s.TextTransformations) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TextTransformations", 1)) + } + if s.TextTransformations != nil { + for i, v := range s.TextTransformations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TextTransformations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTextTransformations sets the TextTransformations field's value. +func (s *RateLimitQueryString) SetTextTransformations(v []*TextTransformation) *RateLimitQueryString { + s.TextTransformations = v + return s +} + +// A single regular expression. This is used in a RegexPatternSet. +type Regex struct { + _ struct{} `type:"structure"` + + // The string representing the regular expression. + RegexString *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Regex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Regex) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Regex) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Regex"} + if s.RegexString != nil && len(*s.RegexString) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RegexString", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegexString sets the RegexString field's value. +func (s *Regex) SetRegexString(v string) *Regex { + s.RegexString = &v + return s +} + +// A rule statement used to search web request components for a match against +// a single regular expression. +type RegexMatchStatement struct { + _ struct{} `type:"structure"` + + // The part of the web request that you want WAF to inspect. + // + // FieldToMatch is a required field + FieldToMatch *FieldToMatch `type:"structure" required:"true"` + + // The string representing the regular expression. + // + // RegexString is a required field + RegexString *string `min:"1" type:"string" required:"true"` + + // Text transformations eliminate some of the unusual formatting that attackers + // use in web requests in an effort to bypass detection. Text transformations + // are used in rule match statements, to transform the FieldToMatch request + // component before inspecting it, and they're used in rate-based rule statements, + // to transform request components before using them as custom aggregation keys. + // If you specify one or more transformations to apply, WAF performs all transformations + // on the specified content, starting from the lowest priority setting, and + // then uses the component contents. + // + // TextTransformations is a required field + TextTransformations []*TextTransformation `min:"1" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s RegexMatchStatement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegexMatchStatement) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegexMatchStatement) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegexMatchStatement"} + if s.FieldToMatch == nil { + invalidParams.Add(request.NewErrParamRequired("FieldToMatch")) + } + if s.RegexString == nil { + invalidParams.Add(request.NewErrParamRequired("RegexString")) + } + if s.RegexString != nil && len(*s.RegexString) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RegexString", 1)) + } + if s.TextTransformations == nil { + invalidParams.Add(request.NewErrParamRequired("TextTransformations")) + } + if s.TextTransformations != nil && len(s.TextTransformations) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TextTransformations", 1)) + } + if s.FieldToMatch != nil { + if err := s.FieldToMatch.Validate(); err != nil { + invalidParams.AddNested("FieldToMatch", err.(request.ErrInvalidParams)) + } + } + if s.TextTransformations != nil { + for i, v := range s.TextTransformations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TextTransformations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFieldToMatch sets the FieldToMatch field's value. +func (s *RegexMatchStatement) SetFieldToMatch(v *FieldToMatch) *RegexMatchStatement { + s.FieldToMatch = v + return s +} + +// SetRegexString sets the RegexString field's value. 
+func (s *RegexMatchStatement) SetRegexString(v string) *RegexMatchStatement { + s.RegexString = &v + return s +} + +// SetTextTransformations sets the TextTransformations field's value. +func (s *RegexMatchStatement) SetTextTransformations(v []*TextTransformation) *RegexMatchStatement { + s.TextTransformations = v + return s +} + +// Contains one or more regular expressions. +// +// WAF assigns an ARN to each RegexPatternSet that you create. To use a set +// in a rule, you provide the ARN to the Rule statement RegexPatternSetReferenceStatement. +type RegexPatternSet struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the entity. + ARN *string `min:"20" type:"string"` + + // A description of the set that helps with identification. + Description *string `min:"1" type:"string"` + + // A unique identifier for the set. This ID is returned in the responses to + // create and list commands. You provide it to operations like update and delete. + Id *string `min:"1" type:"string"` + + // The name of the set. You cannot change the name after you create the set. + Name *string `min:"1" type:"string"` + + // The regular expression patterns in the set. + RegularExpressionList []*Regex `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegexPatternSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegexPatternSet) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. 
+func (s *RegexPatternSet) SetARN(v string) *RegexPatternSet { + s.ARN = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *RegexPatternSet) SetDescription(v string) *RegexPatternSet { + s.Description = &v + return s +} + +// SetId sets the Id field's value. +func (s *RegexPatternSet) SetId(v string) *RegexPatternSet { + s.Id = &v + return s +} + +// SetName sets the Name field's value. +func (s *RegexPatternSet) SetName(v string) *RegexPatternSet { + s.Name = &v + return s +} + +// SetRegularExpressionList sets the RegularExpressionList field's value. +func (s *RegexPatternSet) SetRegularExpressionList(v []*Regex) *RegexPatternSet { + s.RegularExpressionList = v return s } @@ -17750,10 +18529,13 @@ type RegexPatternSetReferenceStatement struct { FieldToMatch *FieldToMatch `type:"structure" required:"true"` // Text transformations eliminate some of the unusual formatting that attackers - // use in web requests in an effort to bypass detection. If you specify one - // or more transformations in a rule statement, WAF performs all transformations - // on the content of the request component identified by FieldToMatch, starting - // from the lowest priority setting, before inspecting the content for a match. + // use in web requests in an effort to bypass detection. Text transformations + // are used in rule match statements, to transform the FieldToMatch request + // component before inspecting it, and they're used in rate-based rule statements, + // to transform request components before using them as custom aggregation keys. + // If you specify one or more transformations to apply, WAF performs all transformations + // on the specified content, starting from the lowest priority setting, and + // then uses the component contents. 
// // TextTransformations is a required field TextTransformations []*TextTransformation `min:"1" type:"list" required:"true"` @@ -19735,10 +20517,13 @@ type SizeConstraintStatement struct { Size *int64 `type:"long" required:"true"` // Text transformations eliminate some of the unusual formatting that attackers - // use in web requests in an effort to bypass detection. If you specify one - // or more transformations in a rule statement, WAF performs all transformations - // on the content of the request component identified by FieldToMatch, starting - // from the lowest priority setting, before inspecting the content for a match. + // use in web requests in an effort to bypass detection. Text transformations + // are used in rule match statements, to transform the FieldToMatch request + // component before inspecting it, and they're used in rate-based rule statements, + // to transform request components before using them as custom aggregation keys. + // If you specify one or more transformations to apply, WAF performs all transformations + // on the specified content, starting from the lowest priority setting, and + // then uses the component contents. // // TextTransformations is a required field TextTransformations []*TextTransformation `min:"1" type:"list" required:"true"` @@ -19852,10 +20637,13 @@ type SqliMatchStatement struct { SensitivityLevel *string `type:"string" enum:"SensitivityLevel"` // Text transformations eliminate some of the unusual formatting that attackers - // use in web requests in an effort to bypass detection. If you specify one - // or more transformations in a rule statement, WAF performs all transformations - // on the content of the request component identified by FieldToMatch, starting - // from the lowest priority setting, before inspecting the content for a match. + // use in web requests in an effort to bypass detection. 
Text transformations + // are used in rule match statements, to transform the FieldToMatch request + // component before inspecting it, and they're used in rate-based rule statements, + // to transform request components before using them as custom aggregation keys. + // If you specify one or more transformations to apply, WAF performs all transformations + // on the specified content, starting from the lowest priority setting, and + // then uses the component contents. // // TextTransformations is a required field TextTransformations []*TextTransformation `min:"1" type:"list" required:"true"` @@ -20026,44 +20814,84 @@ type Statement struct { // You provide more than one Statement within the OrStatement. OrStatement *OrStatement `type:"structure"` - // A rate-based rule tracks the rate of requests for each originating IP address, - // and triggers the rule action when the rate exceeds a limit that you specify - // on the number of requests in any 5-minute time span. You can use this to - // put a temporary block on requests from an IP address that is sending excessive - // requests. + // A rate-based rule counts incoming requests and rate limits requests when + // they are coming at too fast a rate. The rule categorizes requests according + // to your aggregation criteria, collects them into aggregation instances, and + // counts and rate limits the requests for each instance. // - // WAF tracks and manages web requests separately for each instance of a rate-based - // rule that you use. For example, if you provide the same rate-based rule settings - // in two web ACLs, each of the two rule statements represents a separate instance - // of the rate-based rule and gets its own tracking and management by WAF. If - // you define a rate-based rule inside a rule group, and then use that rule - // group in multiple places, each use creates a separate instance of the rate-based - // rule that gets its own tracking and management by WAF. 
+ // You can specify individual aggregation keys, like IP address or HTTP method. + // You can also specify aggregation key combinations, like IP address and HTTP + // method, or HTTP method, query argument, and cookie. // - // When the rule action triggers, WAF blocks additional requests from the IP - // address until the request rate falls below the limit. + // Each unique set of values for the aggregation keys that you specify is a + // separate aggregation instance, with the value from each key contributing + // to the aggregation instance definition. // - // You can optionally nest another statement inside the rate-based statement, - // to narrow the scope of the rule so that it only counts requests that match - // the nested statement. For example, based on recent requests that you have - // seen from an attacker, you might create a rate-based rule with a nested AND - // rule statement that contains the following nested statements: + // For example, assume the rule evaluates web requests with the following IP + // address and HTTP method values: + // + // * IP address 10.1.1.1, HTTP method POST + // + // * IP address 10.1.1.1, HTTP method GET // - // * An IP match statement with an IP set that specifies the address 192.0.2.44. + // * IP address 127.0.0.0, HTTP method POST // - // * A string match statement that searches in the User-Agent header for - // the string BadBot. + // * IP address 10.1.1.1, HTTP method GET // - // In this rate-based rule, you also define a rate limit. For this example, - // the rate limit is 1,000. Requests that meet the criteria of both of the nested - // statements are counted. If the count exceeds 1,000 requests per five minutes, - // the rule action triggers. Requests that do not meet the criteria of both - // of the nested statements are not counted towards the rate limit and are not - // affected by this rule. 
+ // The rule would create different aggregation instances according to your aggregation + // criteria, for example: + // + // * If the aggregation criteria is just the IP address, then each individual + // address is an aggregation instance, and WAF counts requests separately + // for each. The aggregation instances and request counts for our example + // would be the following: IP address 10.1.1.1: count 3 IP address 127.0.0.0: + // count 1 + // + // * If the aggregation criteria is HTTP method, then each individual HTTP + // method is an aggregation instance. The aggregation instances and request + // counts for our example would be the following: HTTP method POST: count + // 2 HTTP method GET: count 2 + // + // * If the aggregation criteria is IP address and HTTP method, then each + // IP address and each HTTP method would contribute to the combined aggregation + // instance. The aggregation instances and request counts for our example + // would be the following: IP address 10.1.1.1, HTTP method POST: count 1 + // IP address 10.1.1.1, HTTP method GET: count 2 IP address 127.0.0.0, HTTP + // method POST: count 1 + // + // For any n-tuple of aggregation keys, each unique combination of values for + // the keys defines a separate aggregation instance, which WAF counts and rate-limits + // individually. + // + // You can optionally nest another statement inside the rate-based statement, + // to narrow the scope of the rule so that it only counts and rate limits requests + // that match the nested statement. You can use this nested scope-down statement + // in conjunction with your aggregation key specifications or you can just count + // and rate limit all requests that match the scope-down statement, without + // additional aggregation. When you choose to just manage all requests that + // match a scope-down statement, the aggregation instance is singular for the + // rule. 
// // You cannot nest a RateBasedStatement inside another statement, for example // inside a NotStatement or OrStatement. You can define a RateBasedStatement // inside a web ACL and inside a rule group. + // + // For additional information about the options, see Rate limiting web requests + // using rate-based rules (https://docs.aws.amazon.com/waf/latest/developerguide/waf-rate-based-rules.html) + // in the WAF Developer Guide. + // + // If you only aggregate on the individual IP address or forwarded IP address, + // you can retrieve the list of IP addresses that WAF is currently rate limiting + // for a rule through the API call GetRateBasedStatementManagedKeys. This option + // is not available for other aggregation configurations. + // + // WAF tracks and manages web requests separately for each instance of a rate-based + // rule that you use. For example, if you provide the same rate-based rule settings + // in two web ACLs, each of the two rule statements represents a separate instance + // of the rate-based rule and gets its own tracking and management by WAF. If + // you define a rate-based rule inside a rule group, and then use that rule + // group in multiple places, each use creates a separate instance of the rate-based + // rule that gets its own tracking and management by WAF. RateBasedStatement *RateBasedStatement `type:"structure"` // A rule statement used to search web request components for a match against @@ -20548,10 +21376,10 @@ func (s TagResourceOutput) GoString() string { type TextTransformation struct { _ struct{} `type:"structure"` - // Sets the relative processing order for multiple transformations that are - // defined for a rule statement. WAF processes all transformations, from lowest - // priority to highest, before inspecting the transformed content. The priorities - // don't need to be consecutive, but they must all be different. + // Sets the relative processing order for multiple transformations. 
WAF processes + // all transformations, from lowest priority to highest, before inspecting the + // transformed content. The priorities don't need to be consecutive, but they + // must all be different. // // Priority is a required field Priority *int64 `type:"integer" required:"true"` @@ -23523,6 +24351,72 @@ func (s *WAFUnavailableEntityException) RequestID() string { return s.RespMetadata.RequestID } +// The rule that you've named doesn't aggregate solely on the IP address or +// solely on the forwarded IP address. This call is only available for rate-based +// rules with an AggregateKeyType setting of IP or FORWARDED_IP. +type WAFUnsupportedAggregateKeyTypeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s WAFUnsupportedAggregateKeyTypeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s WAFUnsupportedAggregateKeyTypeException) GoString() string { + return s.String() +} + +func newErrorWAFUnsupportedAggregateKeyTypeException(v protocol.ResponseMetadata) error { + return &WAFUnsupportedAggregateKeyTypeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *WAFUnsupportedAggregateKeyTypeException) Code() string { + return "WAFUnsupportedAggregateKeyTypeException" +} + +// Message returns the exception's message. 
+func (s *WAFUnsupportedAggregateKeyTypeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *WAFUnsupportedAggregateKeyTypeException) OrigErr() error { + return nil +} + +func (s *WAFUnsupportedAggregateKeyTypeException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *WAFUnsupportedAggregateKeyTypeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *WAFUnsupportedAggregateKeyTypeException) RequestID() string { + return s.RespMetadata.RequestID +} + // A web ACL defines a collection of rules to use to inspect and control web // requests. Each rule has an action defined (allow, block, or count) for requests // that match the statement of the rule. In the web ACL, you assign a default @@ -23884,10 +24778,13 @@ type XssMatchStatement struct { FieldToMatch *FieldToMatch `type:"structure" required:"true"` // Text transformations eliminate some of the unusual formatting that attackers - // use in web requests in an effort to bypass detection. If you specify one - // or more transformations in a rule statement, WAF performs all transformations - // on the content of the request component identified by FieldToMatch, starting - // from the lowest priority setting, before inspecting the content for a match. + // use in web requests in an effort to bypass detection. Text transformations + // are used in rule match statements, to transform the FieldToMatch request + // component before inspecting it, and they're used in rate-based rule statements, + // to transform request components before using them as custom aggregation keys. 
+ // If you specify one or more transformations to apply, WAF performs all transformations + // on the specified content, starting from the lowest priority setting, and + // then uses the component contents. // // TextTransformations is a required field TextTransformations []*TextTransformation `min:"1" type:"list" required:"true"` @@ -25462,6 +26359,12 @@ const ( // ParameterExceptionFieldAssociatedResourceType is a ParameterExceptionField enum value ParameterExceptionFieldAssociatedResourceType = "ASSOCIATED_RESOURCE_TYPE" + + // ParameterExceptionFieldScopeDown is a ParameterExceptionField enum value + ParameterExceptionFieldScopeDown = "SCOPE_DOWN" + + // ParameterExceptionFieldCustomKeys is a ParameterExceptionField enum value + ParameterExceptionFieldCustomKeys = "CUSTOM_KEYS" ) // ParameterExceptionField_Values returns all elements of the ParameterExceptionField enum @@ -25534,6 +26437,8 @@ func ParameterExceptionField_Values() []string { ParameterExceptionFieldTokenDomain, ParameterExceptionFieldAtpRuleSetResponseInspection, ParameterExceptionFieldAssociatedResourceType, + ParameterExceptionFieldScopeDown, + ParameterExceptionFieldCustomKeys, } } @@ -25603,6 +26508,12 @@ const ( // RateBasedStatementAggregateKeyTypeForwardedIp is a RateBasedStatementAggregateKeyType enum value RateBasedStatementAggregateKeyTypeForwardedIp = "FORWARDED_IP" + + // RateBasedStatementAggregateKeyTypeCustomKeys is a RateBasedStatementAggregateKeyType enum value + RateBasedStatementAggregateKeyTypeCustomKeys = "CUSTOM_KEYS" + + // RateBasedStatementAggregateKeyTypeConstant is a RateBasedStatementAggregateKeyType enum value + RateBasedStatementAggregateKeyTypeConstant = "CONSTANT" ) // RateBasedStatementAggregateKeyType_Values returns all elements of the RateBasedStatementAggregateKeyType enum @@ -25610,6 +26521,8 @@ func RateBasedStatementAggregateKeyType_Values() []string { return []string{ RateBasedStatementAggregateKeyTypeIp, RateBasedStatementAggregateKeyTypeForwardedIp, + 
RateBasedStatementAggregateKeyTypeCustomKeys, + RateBasedStatementAggregateKeyTypeConstant, } } diff --git a/service/wafv2/errors.go b/service/wafv2/errors.go index 22c6d79316b..c2844c7ae87 100644 --- a/service/wafv2/errors.go +++ b/service/wafv2/errors.go @@ -184,6 +184,14 @@ const ( // a number of minutes for changes to propagate. Verify the resources that you // are specifying in your request parameters and then retry the operation. ErrCodeWAFUnavailableEntityException = "WAFUnavailableEntityException" + + // ErrCodeWAFUnsupportedAggregateKeyTypeException for service response error code + // "WAFUnsupportedAggregateKeyTypeException". + // + // The rule that you've named doesn't aggregate solely on the IP address or + // solely on the forwarded IP address. This call is only available for rate-based + // rules with an AggregateKeyType setting of IP or FORWARDED_IP. + ErrCodeWAFUnsupportedAggregateKeyTypeException = "WAFUnsupportedAggregateKeyTypeException" ) var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ @@ -205,4 +213,5 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "WAFTagOperationException": newErrorWAFTagOperationException, "WAFTagOperationInternalErrorException": newErrorWAFTagOperationInternalErrorException, "WAFUnavailableEntityException": newErrorWAFUnavailableEntityException, + "WAFUnsupportedAggregateKeyTypeException": newErrorWAFUnsupportedAggregateKeyTypeException, }