From 680aa4bf8493422a4067dfa6806a299d6e83e2c7 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Tue, 29 Nov 2022 19:07:40 +0000 Subject: [PATCH 1/3] Update to latest models --- .../next-release/api-change-ec2-70430.json | 5 + .../api-change-firehose-73695.json | 5 + .../next-release/api-change-kms-94096.json | 5 + .../next-release/api-change-omics-68242.json | 5 + ...api-change-opensearchserverless-71986.json | 5 + .../api-change-securitylake-57328.json | 5 + .../api-change-simspaceweaver-88091.json | 5 + .../data/ec2/2016-11-15/paginators-1.json | 30 + botocore/data/ec2/2016-11-15/service-2.json | 2127 ++++- .../data/firehose/2015-08-04/service-2.json | 447 +- .../kms/2014-11-01/endpoint-rule-set-1.json | 2 +- botocore/data/kms/2014-11-01/service-2.json | 456 +- .../omics/2022-11-28/endpoint-rule-set-1.json | 309 + .../data/omics/2022-11-28/paginators-1.json | 100 + botocore/data/omics/2022-11-28/service-2.json | 7338 +++++++++++++++++ botocore/data/omics/2022-11-28/waiters-2.json | 498 ++ .../2021-11-01/endpoint-rule-set-1.json | 309 + .../2021-11-01/paginators-1.json | 3 + .../2021-11-01/service-2.json | 2399 ++++++ .../2018-05-10/endpoint-rule-set-1.json | 309 + .../securitylake/2018-05-10/paginators-1.json | 28 + .../securitylake/2018-05-10/service-2.json | 2224 +++++ .../2022-10-28/endpoint-rule-set-1.json | 309 + .../2022-10-28/paginators-1.json | 3 + .../simspaceweaver/2022-10-28/service-2.json | 1232 +++ .../firehose/endpoint-tests-1.json | 502 +- .../endpoint-rules/kms/endpoint-tests-1.json | 550 +- .../omics/endpoint-tests-1.json | 295 + .../endpoint-tests-1.json | 295 + .../securitylake/endpoint-tests-1.json | 295 + .../simspaceweaver/endpoint-tests-1.json | 295 + 31 files changed, 19694 insertions(+), 696 deletions(-) create mode 100644 .changes/next-release/api-change-ec2-70430.json create mode 100644 .changes/next-release/api-change-firehose-73695.json create mode 100644 .changes/next-release/api-change-kms-94096.json create mode 100644 
.changes/next-release/api-change-omics-68242.json create mode 100644 .changes/next-release/api-change-opensearchserverless-71986.json create mode 100644 .changes/next-release/api-change-securitylake-57328.json create mode 100644 .changes/next-release/api-change-simspaceweaver-88091.json create mode 100644 botocore/data/omics/2022-11-28/endpoint-rule-set-1.json create mode 100644 botocore/data/omics/2022-11-28/paginators-1.json create mode 100644 botocore/data/omics/2022-11-28/service-2.json create mode 100644 botocore/data/omics/2022-11-28/waiters-2.json create mode 100644 botocore/data/opensearchserverless/2021-11-01/endpoint-rule-set-1.json create mode 100644 botocore/data/opensearchserverless/2021-11-01/paginators-1.json create mode 100644 botocore/data/opensearchserverless/2021-11-01/service-2.json create mode 100644 botocore/data/securitylake/2018-05-10/endpoint-rule-set-1.json create mode 100644 botocore/data/securitylake/2018-05-10/paginators-1.json create mode 100644 botocore/data/securitylake/2018-05-10/service-2.json create mode 100644 botocore/data/simspaceweaver/2022-10-28/endpoint-rule-set-1.json create mode 100644 botocore/data/simspaceweaver/2022-10-28/paginators-1.json create mode 100644 botocore/data/simspaceweaver/2022-10-28/service-2.json create mode 100644 tests/functional/endpoint-rules/omics/endpoint-tests-1.json create mode 100644 tests/functional/endpoint-rules/opensearchserverless/endpoint-tests-1.json create mode 100644 tests/functional/endpoint-rules/securitylake/endpoint-tests-1.json create mode 100644 tests/functional/endpoint-rules/simspaceweaver/endpoint-tests-1.json diff --git a/.changes/next-release/api-change-ec2-70430.json b/.changes/next-release/api-change-ec2-70430.json new file mode 100644 index 0000000000..9ab7befe4a --- /dev/null +++ b/.changes/next-release/api-change-ec2-70430.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``ec2``", + "description": "This release adds support for AWS Verified Access and the 
Hpc6id Amazon EC2 compute optimized instance type, which features 3rd generation Intel Xeon Scalable processors." +} diff --git a/.changes/next-release/api-change-firehose-73695.json b/.changes/next-release/api-change-firehose-73695.json new file mode 100644 index 0000000000..a831b3e4a7 --- /dev/null +++ b/.changes/next-release/api-change-firehose-73695.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``firehose``", + "description": "Allow support for the Serverless offering for Amazon OpenSearch Service as a Kinesis Data Firehose delivery destination." +} diff --git a/.changes/next-release/api-change-kms-94096.json b/.changes/next-release/api-change-kms-94096.json new file mode 100644 index 0000000000..c302c8ffb0 --- /dev/null +++ b/.changes/next-release/api-change-kms-94096.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``kms``", + "description": "AWS KMS introduces the External Key Store (XKS), a new feature for customers who want to protect their data with encryption keys stored in an external key management system under their control." +} diff --git a/.changes/next-release/api-change-omics-68242.json b/.changes/next-release/api-change-omics-68242.json new file mode 100644 index 0000000000..23ec3df4f4 --- /dev/null +++ b/.changes/next-release/api-change-omics-68242.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``omics``", + "description": "Amazon Omics is a new, purpose-built service that can be used by healthcare and life science organizations to store, query, and analyze omics data. The insights from that data can be used to accelerate scientific discoveries and improve healthcare." 
+} diff --git a/.changes/next-release/api-change-opensearchserverless-71986.json b/.changes/next-release/api-change-opensearchserverless-71986.json new file mode 100644 index 0000000000..598586a8bf --- /dev/null +++ b/.changes/next-release/api-change-opensearchserverless-71986.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``opensearchserverless``", + "description": "Publish SDK for Amazon OpenSearch Serverless" +} diff --git a/.changes/next-release/api-change-securitylake-57328.json b/.changes/next-release/api-change-securitylake-57328.json new file mode 100644 index 0000000000..b44bcd20d7 --- /dev/null +++ b/.changes/next-release/api-change-securitylake-57328.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``securitylake``", + "description": "Amazon Security Lake automatically centralizes security data from cloud, on-premises, and custom sources into a purpose-built data lake stored in your account. Security Lake makes it easier to analyze security data, so you can improve the protection of your workloads, applications, and data" +} diff --git a/.changes/next-release/api-change-simspaceweaver-88091.json b/.changes/next-release/api-change-simspaceweaver-88091.json new file mode 100644 index 0000000000..71d1c2103e --- /dev/null +++ b/.changes/next-release/api-change-simspaceweaver-88091.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``simspaceweaver``", + "description": "AWS SimSpace Weaver is a new service that helps customers build spatial simulations at new levels of scale - resulting in virtual worlds with millions of dynamic entities. See the AWS SimSpace Weaver developer guide for more details on how to get started. 
https://docs.aws.amazon.com/simspaceweaver" +} diff --git a/botocore/data/ec2/2016-11-15/paginators-1.json b/botocore/data/ec2/2016-11-15/paginators-1.json index 2ff0e292e1..798162f686 100644 --- a/botocore/data/ec2/2016-11-15/paginators-1.json +++ b/botocore/data/ec2/2016-11-15/paginators-1.json @@ -757,6 +757,36 @@ "limit_key": "MaxResults", "output_token": "NextToken", "result_key": "DataResponses" + }, + "DescribeVerifiedAccessEndpoints": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "VerifiedAccessEndpoints" + }, + "DescribeVerifiedAccessGroups": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "VerifiedAccessGroups" + }, + "DescribeVerifiedAccessInstanceLoggingConfigurations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "LoggingConfigurations" + }, + "DescribeVerifiedAccessInstances": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "VerifiedAccessInstances" + }, + "DescribeVerifiedAccessTrustProviders": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "VerifiedAccessTrustProviders" } } } diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index 7c57adc5c1..848f8c0432 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -310,6 +310,16 @@ "output":{"shape":"AttachNetworkInterfaceResult"}, "documentation":"

Attaches a network interface to an instance.

" }, + "AttachVerifiedAccessTrustProvider":{ + "name":"AttachVerifiedAccessTrustProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachVerifiedAccessTrustProviderRequest"}, + "output":{"shape":"AttachVerifiedAccessTrustProviderResult"}, + "documentation":"

A trust provider is a third-party entity that creates, maintains, and manages identity information for users and devices. One or more trust providers can be attached to an Amazon Web Services Verified Access instance.

" + }, "AttachVolume":{ "name":"AttachVolume", "http":{ @@ -1176,6 +1186,46 @@ "output":{"shape":"CreateTransitGatewayVpcAttachmentResult"}, "documentation":"

Attaches the specified VPC to the specified transit gateway.

If you attach a VPC with a CIDR range that overlaps the CIDR range of a VPC that is already attached, the new VPC CIDR range is not propagated to the default propagation route table.

To send VPC traffic to an attached transit gateway, add a route to the VPC route table using CreateRoute.

" }, + "CreateVerifiedAccessEndpoint":{ + "name":"CreateVerifiedAccessEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVerifiedAccessEndpointRequest"}, + "output":{"shape":"CreateVerifiedAccessEndpointResult"}, + "documentation":"

An Amazon Web Services Verified Access endpoint is where you define your application along with an optional endpoint-level access policy.

" + }, + "CreateVerifiedAccessGroup":{ + "name":"CreateVerifiedAccessGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVerifiedAccessGroupRequest"}, + "output":{"shape":"CreateVerifiedAccessGroupResult"}, + "documentation":"

An Amazon Web Services Verified Access group is a collection of Amazon Web Services Verified Access endpoints who's associated applications have similar security requirements. Each instance within an Amazon Web Services Verified Access group shares an Amazon Web Services Verified Access policy. For example, you can group all Amazon Web Services Verified Access instances associated with “sales” applications together and use one common Amazon Web Services Verified Access policy.

" + }, + "CreateVerifiedAccessInstance":{ + "name":"CreateVerifiedAccessInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVerifiedAccessInstanceRequest"}, + "output":{"shape":"CreateVerifiedAccessInstanceResult"}, + "documentation":"

An Amazon Web Services Verified Access instance is a regional entity that evaluates application requests and grants access only when your security requirements are met.

" + }, + "CreateVerifiedAccessTrustProvider":{ + "name":"CreateVerifiedAccessTrustProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVerifiedAccessTrustProviderRequest"}, + "output":{"shape":"CreateVerifiedAccessTrustProviderResult"}, + "documentation":"

A trust provider is a third-party entity that creates, maintains, and manages identity information for users and devices. When an application request is made, the identity information sent by the trust provider will be evaluated by Amazon Web Services Verified Access, before allowing or denying the application request.

" + }, "CreateVolume":{ "name":"CreateVolume", "http":{ @@ -1840,6 +1890,46 @@ "output":{"shape":"DeleteTransitGatewayVpcAttachmentResult"}, "documentation":"

Deletes the specified VPC attachment.

" }, + "DeleteVerifiedAccessEndpoint":{ + "name":"DeleteVerifiedAccessEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVerifiedAccessEndpointRequest"}, + "output":{"shape":"DeleteVerifiedAccessEndpointResult"}, + "documentation":"

Delete an Amazon Web Services Verified Access endpoint.

" + }, + "DeleteVerifiedAccessGroup":{ + "name":"DeleteVerifiedAccessGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVerifiedAccessGroupRequest"}, + "output":{"shape":"DeleteVerifiedAccessGroupResult"}, + "documentation":"

Delete an Amazon Web Services Verified Access group.

" + }, + "DeleteVerifiedAccessInstance":{ + "name":"DeleteVerifiedAccessInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVerifiedAccessInstanceRequest"}, + "output":{"shape":"DeleteVerifiedAccessInstanceResult"}, + "documentation":"

Delete an Amazon Web Services Verified Access instance.

" + }, + "DeleteVerifiedAccessTrustProvider":{ + "name":"DeleteVerifiedAccessTrustProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVerifiedAccessTrustProviderRequest"}, + "output":{"shape":"DeleteVerifiedAccessTrustProviderResult"}, + "documentation":"

Delete an Amazon Web Services Verified Access trust provider.

" + }, "DeleteVolume":{ "name":"DeleteVolume", "http":{ @@ -3194,6 +3284,56 @@ "output":{"shape":"DescribeTrunkInterfaceAssociationsResult"}, "documentation":"

This API action is currently in limited preview only. If you are interested in using this feature, contact your account manager.

Describes one or more network interface trunk associations.

" }, + "DescribeVerifiedAccessEndpoints":{ + "name":"DescribeVerifiedAccessEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVerifiedAccessEndpointsRequest"}, + "output":{"shape":"DescribeVerifiedAccessEndpointsResult"}, + "documentation":"

Describe Amazon Web Services Verified Access endpoints.

" + }, + "DescribeVerifiedAccessGroups":{ + "name":"DescribeVerifiedAccessGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVerifiedAccessGroupsRequest"}, + "output":{"shape":"DescribeVerifiedAccessGroupsResult"}, + "documentation":"

Describe details of existing Verified Access groups.

" + }, + "DescribeVerifiedAccessInstanceLoggingConfigurations":{ + "name":"DescribeVerifiedAccessInstanceLoggingConfigurations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVerifiedAccessInstanceLoggingConfigurationsRequest"}, + "output":{"shape":"DescribeVerifiedAccessInstanceLoggingConfigurationsResult"}, + "documentation":"

Describes the current logging configuration for the Amazon Web Services Verified Access instances.

" + }, + "DescribeVerifiedAccessInstances":{ + "name":"DescribeVerifiedAccessInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVerifiedAccessInstancesRequest"}, + "output":{"shape":"DescribeVerifiedAccessInstancesResult"}, + "documentation":"

Describe Verified Access instances.

" + }, + "DescribeVerifiedAccessTrustProviders":{ + "name":"DescribeVerifiedAccessTrustProviders", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVerifiedAccessTrustProvidersRequest"}, + "output":{"shape":"DescribeVerifiedAccessTrustProvidersResult"}, + "documentation":"

Describe details of existing Verified Access trust providers.

" + }, "DescribeVolumeAttribute":{ "name":"DescribeVolumeAttribute", "http":{ @@ -3392,6 +3532,16 @@ "input":{"shape":"DetachNetworkInterfaceRequest"}, "documentation":"

Detaches a network interface from an instance.

" }, + "DetachVerifiedAccessTrustProvider":{ + "name":"DetachVerifiedAccessTrustProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachVerifiedAccessTrustProviderRequest"}, + "output":{"shape":"DetachVerifiedAccessTrustProviderResult"}, + "documentation":"

Detach a trust provider from an Amazon Web Services Verified Access instance.

" + }, "DetachVolume":{ "name":"DetachVolume", "http":{ @@ -4185,6 +4335,26 @@ "output":{"shape":"GetTransitGatewayRouteTablePropagationsResult"}, "documentation":"

Gets information about the route table propagations for the specified transit gateway route table.

" }, + "GetVerifiedAccessEndpointPolicy":{ + "name":"GetVerifiedAccessEndpointPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetVerifiedAccessEndpointPolicyRequest"}, + "output":{"shape":"GetVerifiedAccessEndpointPolicyResult"}, + "documentation":"

Get the Verified Access policy associated with the endpoint.

" + }, + "GetVerifiedAccessGroupPolicy":{ + "name":"GetVerifiedAccessGroupPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetVerifiedAccessGroupPolicyRequest"}, + "output":{"shape":"GetVerifiedAccessGroupPolicyResult"}, + "documentation":"

Shows the contents of the Verified Access policy associated with the group.

" + }, "GetVpnConnectionDeviceSampleConfiguration":{ "name":"GetVpnConnectionDeviceSampleConfiguration", "http":{ @@ -4698,6 +4868,76 @@ "output":{"shape":"ModifyTransitGatewayVpcAttachmentResult"}, "documentation":"

Modifies the specified VPC attachment.

" }, + "ModifyVerifiedAccessEndpoint":{ + "name":"ModifyVerifiedAccessEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVerifiedAccessEndpointRequest"}, + "output":{"shape":"ModifyVerifiedAccessEndpointResult"}, + "documentation":"

Modifies the configuration of an Amazon Web Services Verified Access endpoint.

" + }, + "ModifyVerifiedAccessEndpointPolicy":{ + "name":"ModifyVerifiedAccessEndpointPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVerifiedAccessEndpointPolicyRequest"}, + "output":{"shape":"ModifyVerifiedAccessEndpointPolicyResult"}, + "documentation":"

Modifies the specified Verified Access endpoint policy.

" + }, + "ModifyVerifiedAccessGroup":{ + "name":"ModifyVerifiedAccessGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVerifiedAccessGroupRequest"}, + "output":{"shape":"ModifyVerifiedAccessGroupResult"}, + "documentation":"

Modifies the specified Verified Access group configuration.

" + }, + "ModifyVerifiedAccessGroupPolicy":{ + "name":"ModifyVerifiedAccessGroupPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVerifiedAccessGroupPolicyRequest"}, + "output":{"shape":"ModifyVerifiedAccessGroupPolicyResult"}, + "documentation":"

Modifies the specified Verified Access group policy.

" + }, + "ModifyVerifiedAccessInstance":{ + "name":"ModifyVerifiedAccessInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVerifiedAccessInstanceRequest"}, + "output":{"shape":"ModifyVerifiedAccessInstanceResult"}, + "documentation":"

Modifies the configuration of the specified Verified Access instance.

" + }, + "ModifyVerifiedAccessInstanceLoggingConfiguration":{ + "name":"ModifyVerifiedAccessInstanceLoggingConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVerifiedAccessInstanceLoggingConfigurationRequest"}, + "output":{"shape":"ModifyVerifiedAccessInstanceLoggingConfigurationResult"}, + "documentation":"

Modifies the logging configuration for the specified Amazon Web Services Verified Access instance.

" + }, + "ModifyVerifiedAccessTrustProvider":{ + "name":"ModifyVerifiedAccessTrustProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVerifiedAccessTrustProviderRequest"}, + "output":{"shape":"ModifyVerifiedAccessTrustProviderResult"}, + "documentation":"

Modifies the configuration of the specified Amazon Web Services Verified Access trust provider.

" + }, "ModifyVolume":{ "name":"ModifyVolume", "http":{ @@ -7781,6 +8021,47 @@ }, "documentation":"

Contains the output of AttachNetworkInterface.

" }, + "AttachVerifiedAccessTrustProviderRequest":{ + "type":"structure", + "required":[ + "VerifiedAccessInstanceId", + "VerifiedAccessTrustProviderId" + ], + "members":{ + "VerifiedAccessInstanceId":{ + "shape":"VerifiedAccessInstanceId", + "documentation":"

The ID of the Amazon Web Services Verified Access instance.

" + }, + "VerifiedAccessTrustProviderId":{ + "shape":"VerifiedAccessTrustProviderId", + "documentation":"

The ID of the Amazon Web Services Verified Access trust provider.

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "idempotencyToken":true + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "AttachVerifiedAccessTrustProviderResult":{ + "type":"structure", + "members":{ + "VerifiedAccessTrustProvider":{ + "shape":"VerifiedAccessTrustProvider", + "documentation":"

The ID of the Amazon Web Services Verified Access trust provider.

", + "locationName":"verifiedAccessTrustProvider" + }, + "VerifiedAccessInstance":{ + "shape":"VerifiedAccessInstance", + "documentation":"

The ID of the Amazon Web Services Verified Access instance.

", + "locationName":"verifiedAccessInstance" + } + } + }, "AttachVolumeRequest":{ "type":"structure", "required":[ @@ -9484,6 +9765,7 @@ "deleted" ] }, + "CertificateArn":{"type":"string"}, "CertificateAuthentication":{ "type":"structure", "members":{ @@ -14055,6 +14337,316 @@ } } }, + "CreateVerifiedAccessEndpointEniOptions":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"NetworkInterfaceId", + "documentation":"

The ID of the network interface.

" + }, + "Protocol":{ + "shape":"VerifiedAccessEndpointProtocol", + "documentation":"

The IP protocol.

" + }, + "Port":{ + "shape":"VerifiedAccessEndpointPortNumber", + "documentation":"

The IP port number.

" + } + }, + "documentation":"

Options for a network interface-type endpoint.

" + }, + "CreateVerifiedAccessEndpointLoadBalancerOptions":{ + "type":"structure", + "members":{ + "Protocol":{ + "shape":"VerifiedAccessEndpointProtocol", + "documentation":"

The IP protocol.

" + }, + "Port":{ + "shape":"VerifiedAccessEndpointPortNumber", + "documentation":"

The IP port number.

" + }, + "LoadBalancerArn":{ + "shape":"LoadBalancerArn", + "documentation":"

The ARN of the load balancer.

" + }, + "SubnetIds":{ + "shape":"CreateVerifiedAccessEndpointSubnetIdList", + "documentation":"

The IDs of the subnets.

", + "locationName":"SubnetId" + } + }, + "documentation":"

Describes a load balancer when creating an Amazon Web Services Verified Access endpoint using the load-balancer type.

" + }, + "CreateVerifiedAccessEndpointRequest":{ + "type":"structure", + "required":[ + "VerifiedAccessGroupId", + "EndpointType", + "AttachmentType", + "DomainCertificateArn", + "ApplicationDomain", + "EndpointDomainPrefix" + ], + "members":{ + "VerifiedAccessGroupId":{ + "shape":"VerifiedAccessGroupId", + "documentation":"

The ID of the Verified Access group to associate the endpoint with.

" + }, + "EndpointType":{ + "shape":"VerifiedAccessEndpointType", + "documentation":"

The type of Amazon Web Services Verified Access endpoint to create.

" + }, + "AttachmentType":{ + "shape":"VerifiedAccessEndpointAttachmentType", + "documentation":"

The Amazon Web Services network component Verified Access attaches to.

" + }, + "DomainCertificateArn":{ + "shape":"CertificateArn", + "documentation":"

The ARN of the public TLS/SSL certificate in Amazon Web Services Certificate Manager to associate with the endpoint. The CN in the certificate must match the DNS name your end users will use to reach your application.

" + }, + "ApplicationDomain":{ + "shape":"String", + "documentation":"

The DNS name for users to reach your application.

" + }, + "EndpointDomainPrefix":{ + "shape":"String", + "documentation":"

A custom identifier that gets prepended to a DNS name that is generated for the endpoint.

" + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIdList", + "documentation":"

The Amazon EC2 security groups to associate with the Amazon Web Services Verified Access endpoint.

", + "locationName":"SecurityGroupId" + }, + "LoadBalancerOptions":{ + "shape":"CreateVerifiedAccessEndpointLoadBalancerOptions", + "documentation":"

The load balancer details if creating the Amazon Web Services Verified Access endpoint as load-balancertype.

" + }, + "NetworkInterfaceOptions":{ + "shape":"CreateVerifiedAccessEndpointEniOptions", + "documentation":"

The network interface details if creating the Amazon Web Services Verified Access endpoint as network-interfacetype.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A description for the Amazon Web Services Verified Access endpoint.

" + }, + "PolicyDocument":{ + "shape":"String", + "documentation":"

The Amazon Web Services Verified Access policy document.

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to assign to the Amazon Web Services Verified Access endpoint.

", + "locationName":"TagSpecification" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "idempotencyToken":true + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "CreateVerifiedAccessEndpointResult":{ + "type":"structure", + "members":{ + "VerifiedAccessEndpoint":{ + "shape":"VerifiedAccessEndpoint", + "documentation":"

The ID of the Amazon Web Services Verified Access endpoint.

", + "locationName":"verifiedAccessEndpoint" + } + } + }, + "CreateVerifiedAccessEndpointSubnetIdList":{ + "type":"list", + "member":{ + "shape":"SubnetId", + "locationName":"item" + } + }, + "CreateVerifiedAccessGroupRequest":{ + "type":"structure", + "required":["VerifiedAccessInstanceId"], + "members":{ + "VerifiedAccessInstanceId":{ + "shape":"VerifiedAccessInstanceId", + "documentation":"

The ID of the Amazon Web Services Verified Access instance.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A description for the Amazon Web Services Verified Access group.

" + }, + "PolicyDocument":{ + "shape":"String", + "documentation":"

The Amazon Web Services Verified Access policy document.

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to assign to the Amazon Web Services Verified Access group.

", + "locationName":"TagSpecification" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "idempotencyToken":true + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "CreateVerifiedAccessGroupResult":{ + "type":"structure", + "members":{ + "VerifiedAccessGroup":{ + "shape":"VerifiedAccessGroup", + "documentation":"

The ID of the Verified Access group.

", + "locationName":"verifiedAccessGroup" + } + } + }, + "CreateVerifiedAccessInstanceRequest":{ + "type":"structure", + "members":{ + "Description":{ + "shape":"String", + "documentation":"

A description for the Amazon Web Services Verified Access instance.

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to assign to the Amazon Web Services Verified Access instance.

", + "locationName":"TagSpecification" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "idempotencyToken":true + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "CreateVerifiedAccessInstanceResult":{ + "type":"structure", + "members":{ + "VerifiedAccessInstance":{ + "shape":"VerifiedAccessInstance", + "documentation":"

The ID of the Amazon Web Services Verified Access instance.

", + "locationName":"verifiedAccessInstance" + } + } + }, + "CreateVerifiedAccessTrustProviderDeviceOptions":{ + "type":"structure", + "members":{ + "TenantId":{ + "shape":"String", + "documentation":"

The ID of the tenant application with the device-identity provider.

" + } + }, + "documentation":"

Options for a device-identity type trust provider.

" + }, + "CreateVerifiedAccessTrustProviderOidcOptions":{ + "type":"structure", + "members":{ + "Issuer":{ + "shape":"String", + "documentation":"

The OIDC issuer.

" + }, + "AuthorizationEndpoint":{ + "shape":"String", + "documentation":"

The OIDC authorization endpoint.

" + }, + "TokenEndpoint":{ + "shape":"String", + "documentation":"

The OIDC token endpoint.

" + }, + "UserInfoEndpoint":{ + "shape":"String", + "documentation":"

The OIDC user info endpoint.

" + }, + "ClientId":{ + "shape":"String", + "documentation":"

The client identifier.

" + }, + "ClientSecret":{ + "shape":"String", + "documentation":"

The client secret.

" + }, + "Scope":{ + "shape":"String", + "documentation":"

OpenID Connect (OIDC) scopes are used by an application during authentication to authorize access to a user's details. Each scope returns a specific set of user attributes.

" + } + }, + "documentation":"

Options for an OIDC-based, user-identity type trust provider.

" + }, + "CreateVerifiedAccessTrustProviderRequest":{ + "type":"structure", + "required":[ + "TrustProviderType", + "PolicyReferenceName" + ], + "members":{ + "TrustProviderType":{ + "shape":"TrustProviderType", + "documentation":"

The type of trust provider can be either user or device-based.

" + }, + "UserTrustProviderType":{ + "shape":"UserTrustProviderType", + "documentation":"

The type of user-based trust provider.

" + }, + "DeviceTrustProviderType":{ + "shape":"DeviceTrustProviderType", + "documentation":"

The type of device-based trust provider.

" + }, + "OidcOptions":{ + "shape":"CreateVerifiedAccessTrustProviderOidcOptions", + "documentation":"

The OpenID Connect details for an oidc-type, user-identity based trust provider.

" + }, + "DeviceOptions":{ + "shape":"CreateVerifiedAccessTrustProviderDeviceOptions", + "documentation":"

The options for device identity based trust providers.

" + }, + "PolicyReferenceName":{ + "shape":"String", + "documentation":"

The identifier to be used when working with policy rules.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A description for the Amazon Web Services Verified Access trust provider.

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to assign to the Amazon Web Services Verified Access trust provider.

", + "locationName":"TagSpecification" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "idempotencyToken":true + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "CreateVerifiedAccessTrustProviderResult":{ + "type":"structure", + "members":{ + "VerifiedAccessTrustProvider":{ + "shape":"VerifiedAccessTrustProvider", + "documentation":"

The ID of the Amazon Web Services Verified Access trust provider.

", + "locationName":"verifiedAccessTrustProvider" + } + } + }, "CreateVolumePermission":{ "type":"structure", "members":{ @@ -16367,6 +16959,122 @@ } } }, + "DeleteVerifiedAccessEndpointRequest":{ + "type":"structure", + "required":["VerifiedAccessEndpointId"], + "members":{ + "VerifiedAccessEndpointId":{ + "shape":"VerifiedAccessEndpointId", + "documentation":"

The ID of the Amazon Web Services Verified Access endpoint.

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "idempotencyToken":true + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "DeleteVerifiedAccessEndpointResult":{ + "type":"structure", + "members":{ + "VerifiedAccessEndpoint":{ + "shape":"VerifiedAccessEndpoint", + "documentation":"

The ID of the Amazon Web Services Verified Access endpoint.

", + "locationName":"verifiedAccessEndpoint" + } + } + }, + "DeleteVerifiedAccessGroupRequest":{ + "type":"structure", + "required":["VerifiedAccessGroupId"], + "members":{ + "VerifiedAccessGroupId":{ + "shape":"VerifiedAccessGroupId", + "documentation":"

The ID of the Amazon Web Services Verified Access group.

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "idempotencyToken":true + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "DeleteVerifiedAccessGroupResult":{ + "type":"structure", + "members":{ + "VerifiedAccessGroup":{ + "shape":"VerifiedAccessGroup", + "documentation":"

The ID of the Amazon Web Services Verified Access group.

", + "locationName":"verifiedAccessGroup" + } + } + }, + "DeleteVerifiedAccessInstanceRequest":{ + "type":"structure", + "required":["VerifiedAccessInstanceId"], + "members":{ + "VerifiedAccessInstanceId":{ + "shape":"VerifiedAccessInstanceId", + "documentation":"

The ID of the Amazon Web Services Verified Access instance.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "idempotencyToken":true + } + } + }, + "DeleteVerifiedAccessInstanceResult":{ + "type":"structure", + "members":{ + "VerifiedAccessInstance":{ + "shape":"VerifiedAccessInstance", + "documentation":"

The ID of the Amazon Web Services Verified Access instance.

", + "locationName":"verifiedAccessInstance" + } + } + }, + "DeleteVerifiedAccessTrustProviderRequest":{ + "type":"structure", + "required":["VerifiedAccessTrustProviderId"], + "members":{ + "VerifiedAccessTrustProviderId":{ + "shape":"VerifiedAccessTrustProviderId", + "documentation":"

The ID of the Amazon Web Services Verified Access trust provider.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "idempotencyToken":true + } + } + }, + "DeleteVerifiedAccessTrustProviderResult":{ + "type":"structure", + "members":{ + "VerifiedAccessTrustProvider":{ + "shape":"VerifiedAccessTrustProvider", + "documentation":"

The ID of the Amazon Web Services Verified Access trust provider.

", + "locationName":"verifiedAccessTrustProvider" + } + } + }, "DeleteVolumeRequest":{ "type":"structure", "required":["VolumeId"], @@ -22123,6 +22831,253 @@ } } }, + "DescribeVerifiedAccessEndpointsMaxResults":{ + "type":"integer", + "max":1000, + "min":5 + }, + "DescribeVerifiedAccessEndpointsRequest":{ + "type":"structure", + "members":{ + "VerifiedAccessEndpointIds":{ + "shape":"VerifiedAccessEndpointIdList", + "documentation":"

The IDs of the Amazon Web Services Verified Access endpoints.

", + "locationName":"VerifiedAccessEndpointId" + }, + "VerifiedAccessInstanceId":{ + "shape":"VerifiedAccessInstanceId", + "documentation":"

The ID of the Amazon Web Services Verified Access instance.

" + }, + "VerifiedAccessGroupId":{ + "shape":"VerifiedAccessGroupId", + "documentation":"

The ID of the Amazon Web Services Verified Access group.

" + }, + "MaxResults":{ + "shape":"DescribeVerifiedAccessEndpointsMaxResults", + "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next page of results.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

", + "locationName":"Filter" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "DescribeVerifiedAccessEndpointsResult":{ + "type":"structure", + "members":{ + "VerifiedAccessEndpoints":{ + "shape":"VerifiedAccessEndpointList", + "documentation":"

The ID of the Amazon Web Services Verified Access endpoint.

", + "locationName":"verifiedAccessEndpointSet" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "locationName":"nextToken" + } + } + }, + "DescribeVerifiedAccessGroupMaxResults":{ + "type":"integer", + "max":1000, + "min":5 + }, + "DescribeVerifiedAccessGroupsRequest":{ + "type":"structure", + "members":{ + "VerifiedAccessGroupIds":{ + "shape":"VerifiedAccessGroupIdList", + "documentation":"

The IDs of the Amazon Web Services Verified Access groups.

", + "locationName":"VerifiedAccessGroupId" + }, + "VerifiedAccessInstanceId":{ + "shape":"VerifiedAccessInstanceId", + "documentation":"

The ID of the Amazon Web Services Verified Access instance.

" + }, + "MaxResults":{ + "shape":"DescribeVerifiedAccessGroupMaxResults", + "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next page of results.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

", + "locationName":"Filter" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "DescribeVerifiedAccessGroupsResult":{ + "type":"structure", + "members":{ + "VerifiedAccessGroups":{ + "shape":"VerifiedAccessGroupList", + "documentation":"

The ID of the Verified Access group.

", + "locationName":"verifiedAccessGroupSet" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "locationName":"nextToken" + } + } + }, + "DescribeVerifiedAccessInstanceLoggingConfigurationsMaxResults":{ + "type":"integer", + "max":10, + "min":1 + }, + "DescribeVerifiedAccessInstanceLoggingConfigurationsRequest":{ + "type":"structure", + "members":{ + "VerifiedAccessInstanceIds":{ + "shape":"VerifiedAccessInstanceIdList", + "documentation":"

The IDs of the Amazon Web Services Verified Access instances.

", + "locationName":"VerifiedAccessInstanceId" + }, + "MaxResults":{ + "shape":"DescribeVerifiedAccessInstanceLoggingConfigurationsMaxResults", + "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next page of results.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

", + "locationName":"Filter" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "DescribeVerifiedAccessInstanceLoggingConfigurationsResult":{ + "type":"structure", + "members":{ + "LoggingConfigurations":{ + "shape":"VerifiedAccessInstanceLoggingConfigurationList", + "documentation":"

The current logging configuration for the Amazon Web Services Verified Access instances.

", + "locationName":"loggingConfigurationSet" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "locationName":"nextToken" + } + } + }, + "DescribeVerifiedAccessInstancesMaxResults":{ + "type":"integer", + "max":200, + "min":5 + }, + "DescribeVerifiedAccessInstancesRequest":{ + "type":"structure", + "members":{ + "VerifiedAccessInstanceIds":{ + "shape":"VerifiedAccessInstanceIdList", + "documentation":"

The IDs of the Amazon Web Services Verified Access instances.

", + "locationName":"VerifiedAccessInstanceId" + }, + "MaxResults":{ + "shape":"DescribeVerifiedAccessInstancesMaxResults", + "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next page of results.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

", + "locationName":"Filter" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "DescribeVerifiedAccessInstancesResult":{ + "type":"structure", + "members":{ + "VerifiedAccessInstances":{ + "shape":"VerifiedAccessInstanceList", + "documentation":"

The IDs of the Amazon Web Services Verified Access instances.

", + "locationName":"verifiedAccessInstanceSet" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "locationName":"nextToken" + } + } + }, + "DescribeVerifiedAccessTrustProvidersMaxResults":{ + "type":"integer", + "max":200, + "min":5 + }, + "DescribeVerifiedAccessTrustProvidersRequest":{ + "type":"structure", + "members":{ + "VerifiedAccessTrustProviderIds":{ + "shape":"VerifiedAccessTrustProviderIdList", + "documentation":"

The IDs of the Amazon Web Services Verified Access trust providers.

", + "locationName":"VerifiedAccessTrustProviderId" + }, + "MaxResults":{ + "shape":"DescribeVerifiedAccessTrustProvidersMaxResults", + "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next page of results.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

", + "locationName":"Filter" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "DescribeVerifiedAccessTrustProvidersResult":{ + "type":"structure", + "members":{ + "VerifiedAccessTrustProviders":{ + "shape":"VerifiedAccessTrustProviderList", + "documentation":"

The IDs of the Amazon Web Services Verified Access trust providers.

", + "locationName":"verifiedAccessTrustProviderSet" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "locationName":"nextToken" + } + } + }, "DescribeVolumeAttributeRequest":{ "type":"structure", "required":[ @@ -22957,6 +23912,47 @@ }, "documentation":"

Contains the parameters for DetachNetworkInterface.

" }, + "DetachVerifiedAccessTrustProviderRequest":{ + "type":"structure", + "required":[ + "VerifiedAccessInstanceId", + "VerifiedAccessTrustProviderId" + ], + "members":{ + "VerifiedAccessInstanceId":{ + "shape":"VerifiedAccessInstanceId", + "documentation":"

The ID of the Amazon Web Services Verified Access instance.

" + }, + "VerifiedAccessTrustProviderId":{ + "shape":"VerifiedAccessTrustProviderId", + "documentation":"

The ID of the Amazon Web Services Verified Access trust provider.

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "idempotencyToken":true + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "DetachVerifiedAccessTrustProviderResult":{ + "type":"structure", + "members":{ + "VerifiedAccessTrustProvider":{ + "shape":"VerifiedAccessTrustProvider", + "documentation":"

The ID of the Amazon Web Services Verified Access trust provider.

", + "locationName":"verifiedAccessTrustProvider" + }, + "VerifiedAccessInstance":{ + "shape":"VerifiedAccessInstance", + "documentation":"

The ID of the Amazon Web Services Verified Access instance.

", + "locationName":"verifiedAccessInstance" + } + } + }, "DetachVolumeRequest":{ "type":"structure", "required":["VolumeId"], @@ -23007,6 +24003,24 @@ }, "documentation":"

Contains the parameters for DetachVpnGateway.

" }, + "DeviceOptions":{ + "type":"structure", + "members":{ + "TenantId":{ + "shape":"String", + "documentation":"

The ID of the tenant application with the device-identity provider.

", + "locationName":"tenantId" + } + }, + "documentation":"

Options for an Amazon Web Services Verified Access device-identity based trust provider.

" + }, + "DeviceTrustProviderType":{ + "type":"string", + "enum":[ + "jamf", + "crowdstrike" + ] + }, "DeviceType":{ "type":"string", "enum":[ @@ -28493,6 +29507,64 @@ } } }, + "GetVerifiedAccessEndpointPolicyRequest":{ + "type":"structure", + "required":["VerifiedAccessEndpointId"], + "members":{ + "VerifiedAccessEndpointId":{ + "shape":"VerifiedAccessEndpointId", + "documentation":"

The ID of the Amazon Web Services Verified Access endpoint.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "GetVerifiedAccessEndpointPolicyResult":{ + "type":"structure", + "members":{ + "PolicyEnabled":{ + "shape":"Boolean", + "documentation":"

The status of the Verified Access policy.

", + "locationName":"policyEnabled" + }, + "PolicyDocument":{ + "shape":"String", + "documentation":"

The Amazon Web Services Verified Access policy document.

", + "locationName":"policyDocument" + } + } + }, + "GetVerifiedAccessGroupPolicyRequest":{ + "type":"structure", + "required":["VerifiedAccessGroupId"], + "members":{ + "VerifiedAccessGroupId":{ + "shape":"VerifiedAccessGroupId", + "documentation":"

The ID of the Amazon Web Services Verified Access group.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "GetVerifiedAccessGroupPolicyResult":{ + "type":"structure", + "members":{ + "PolicyEnabled":{ + "shape":"Boolean", + "documentation":"

The status of the Verified Access policy.

", + "locationName":"policyEnabled" + }, + "PolicyDocument":{ + "shape":"String", + "documentation":"

The Amazon Web Services Verified Access policy document.

", + "locationName":"policyDocument" + } + } + }, "GetVpnConnectionDeviceSampleConfigurationRequest":{ "type":"structure", "required":[ @@ -32906,7 +33978,8 @@ "u-18tb1.112xlarge", "u-24tb1.112xlarge", "trn1.2xlarge", - "trn1.32xlarge" + "trn1.32xlarge", + "hpc6id.32xlarge" ] }, "InstanceTypeHypervisor":{ @@ -35891,6 +36964,7 @@ "closed" ] }, + "LoadBalancerArn":{"type":"string"}, "LoadBalancersConfig":{ "type":"structure", "members":{ @@ -38451,6 +39525,334 @@ } } }, + "ModifyVerifiedAccessEndpointEniOptions":{ + "type":"structure", + "members":{ + "Protocol":{ + "shape":"VerifiedAccessEndpointProtocol", + "documentation":"

The IP protocol.

" + }, + "Port":{ + "shape":"VerifiedAccessEndpointPortNumber", + "documentation":"

The IP port number.

" + } + }, + "documentation":"

Options for a network-interface type Verified Access endpoint.

" + }, + "ModifyVerifiedAccessEndpointLoadBalancerOptions":{ + "type":"structure", + "members":{ + "SubnetIds":{ + "shape":"ModifyVerifiedAccessEndpointSubnetIdList", + "documentation":"

The IDs of the subnets.

", + "locationName":"SubnetId" + }, + "Protocol":{ + "shape":"VerifiedAccessEndpointProtocol", + "documentation":"

The IP protocol.

" + }, + "Port":{ + "shape":"VerifiedAccessEndpointPortNumber", + "documentation":"

The IP port number.

" + } + }, + "documentation":"

Describes a load balancer when creating an Amazon Web Services Verified Access endpoint using the load-balancer type.

" + }, + "ModifyVerifiedAccessEndpointPolicyRequest":{ + "type":"structure", + "required":[ + "VerifiedAccessEndpointId", + "PolicyEnabled" + ], + "members":{ + "VerifiedAccessEndpointId":{ + "shape":"VerifiedAccessEndpointId", + "documentation":"

The ID of the Amazon Web Services Verified Access endpoint.

" + }, + "PolicyEnabled":{ + "shape":"Boolean", + "documentation":"

The status of the Verified Access policy.

" + }, + "PolicyDocument":{ + "shape":"String", + "documentation":"

The Amazon Web Services Verified Access policy document.

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "idempotencyToken":true + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "ModifyVerifiedAccessEndpointPolicyResult":{ + "type":"structure", + "members":{ + "PolicyEnabled":{ + "shape":"Boolean", + "documentation":"

The status of the Verified Access policy.

", + "locationName":"policyEnabled" + }, + "PolicyDocument":{ + "shape":"String", + "documentation":"

The Amazon Web Services Verified Access policy document.

", + "locationName":"policyDocument" + } + } + }, + "ModifyVerifiedAccessEndpointRequest":{ + "type":"structure", + "required":["VerifiedAccessEndpointId"], + "members":{ + "VerifiedAccessEndpointId":{ + "shape":"VerifiedAccessEndpointId", + "documentation":"

The ID of the Amazon Web Services Verified Access endpoint.

" + }, + "VerifiedAccessGroupId":{ + "shape":"VerifiedAccessGroupId", + "documentation":"

The ID of the Amazon Web Services Verified Access group.

" + }, + "LoadBalancerOptions":{ + "shape":"ModifyVerifiedAccessEndpointLoadBalancerOptions", + "documentation":"

The load balancer details if creating the Amazon Web Services Verified Access endpoint as the load-balancer type.

" + }, + "NetworkInterfaceOptions":{ + "shape":"ModifyVerifiedAccessEndpointEniOptions", + "documentation":"

The network interface options.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A description for the Amazon Web Services Verified Access endpoint.

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "idempotencyToken":true + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "ModifyVerifiedAccessEndpointResult":{ + "type":"structure", + "members":{ + "VerifiedAccessEndpoint":{ + "shape":"VerifiedAccessEndpoint", + "documentation":"

The Amazon Web Services Verified Access endpoint details.

", + "locationName":"verifiedAccessEndpoint" + } + } + }, + "ModifyVerifiedAccessEndpointSubnetIdList":{ + "type":"list", + "member":{ + "shape":"SubnetId", + "locationName":"item" + } + }, + "ModifyVerifiedAccessGroupPolicyRequest":{ + "type":"structure", + "required":[ + "VerifiedAccessGroupId", + "PolicyEnabled" + ], + "members":{ + "VerifiedAccessGroupId":{ + "shape":"VerifiedAccessGroupId", + "documentation":"

The ID of the Amazon Web Services Verified Access group.

" + }, + "PolicyEnabled":{ + "shape":"Boolean", + "documentation":"

The status of the Verified Access policy.

" + }, + "PolicyDocument":{ + "shape":"String", + "documentation":"

The Amazon Web Services Verified Access policy document.

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "idempotencyToken":true + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "ModifyVerifiedAccessGroupPolicyResult":{ + "type":"structure", + "members":{ + "PolicyEnabled":{ + "shape":"Boolean", + "documentation":"

The status of the Verified Access policy.

", + "locationName":"policyEnabled" + }, + "PolicyDocument":{ + "shape":"String", + "documentation":"

The Amazon Web Services Verified Access policy document.

", + "locationName":"policyDocument" + } + } + }, + "ModifyVerifiedAccessGroupRequest":{ + "type":"structure", + "required":["VerifiedAccessGroupId"], + "members":{ + "VerifiedAccessGroupId":{ + "shape":"VerifiedAccessGroupId", + "documentation":"

The ID of the Amazon Web Services Verified Access group.

" + }, + "VerifiedAccessInstanceId":{ + "shape":"VerifiedAccessInstanceId", + "documentation":"

The ID of the Amazon Web Services Verified Access instance.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A description for the Amazon Web Services Verified Access group.

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "idempotencyToken":true + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "ModifyVerifiedAccessGroupResult":{ + "type":"structure", + "members":{ + "VerifiedAccessGroup":{ + "shape":"VerifiedAccessGroup", + "documentation":"

Details of Amazon Web Services Verified Access group.

", + "locationName":"verifiedAccessGroup" + } + } + }, + "ModifyVerifiedAccessInstanceLoggingConfigurationRequest":{ + "type":"structure", + "required":[ + "VerifiedAccessInstanceId", + "AccessLogs" + ], + "members":{ + "VerifiedAccessInstanceId":{ + "shape":"VerifiedAccessInstanceId", + "documentation":"

The ID of the Amazon Web Services Verified Access instance.

" + }, + "AccessLogs":{ + "shape":"VerifiedAccessLogOptions", + "documentation":"

The configuration options for Amazon Web Services Verified Access instances.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "idempotencyToken":true + } + } + }, + "ModifyVerifiedAccessInstanceLoggingConfigurationResult":{ + "type":"structure", + "members":{ + "LoggingConfiguration":{ + "shape":"VerifiedAccessInstanceLoggingConfiguration", + "documentation":"

The logging configuration for Amazon Web Services Verified Access instance.

", + "locationName":"loggingConfiguration" + } + } + }, + "ModifyVerifiedAccessInstanceRequest":{ + "type":"structure", + "required":["VerifiedAccessInstanceId"], + "members":{ + "VerifiedAccessInstanceId":{ + "shape":"VerifiedAccessInstanceId", + "documentation":"

The ID of the Amazon Web Services Verified Access instance.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A description for the Amazon Web Services Verified Access instance.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "idempotencyToken":true + } + } + }, + "ModifyVerifiedAccessInstanceResult":{ + "type":"structure", + "members":{ + "VerifiedAccessInstance":{ + "shape":"VerifiedAccessInstance", + "documentation":"

The ID of the Amazon Web Services Verified Access instance.

", + "locationName":"verifiedAccessInstance" + } + } + }, + "ModifyVerifiedAccessTrustProviderOidcOptions":{ + "type":"structure", + "members":{ + "Scope":{ + "shape":"String", + "documentation":"

OpenID Connect (OIDC) scopes are used by an application during authentication to authorize access to a user's details. Each scope returns a specific set of user attributes.

" + } + }, + "documentation":"

OpenID Connect options for an oidc-type, user-identity based trust provider.

" + }, + "ModifyVerifiedAccessTrustProviderRequest":{ + "type":"structure", + "required":["VerifiedAccessTrustProviderId"], + "members":{ + "VerifiedAccessTrustProviderId":{ + "shape":"VerifiedAccessTrustProviderId", + "documentation":"

The ID of the Amazon Web Services Verified Access trust provider.

" + }, + "OidcOptions":{ + "shape":"ModifyVerifiedAccessTrustProviderOidcOptions", + "documentation":"

The OpenID Connect details for an oidc-type, user-identity based trust provider.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A description for the Amazon Web Services Verified Access trust provider.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "idempotencyToken":true + } + } + }, + "ModifyVerifiedAccessTrustProviderResult":{ + "type":"structure", + "members":{ + "VerifiedAccessTrustProvider":{ + "shape":"VerifiedAccessTrustProvider", + "documentation":"

The ID of the Amazon Web Services Verified Access trust provider.

", + "locationName":"verifiedAccessTrustProvider" + } + } + }, "ModifyVolumeAttributeRequest":{ "type":"structure", "required":["VolumeId"], @@ -40466,6 +41868,47 @@ "All Upfront" ] }, + "OidcOptions":{ + "type":"structure", + "members":{ + "Issuer":{ + "shape":"String", + "documentation":"

The OIDC issuer.

", + "locationName":"issuer" + }, + "AuthorizationEndpoint":{ + "shape":"String", + "documentation":"

The OIDC authorization endpoint.

", + "locationName":"authorizationEndpoint" + }, + "TokenEndpoint":{ + "shape":"String", + "documentation":"

The OIDC token endpoint.

", + "locationName":"tokenEndpoint" + }, + "UserInfoEndpoint":{ + "shape":"String", + "documentation":"

The OIDC user info endpoint.

", + "locationName":"userInfoEndpoint" + }, + "ClientId":{ + "shape":"String", + "documentation":"

The client identifier.

", + "locationName":"clientId" + }, + "ClientSecret":{ + "shape":"String", + "documentation":"

The client secret.

", + "locationName":"clientSecret" + }, + "Scope":{ + "shape":"String", + "documentation":"

The OpenID Connect (OIDC) scope specified.

", + "locationName":"scope" + } + }, + "documentation":"

Options for OIDC-based, user-identity type trust provider.

" + }, "OnDemandAllocationStrategy":{ "type":"string", "enum":[ @@ -44626,6 +46069,11 @@ "capacity-reservation-fleet", "traffic-mirror-filter-rule", "vpc-endpoint-connection-device-type", + "verified-access-instance", + "verified-access-group", + "verified-access-endpoint", + "verified-access-policy", + "verified-access-trust-provider", "vpn-connection-device-type" ] }, @@ -46432,6 +47880,13 @@ "documentation":"

Describes a security group.

" }, "SecurityGroupId":{"type":"string"}, + "SecurityGroupIdList":{ + "type":"list", + "member":{ + "shape":"SecurityGroupId", + "locationName":"item" + } + }, "SecurityGroupIdStringList":{ "type":"list", "member":{ @@ -51543,6 +52998,13 @@ "locationName":"item" } }, + "TrustProviderType":{ + "type":"string", + "enum":[ + "user", + "device" + ] + }, "TunnelInsideIpVersion":{ "type":"string", "enum":[ @@ -52052,6 +53514,13 @@ "locationName":"UserId" } }, + "UserTrustProviderType":{ + "type":"string", + "enum":[ + "iam-identity-center", + "oidc" + ] + }, "VCpuCount":{"type":"integer"}, "VCpuCountRange":{ "type":"structure", @@ -52149,6 +53618,662 @@ "locationName":"item" } }, + "VerifiedAccessEndpoint":{ + "type":"structure", + "members":{ + "VerifiedAccessInstanceId":{ + "shape":"String", + "documentation":"

The ID of the Amazon Web Services Verified Access instance.

", + "locationName":"verifiedAccessInstanceId" + }, + "VerifiedAccessGroupId":{ + "shape":"String", + "documentation":"

The ID of the Amazon Web Services Verified Access group.

", + "locationName":"verifiedAccessGroupId" + }, + "VerifiedAccessEndpointId":{ + "shape":"String", + "documentation":"

The ID of the Amazon Web Services Verified Access endpoint.

", + "locationName":"verifiedAccessEndpointId" + }, + "ApplicationDomain":{ + "shape":"String", + "documentation":"

The DNS name for users to reach your application.

", + "locationName":"applicationDomain" + }, + "EndpointType":{ + "shape":"VerifiedAccessEndpointType", + "documentation":"

The type of Amazon Web Services Verified Access endpoint. Incoming application requests will be sent to an IP address, load balancer or a network interface depending on the endpoint type specified.

", + "locationName":"endpointType" + }, + "AttachmentType":{ + "shape":"VerifiedAccessEndpointAttachmentType", + "documentation":"

The type of attachment used to provide connectivity between the Amazon Web Services Verified Access endpoint and the application.

", + "locationName":"attachmentType" + }, + "DomainCertificateArn":{ + "shape":"String", + "documentation":"

The ARN of a public TLS/SSL certificate imported into or created with ACM.

", + "locationName":"domainCertificateArn" + }, + "EndpointDomain":{ + "shape":"String", + "documentation":"

A DNS name that is generated for the endpoint.

", + "locationName":"endpointDomain" + }, + "DeviceValidationDomain":{ + "shape":"String", + "documentation":"

Returned if the endpoint has a device trust provider attached.

", + "locationName":"deviceValidationDomain" + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIdList", + "documentation":"

The IDs of the security groups for the endpoint.

", + "locationName":"securityGroupIdSet" + }, + "LoadBalancerOptions":{ + "shape":"VerifiedAccessEndpointLoadBalancerOptions", + "documentation":"

The load balancer details if creating the Amazon Web Services Verified Access endpoint as load-balancer type.

", + "locationName":"loadBalancerOptions" + }, + "NetworkInterfaceOptions":{ + "shape":"VerifiedAccessEndpointEniOptions", + "documentation":"

The options for network-interface type endpoint.

", + "locationName":"networkInterfaceOptions" + }, + "Status":{ + "shape":"VerifiedAccessEndpointStatus", + "documentation":"

The endpoint status.

", + "locationName":"status" + }, + "Description":{ + "shape":"String", + "documentation":"

A description for the Amazon Web Services Verified Access endpoint.

", + "locationName":"description" + }, + "CreationTime":{ + "shape":"String", + "documentation":"

The creation time.

", + "locationName":"creationTime" + }, + "LastUpdatedTime":{ + "shape":"String", + "documentation":"

The last updated time.

", + "locationName":"lastUpdatedTime" + }, + "DeletionTime":{ + "shape":"String", + "documentation":"

The deletion time.

", + "locationName":"deletionTime" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags.

", + "locationName":"tagSet" + } + }, + "documentation":"

An Amazon Web Services Verified Access endpoint specifies the application that Amazon Web Services Verified Access provides access to. It must be attached to an Amazon Web Services Verified Access group. An Amazon Web Services Verified Access endpoint must also have an attached access policy before you attach it to a group.

" + }, + "VerifiedAccessEndpointAttachmentType":{ + "type":"string", + "enum":["vpc"] + }, + "VerifiedAccessEndpointEniOptions":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"NetworkInterfaceId", + "documentation":"

The ID of the network interface.

", + "locationName":"networkInterfaceId" + }, + "Protocol":{ + "shape":"VerifiedAccessEndpointProtocol", + "documentation":"

The IP protocol.

", + "locationName":"protocol" + }, + "Port":{ + "shape":"VerifiedAccessEndpointPortNumber", + "documentation":"

The IP port number.

", + "locationName":"port" + } + }, + "documentation":"

Options for a network-interface type endpoint.

" + }, + "VerifiedAccessEndpointId":{"type":"string"}, + "VerifiedAccessEndpointIdList":{ + "type":"list", + "member":{ + "shape":"VerifiedAccessEndpointId", + "locationName":"item" + } + }, + "VerifiedAccessEndpointList":{ + "type":"list", + "member":{ + "shape":"VerifiedAccessEndpoint", + "locationName":"item" + } + }, + "VerifiedAccessEndpointLoadBalancerOptions":{ + "type":"structure", + "members":{ + "Protocol":{ + "shape":"VerifiedAccessEndpointProtocol", + "documentation":"

The IP protocol.

", + "locationName":"protocol" + }, + "Port":{ + "shape":"VerifiedAccessEndpointPortNumber", + "documentation":"

The IP port number.

", + "locationName":"port" + }, + "LoadBalancerArn":{ + "shape":"String", + "documentation":"

The ARN of the load balancer.

", + "locationName":"loadBalancerArn" + }, + "SubnetIds":{ + "shape":"VerifiedAccessEndpointSubnetIdList", + "documentation":"

The IDs of the subnets.

", + "locationName":"subnetIdSet" + } + }, + "documentation":"

Describes a load balancer when creating an Amazon Web Services Verified Access endpoint using the load-balancer type.

" + }, + "VerifiedAccessEndpointPortNumber":{ + "type":"integer", + "max":65535, + "min":1 + }, + "VerifiedAccessEndpointProtocol":{ + "type":"string", + "enum":[ + "http", + "https" + ] + }, + "VerifiedAccessEndpointStatus":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"VerifiedAccessEndpointStatusCode", + "documentation":"

The status code of the Verified Access endpoint.

", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "documentation":"

The status message of the Verified Access endpoint.

", + "locationName":"message" + } + }, + "documentation":"

Describes the status of a Verified Access endpoint.

" + }, + "VerifiedAccessEndpointStatusCode":{ + "type":"string", + "enum":[ + "pending", + "active", + "updating", + "deleting", + "deleted" + ] + }, + "VerifiedAccessEndpointSubnetIdList":{ + "type":"list", + "member":{ + "shape":"SubnetId", + "locationName":"item" + } + }, + "VerifiedAccessEndpointType":{ + "type":"string", + "enum":[ + "load-balancer", + "network-interface" + ] + }, + "VerifiedAccessGroup":{ + "type":"structure", + "members":{ + "VerifiedAccessGroupId":{ + "shape":"String", + "documentation":"

The ID of the Verified Access group.

", + "locationName":"verifiedAccessGroupId" + }, + "VerifiedAccessInstanceId":{ + "shape":"String", + "documentation":"

The ID of the Amazon Web Services Verified Access instance.

", + "locationName":"verifiedAccessInstanceId" + }, + "Description":{ + "shape":"String", + "documentation":"

A description for the Amazon Web Services Verified Access group.

", + "locationName":"description" + }, + "Owner":{ + "shape":"String", + "documentation":"

The Amazon Web Services account number that owns the group.

", + "locationName":"owner" + }, + "VerifiedAccessGroupArn":{ + "shape":"String", + "documentation":"

The ARN of the Verified Access group.

", + "locationName":"verifiedAccessGroupArn" + }, + "CreationTime":{ + "shape":"String", + "documentation":"

The creation time.

", + "locationName":"creationTime" + }, + "LastUpdatedTime":{ + "shape":"String", + "documentation":"

The last updated time.

", + "locationName":"lastUpdatedTime" + }, + "DeletionTime":{ + "shape":"String", + "documentation":"

The deletion time.

", + "locationName":"deletionTime" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags.

", + "locationName":"tagSet" + } + }, + "documentation":"

Describes a Verified Access group.

" + }, + "VerifiedAccessGroupId":{"type":"string"}, + "VerifiedAccessGroupIdList":{ + "type":"list", + "member":{ + "shape":"VerifiedAccessGroupId", + "locationName":"item" + } + }, + "VerifiedAccessGroupList":{ + "type":"list", + "member":{ + "shape":"VerifiedAccessGroup", + "locationName":"item" + } + }, + "VerifiedAccessInstance":{ + "type":"structure", + "members":{ + "VerifiedAccessInstanceId":{ + "shape":"String", + "documentation":"

The ID of the Amazon Web Services Verified Access instance.

", + "locationName":"verifiedAccessInstanceId" + }, + "Description":{ + "shape":"String", + "documentation":"

A description for the Amazon Web Services Verified Access instance.

", + "locationName":"description" + }, + "VerifiedAccessTrustProviders":{ + "shape":"VerifiedAccessTrustProviderCondensedList", + "documentation":"

The IDs of the Amazon Web Services Verified Access trust providers.

", + "locationName":"verifiedAccessTrustProviderSet" + }, + "CreationTime":{ + "shape":"String", + "documentation":"

The creation time.

", + "locationName":"creationTime" + }, + "LastUpdatedTime":{ + "shape":"String", + "documentation":"

The last updated time.

", + "locationName":"lastUpdatedTime" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags.

", + "locationName":"tagSet" + } + }, + "documentation":"

Describes a Verified Access instance.

" + }, + "VerifiedAccessInstanceId":{"type":"string"}, + "VerifiedAccessInstanceIdList":{ + "type":"list", + "member":{ + "shape":"VerifiedAccessInstanceId", + "locationName":"item" + } + }, + "VerifiedAccessInstanceList":{ + "type":"list", + "member":{ + "shape":"VerifiedAccessInstance", + "locationName":"item" + } + }, + "VerifiedAccessInstanceLoggingConfiguration":{ + "type":"structure", + "members":{ + "VerifiedAccessInstanceId":{ + "shape":"String", + "documentation":"

The ID of the Amazon Web Services Verified Access instance.

", + "locationName":"verifiedAccessInstanceId" + }, + "AccessLogs":{ + "shape":"VerifiedAccessLogs", + "documentation":"

Details about the logging options.

", + "locationName":"accessLogs" + } + }, + "documentation":"

Describes logging options for an Amazon Web Services Verified Access instance.

" + }, + "VerifiedAccessInstanceLoggingConfigurationList":{ + "type":"list", + "member":{ + "shape":"VerifiedAccessInstanceLoggingConfiguration", + "locationName":"item" + } + }, + "VerifiedAccessLogCloudWatchLogsDestination":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

Indicates whether logging is enabled.

", + "locationName":"enabled" + }, + "DeliveryStatus":{ + "shape":"VerifiedAccessLogDeliveryStatus", + "documentation":"

The delivery status for access logs.

", + "locationName":"deliveryStatus" + }, + "LogGroup":{ + "shape":"String", + "documentation":"

The ID of the CloudWatch Logs log group.

", + "locationName":"logGroup" + } + }, + "documentation":"

Options for CloudWatch Logs as a logging destination.

" + }, + "VerifiedAccessLogCloudWatchLogsDestinationOptions":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

Indicates whether logging is enabled.

" + }, + "LogGroup":{ + "shape":"String", + "documentation":"

The ID of the CloudWatch Logs log group.

" + } + }, + "documentation":"

Options for CloudWatch Logs as a logging destination.

" + }, + "VerifiedAccessLogDeliveryStatus":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"VerifiedAccessLogDeliveryStatusCode", + "documentation":"

The status code.

", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "documentation":"

The status message.

", + "locationName":"message" + } + }, + "documentation":"

Describes a log delivery status.

" + }, + "VerifiedAccessLogDeliveryStatusCode":{ + "type":"string", + "enum":[ + "success", + "failed" + ] + }, + "VerifiedAccessLogKinesisDataFirehoseDestination":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

Indicates whether logging is enabled.

", + "locationName":"enabled" + }, + "DeliveryStatus":{ + "shape":"VerifiedAccessLogDeliveryStatus", + "documentation":"

The delivery status.

", + "locationName":"deliveryStatus" + }, + "DeliveryStream":{ + "shape":"String", + "documentation":"

The ID of the delivery stream.

", + "locationName":"deliveryStream" + } + }, + "documentation":"

Options for Kinesis as a logging destination.

" + }, + "VerifiedAccessLogKinesisDataFirehoseDestinationOptions":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

Indicates whether logging is enabled.

" + }, + "DeliveryStream":{ + "shape":"String", + "documentation":"

The ID of the delivery stream.

" + } + }, + "documentation":"

Describes Amazon Kinesis Data Firehose logging options.

" + }, + "VerifiedAccessLogOptions":{ + "type":"structure", + "members":{ + "S3":{ + "shape":"VerifiedAccessLogS3DestinationOptions", + "documentation":"

Sends Verified Access logs to Amazon S3.

" + }, + "CloudWatchLogs":{ + "shape":"VerifiedAccessLogCloudWatchLogsDestinationOptions", + "documentation":"

Sends Verified Access logs to CloudWatch Logs.

" + }, + "KinesisDataFirehose":{ + "shape":"VerifiedAccessLogKinesisDataFirehoseDestinationOptions", + "documentation":"

Sends Verified Access logs to Kinesis.

" + } + }, + "documentation":"

Describes the destinations for Verified Access logs.

" + }, + "VerifiedAccessLogS3Destination":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

Indicates whether logging is enabled.

", + "locationName":"enabled" + }, + "DeliveryStatus":{ + "shape":"VerifiedAccessLogDeliveryStatus", + "documentation":"

The delivery status.

", + "locationName":"deliveryStatus" + }, + "BucketName":{ + "shape":"String", + "documentation":"

The bucket name.

", + "locationName":"bucketName" + }, + "Prefix":{ + "shape":"String", + "documentation":"

The bucket prefix.

", + "locationName":"prefix" + }, + "BucketOwner":{ + "shape":"String", + "documentation":"

The Amazon Web Services account number that owns the bucket.

", + "locationName":"bucketOwner" + } + }, + "documentation":"

Options for Amazon S3 as a logging destination.

" + }, + "VerifiedAccessLogS3DestinationOptions":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

Indicates whether logging is enabled.

" + }, + "BucketName":{ + "shape":"String", + "documentation":"

The bucket name.

" + }, + "Prefix":{ + "shape":"String", + "documentation":"

The bucket prefix.

" + }, + "BucketOwner":{ + "shape":"String", + "documentation":"

The ID of the Amazon Web Services account that owns the Amazon S3 bucket.

" + } + }, + "documentation":"

Options for Amazon S3 as a logging destination.

" + }, + "VerifiedAccessLogs":{ + "type":"structure", + "members":{ + "S3":{ + "shape":"VerifiedAccessLogS3Destination", + "documentation":"

Amazon S3 logging options.

", + "locationName":"s3" + }, + "CloudWatchLogs":{ + "shape":"VerifiedAccessLogCloudWatchLogsDestination", + "documentation":"

CloudWatch Logs logging destination.

", + "locationName":"cloudWatchLogs" + }, + "KinesisDataFirehose":{ + "shape":"VerifiedAccessLogKinesisDataFirehoseDestination", + "documentation":"

Kinesis logging destination.

", + "locationName":"kinesisDataFirehose" + } + }, + "documentation":"

Describes the destinations for Verified Access logs.

" + }, + "VerifiedAccessTrustProvider":{ + "type":"structure", + "members":{ + "VerifiedAccessTrustProviderId":{ + "shape":"String", + "documentation":"

The ID of the Amazon Web Services Verified Access trust provider.

", + "locationName":"verifiedAccessTrustProviderId" + }, + "Description":{ + "shape":"String", + "documentation":"

A description for the Amazon Web Services Verified Access trust provider.

", + "locationName":"description" + }, + "TrustProviderType":{ + "shape":"TrustProviderType", + "documentation":"

The type of Verified Access trust provider.

", + "locationName":"trustProviderType" + }, + "UserTrustProviderType":{ + "shape":"UserTrustProviderType", + "documentation":"

The type of user-based trust provider.

", + "locationName":"userTrustProviderType" + }, + "DeviceTrustProviderType":{ + "shape":"DeviceTrustProviderType", + "documentation":"

The type of device-based trust provider.

", + "locationName":"deviceTrustProviderType" + }, + "OidcOptions":{ + "shape":"OidcOptions", + "documentation":"

The OpenID Connect details for an oidc-type, user-identity based trust provider.

", + "locationName":"oidcOptions" + }, + "DeviceOptions":{ + "shape":"DeviceOptions", + "documentation":"

The options for device-identity type trust provider.

", + "locationName":"deviceOptions" + }, + "PolicyReferenceName":{ + "shape":"String", + "documentation":"

The identifier to be used when working with policy rules.

", + "locationName":"policyReferenceName" + }, + "CreationTime":{ + "shape":"String", + "documentation":"

The creation time.

", + "locationName":"creationTime" + }, + "LastUpdatedTime":{ + "shape":"String", + "documentation":"

The last updated time.

", + "locationName":"lastUpdatedTime" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags.

", + "locationName":"tagSet" + } + }, + "documentation":"

Describes a Verified Access trust provider.

" + }, + "VerifiedAccessTrustProviderCondensed":{ + "type":"structure", + "members":{ + "VerifiedAccessTrustProviderId":{ + "shape":"String", + "documentation":"

The ID of the trust provider.

", + "locationName":"verifiedAccessTrustProviderId" + }, + "Description":{ + "shape":"String", + "documentation":"

The description of the trust provider.

", + "locationName":"description" + }, + "TrustProviderType":{ + "shape":"TrustProviderType", + "documentation":"

The type of trust provider (user- or device-based).

", + "locationName":"trustProviderType" + }, + "UserTrustProviderType":{ + "shape":"UserTrustProviderType", + "documentation":"

The type of user-based trust provider.

", + "locationName":"userTrustProviderType" + }, + "DeviceTrustProviderType":{ + "shape":"DeviceTrustProviderType", + "documentation":"

The type of device-based trust provider.

", + "locationName":"deviceTrustProviderType" + } + }, + "documentation":"

Condensed information about a trust provider.

" + }, + "VerifiedAccessTrustProviderCondensedList":{ + "type":"list", + "member":{ + "shape":"VerifiedAccessTrustProviderCondensed", + "locationName":"item" + } + }, + "VerifiedAccessTrustProviderId":{"type":"string"}, + "VerifiedAccessTrustProviderIdList":{ + "type":"list", + "member":{ + "shape":"VerifiedAccessTrustProviderId", + "locationName":"item" + } + }, + "VerifiedAccessTrustProviderList":{ + "type":"list", + "member":{ + "shape":"VerifiedAccessTrustProvider", + "locationName":"item" + } + }, "VersionDescription":{ "type":"string", "max":255, diff --git a/botocore/data/firehose/2015-08-04/service-2.json b/botocore/data/firehose/2015-08-04/service-2.json index d1603e023c..fb049781c0 100644 --- a/botocore/data/firehose/2015-08-04/service-2.json +++ b/botocore/data/firehose/2015-08-04/service-2.json @@ -27,7 +27,7 @@ {"shape":"ResourceInUseException"}, {"shape":"InvalidKMSResourceException"} ], - "documentation":"

Creates a Kinesis Data Firehose delivery stream.

By default, you can create up to 50 delivery streams per AWS Region.

This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.

If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.

A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter.

To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled.

A delivery stream is configured with a single destination: Amazon S3, Amazon ES, Amazon Redshift, or Splunk. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration.

When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.

A few notes about Amazon Redshift as a destination:

Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.

" + "documentation":"

Creates a Kinesis Data Firehose delivery stream.

By default, you can create up to 50 delivery streams per Amazon Web Services Region.

This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.

If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.

A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter.

To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled.

A delivery stream is configured with a single destination: Amazon S3, Amazon ES, Amazon Redshift, or Splunk. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration.

When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.

A few notes about Amazon Redshift as a destination:

Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.

" }, "DeleteDeliveryStream":{ "name":"DeleteDeliveryStream", @@ -160,7 +160,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Adds or updates tags for the specified delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

Each delivery stream can have up to 50 tags.

This operation has a limit of five transactions per second per account.

" + "documentation":"

Adds or updates tags for the specified delivery stream. A tag is a key-value pair that you can define and assign to Amazon Web Services resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

Each delivery stream can have up to 50 tags.

This operation has a limit of five transactions per second per account.

" }, "UntagDeliveryStream":{ "name":"UntagDeliveryStream", @@ -202,12 +202,179 @@ "min":1, "pattern":"arn:.*" }, + "AmazonOpenSearchServerlessBufferingHints":{ + "type":"structure", + "members":{ + "IntervalInSeconds":{ + "shape":"AmazonOpenSearchServerlessBufferingIntervalInSeconds", + "documentation":"

Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).

" + }, + "SizeInMBs":{ + "shape":"AmazonOpenSearchServerlessBufferingSizeInMBs", + "documentation":"

Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.

We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.

" + } + }, + "documentation":"

Describes the buffering to perform before delivering data to the Serverless offering for Amazon OpenSearch Service destination.

" + }, + "AmazonOpenSearchServerlessBufferingIntervalInSeconds":{ + "type":"integer", + "max":900, + "min":60 + }, + "AmazonOpenSearchServerlessBufferingSizeInMBs":{ + "type":"integer", + "max":100, + "min":1 + }, + "AmazonOpenSearchServerlessCollectionEndpoint":{ + "type":"string", + "max":512, + "min":1, + "pattern":"https:.*" + }, + "AmazonOpenSearchServerlessDestinationConfiguration":{ + "type":"structure", + "required":[ + "RoleARN", + "IndexName", + "S3Configuration" + ], + "members":{ + "RoleARN":{ + "shape":"RoleARN", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.

" + }, + "CollectionEndpoint":{ + "shape":"AmazonOpenSearchServerlessCollectionEndpoint", + "documentation":"

The endpoint to use when communicating with the collection in the Serverless offering for Amazon OpenSearch Service.

" + }, + "IndexName":{ + "shape":"AmazonOpenSearchServerlessIndexName", + "documentation":"

The Serverless offering for Amazon OpenSearch Service index name.

" + }, + "BufferingHints":{ + "shape":"AmazonOpenSearchServerlessBufferingHints", + "documentation":"

The buffering options. If no value is specified, the default values for AmazonOpenSearchServerlessBufferingHints are used.

" + }, + "RetryOptions":{ + "shape":"AmazonOpenSearchServerlessRetryOptions", + "documentation":"

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).

" + }, + "S3BackupMode":{ + "shape":"AmazonOpenSearchServerlessS3BackupMode", + "documentation":"

Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.

" + }, + "S3Configuration":{"shape":"S3DestinationConfiguration"}, + "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, + "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, + "VpcConfiguration":{"shape":"VpcConfiguration"} + }, + "documentation":"

Describes the configuration of a destination in the Serverless offering for Amazon OpenSearch Service.

" + }, + "AmazonOpenSearchServerlessDestinationDescription":{ + "type":"structure", + "members":{ + "RoleARN":{ + "shape":"RoleARN", + "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials.

" + }, + "CollectionEndpoint":{ + "shape":"AmazonOpenSearchServerlessCollectionEndpoint", + "documentation":"

The endpoint to use when communicating with the collection in the Serverless offering for Amazon OpenSearch Service.

" + }, + "IndexName":{ + "shape":"AmazonOpenSearchServerlessIndexName", + "documentation":"

The Serverless offering for Amazon OpenSearch Service index name.

" + }, + "BufferingHints":{ + "shape":"AmazonOpenSearchServerlessBufferingHints", + "documentation":"

The buffering options.

" + }, + "RetryOptions":{ + "shape":"AmazonOpenSearchServerlessRetryOptions", + "documentation":"

The Serverless offering for Amazon OpenSearch Service retry options.

" + }, + "S3BackupMode":{ + "shape":"AmazonOpenSearchServerlessS3BackupMode", + "documentation":"

The Amazon S3 backup mode.

" + }, + "S3DestinationDescription":{"shape":"S3DestinationDescription"}, + "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, + "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, + "VpcConfigurationDescription":{"shape":"VpcConfigurationDescription"} + }, + "documentation":"

The destination description in the Serverless offering for Amazon OpenSearch Service.

" + }, + "AmazonOpenSearchServerlessDestinationUpdate":{ + "type":"structure", + "members":{ + "RoleARN":{ + "shape":"RoleARN", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.

" + }, + "CollectionEndpoint":{ + "shape":"AmazonOpenSearchServerlessCollectionEndpoint", + "documentation":"

The endpoint to use when communicating with the collection in the Serverless offering for Amazon OpenSearch Service.

" + }, + "IndexName":{ + "shape":"AmazonOpenSearchServerlessIndexName", + "documentation":"

The Serverless offering for Amazon OpenSearch Service index name.

" + }, + "BufferingHints":{ + "shape":"AmazonOpenSearchServerlessBufferingHints", + "documentation":"

The buffering options. If no value is specified, AmazonOpenSearchServerlessBufferingHints object default values are used.

" + }, + "RetryOptions":{ + "shape":"AmazonOpenSearchServerlessRetryOptions", + "documentation":"

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).

" + }, + "S3Update":{"shape":"S3DestinationUpdate"}, + "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, + "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"} + }, + "documentation":"

Describes an update for a destination in the Serverless offering for Amazon OpenSearch Service.

" + }, + "AmazonOpenSearchServerlessIndexName":{ + "type":"string", + "max":80, + "min":1, + "pattern":".*" + }, + "AmazonOpenSearchServerlessRetryDurationInSeconds":{ + "type":"integer", + "max":7200, + "min":0 + }, + "AmazonOpenSearchServerlessRetryOptions":{ + "type":"structure", + "members":{ + "DurationInSeconds":{ + "shape":"AmazonOpenSearchServerlessRetryDurationInSeconds", + "documentation":"

After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

" + } + }, + "documentation":"

Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service.

" + }, + "AmazonOpenSearchServerlessS3BackupMode":{ + "type":"string", + "enum":[ + "FailedDocumentsOnly", + "AllDocuments" + ] + }, "AmazonopensearchserviceBufferingHints":{ "type":"structure", "members":{ - "IntervalInSeconds":{"shape":"AmazonopensearchserviceBufferingIntervalInSeconds"}, - "SizeInMBs":{"shape":"AmazonopensearchserviceBufferingSizeInMBs"} - } + "IntervalInSeconds":{ + "shape":"AmazonopensearchserviceBufferingIntervalInSeconds", + "documentation":"

Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).

" + }, + "SizeInMBs":{ + "shape":"AmazonopensearchserviceBufferingSizeInMBs", + "documentation":"

Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.

We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.

" + } + }, + "documentation":"

Describes the buffering to perform before delivering data to the Amazon OpenSearch Service destination.

" }, "AmazonopensearchserviceBufferingIntervalInSeconds":{ "type":"integer", @@ -233,54 +400,135 @@ "S3Configuration" ], "members":{ - "RoleARN":{"shape":"RoleARN"}, - "DomainARN":{"shape":"AmazonopensearchserviceDomainARN"}, - "ClusterEndpoint":{"shape":"AmazonopensearchserviceClusterEndpoint"}, - "IndexName":{"shape":"AmazonopensearchserviceIndexName"}, - "TypeName":{"shape":"AmazonopensearchserviceTypeName"}, - "IndexRotationPeriod":{"shape":"AmazonopensearchserviceIndexRotationPeriod"}, - "BufferingHints":{"shape":"AmazonopensearchserviceBufferingHints"}, - "RetryOptions":{"shape":"AmazonopensearchserviceRetryOptions"}, - "S3BackupMode":{"shape":"AmazonopensearchserviceS3BackupMode"}, + "RoleARN":{ + "shape":"RoleARN", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.

" + }, + "DomainARN":{ + "shape":"AmazonopensearchserviceDomainARN", + "documentation":"

The ARN of the Amazon OpenSearch Service domain. The IAM role must have permissions for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig after assuming the role specified in RoleARN.

" + }, + "ClusterEndpoint":{ + "shape":"AmazonopensearchserviceClusterEndpoint", + "documentation":"

The endpoint to use when communicating with the cluster. Specify either this ClusterEndpoint or the DomainARN field.

" + }, + "IndexName":{ + "shape":"AmazonopensearchserviceIndexName", + "documentation":"

The Amazon OpenSearch Service index name.

" + }, + "TypeName":{ + "shape":"AmazonopensearchserviceTypeName", + "documentation":"

The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during run time.

" + }, + "IndexRotationPeriod":{ + "shape":"AmazonopensearchserviceIndexRotationPeriod", + "documentation":"

The Amazon OpenSearch Service index rotation period. Index rotation appends a timestamp to the IndexName to facilitate the expiration of old data.

" + }, + "BufferingHints":{ + "shape":"AmazonopensearchserviceBufferingHints", + "documentation":"

The buffering options. If no value is specified, the default values for AmazonopensearchserviceBufferingHints are used.

" + }, + "RetryOptions":{ + "shape":"AmazonopensearchserviceRetryOptions", + "documentation":"

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).

" + }, + "S3BackupMode":{ + "shape":"AmazonopensearchserviceS3BackupMode", + "documentation":"

Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.

" + }, "S3Configuration":{"shape":"S3DestinationConfiguration"}, "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, "VpcConfiguration":{"shape":"VpcConfiguration"} - } + }, + "documentation":"

Describes the configuration of a destination in Amazon OpenSearch Service

" }, "AmazonopensearchserviceDestinationDescription":{ "type":"structure", "members":{ - "RoleARN":{"shape":"RoleARN"}, - "DomainARN":{"shape":"AmazonopensearchserviceDomainARN"}, - "ClusterEndpoint":{"shape":"AmazonopensearchserviceClusterEndpoint"}, - "IndexName":{"shape":"AmazonopensearchserviceIndexName"}, - "TypeName":{"shape":"AmazonopensearchserviceTypeName"}, - "IndexRotationPeriod":{"shape":"AmazonopensearchserviceIndexRotationPeriod"}, - "BufferingHints":{"shape":"AmazonopensearchserviceBufferingHints"}, - "RetryOptions":{"shape":"AmazonopensearchserviceRetryOptions"}, - "S3BackupMode":{"shape":"AmazonopensearchserviceS3BackupMode"}, + "RoleARN":{ + "shape":"RoleARN", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services credentials.

" + }, + "DomainARN":{ + "shape":"AmazonopensearchserviceDomainARN", + "documentation":"

The ARN of the Amazon OpenSearch Service domain.

" + }, + "ClusterEndpoint":{ + "shape":"AmazonopensearchserviceClusterEndpoint", + "documentation":"

The endpoint to use when communicating with the cluster. Kinesis Data Firehose uses either this ClusterEndpoint or the DomainARN field to send data to Amazon OpenSearch Service.

" + }, + "IndexName":{ + "shape":"AmazonopensearchserviceIndexName", + "documentation":"

The Amazon OpenSearch Service index name.

" + }, + "TypeName":{ + "shape":"AmazonopensearchserviceTypeName", + "documentation":"

The Amazon OpenSearch Service type name. This applies to Elasticsearch 6.x and lower versions. For Elasticsearch 7.x and OpenSearch Service 1.x, there's no value for TypeName.

" + }, + "IndexRotationPeriod":{ + "shape":"AmazonopensearchserviceIndexRotationPeriod", + "documentation":"

The Amazon OpenSearch Service index rotation period

" + }, + "BufferingHints":{ + "shape":"AmazonopensearchserviceBufferingHints", + "documentation":"

The buffering options.

" + }, + "RetryOptions":{ + "shape":"AmazonopensearchserviceRetryOptions", + "documentation":"

The Amazon OpenSearch Service retry options.

" + }, + "S3BackupMode":{ + "shape":"AmazonopensearchserviceS3BackupMode", + "documentation":"

The Amazon S3 backup mode.

" + }, "S3DestinationDescription":{"shape":"S3DestinationDescription"}, "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, "VpcConfigurationDescription":{"shape":"VpcConfigurationDescription"} - } + }, + "documentation":"

The destination description in Amazon OpenSearch Service.

" }, "AmazonopensearchserviceDestinationUpdate":{ "type":"structure", "members":{ - "RoleARN":{"shape":"RoleARN"}, - "DomainARN":{"shape":"AmazonopensearchserviceDomainARN"}, - "ClusterEndpoint":{"shape":"AmazonopensearchserviceClusterEndpoint"}, - "IndexName":{"shape":"AmazonopensearchserviceIndexName"}, - "TypeName":{"shape":"AmazonopensearchserviceTypeName"}, - "IndexRotationPeriod":{"shape":"AmazonopensearchserviceIndexRotationPeriod"}, - "BufferingHints":{"shape":"AmazonopensearchserviceBufferingHints"}, - "RetryOptions":{"shape":"AmazonopensearchserviceRetryOptions"}, + "RoleARN":{ + "shape":"RoleARN", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.

" + }, + "DomainARN":{ + "shape":"AmazonopensearchserviceDomainARN", + "documentation":"

The ARN of the Amazon OpenSearch Service domain. The IAM role must have permissions for DescribeDomain, DescribeDomains, and DescribeDomainConfig after assuming the IAM role specified in RoleARN.

" + }, + "ClusterEndpoint":{ + "shape":"AmazonopensearchserviceClusterEndpoint", + "documentation":"

The endpoint to use when communicating with the cluster. Specify either this ClusterEndpoint or the DomainARN field.

" + }, + "IndexName":{ + "shape":"AmazonopensearchserviceIndexName", + "documentation":"

The Amazon OpenSearch Service index name.

" + }, + "TypeName":{ + "shape":"AmazonopensearchserviceTypeName", + "documentation":"

The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during runtime.

If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Kinesis Data Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.

" + }, + "IndexRotationPeriod":{ + "shape":"AmazonopensearchserviceIndexRotationPeriod", + "documentation":"

The Amazon OpenSearch Service index rotation period. Index rotation appends a timestamp to IndexName to facilitate the expiration of old data.

" + }, + "BufferingHints":{ + "shape":"AmazonopensearchserviceBufferingHints", + "documentation":"

The buffering options. If no value is specified, AmazonopensearchserviceBufferingHints object default values are used.

" + }, + "RetryOptions":{ + "shape":"AmazonopensearchserviceRetryOptions", + "documentation":"

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).

" + }, "S3Update":{"shape":"S3DestinationUpdate"}, "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"} - } + }, + "documentation":"

Describes an update for a destination in Amazon OpenSearch Service.

" }, "AmazonopensearchserviceDomainARN":{ "type":"string", @@ -312,8 +560,12 @@ "AmazonopensearchserviceRetryOptions":{ "type":"structure", "members":{ - "DurationInSeconds":{"shape":"AmazonopensearchserviceRetryDurationInSeconds"} - } + "DurationInSeconds":{ + "shape":"AmazonopensearchserviceRetryDurationInSeconds", + "documentation":"

After an initial failure to deliver to Amazon OpenSearch Service, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

" + } + }, + "documentation":"

Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon OpenSearch Service.

" }, "AmazonopensearchserviceS3BackupMode":{ "type":"string", @@ -441,7 +693,7 @@ "members":{ "DeliveryStreamName":{ "shape":"DeliveryStreamName", - "documentation":"

The name of the delivery stream. This name must be unique per AWS account in the same AWS Region. If the delivery streams are in different accounts or different Regions, you can have multiple delivery streams with the same name.

" + "documentation":"

The name of the delivery stream. This name must be unique per Amazon Web Services account in the same Amazon Web Services Region. If the delivery streams are in different accounts or different Regions, you can have multiple delivery streams with the same name.

" }, "DeliveryStreamType":{ "shape":"DeliveryStreamType", @@ -472,7 +724,10 @@ "shape":"ElasticsearchDestinationConfiguration", "documentation":"

The destination in Amazon ES. You can specify only one destination.

" }, - "AmazonopensearchserviceDestinationConfiguration":{"shape":"AmazonopensearchserviceDestinationConfiguration"}, + "AmazonopensearchserviceDestinationConfiguration":{ + "shape":"AmazonopensearchserviceDestinationConfiguration", + "documentation":"

The destination in Amazon OpenSearch Service. You can specify only one destination.

" + }, "SplunkDestinationConfiguration":{ "shape":"SplunkDestinationConfiguration", "documentation":"

The destination in Splunk. You can specify only one destination.

" @@ -483,7 +738,11 @@ }, "Tags":{ "shape":"TagDeliveryStreamInputTagList", - "documentation":"

A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

You can specify up to 50 tags when creating a delivery stream.

" + "documentation":"

A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to Amazon Web Services resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

You can specify up to 50 tags when creating a delivery stream.

" + }, + "AmazonOpenSearchServerlessDestinationConfiguration":{ + "shape":"AmazonOpenSearchServerlessDestinationConfiguration", + "documentation":"

The destination in the Serverless offering for Amazon OpenSearch Service. You can specify only one destination.

" } } }, @@ -506,7 +765,7 @@ "members":{ "SchemaConfiguration":{ "shape":"SchemaConfiguration", - "documentation":"

Specifies the AWS Glue Data Catalog table that contains the column information. This parameter is required if Enabled is set to true.

" + "documentation":"

Specifies the Amazon Web Services Glue Data Catalog table that contains the column information. This parameter is required if Enabled is set to true.

" }, "InputFormatConfiguration":{ "shape":"InputFormatConfiguration", @@ -521,7 +780,7 @@ "documentation":"

Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.

" } }, - "documentation":"

Specifies that you want Kinesis Data Firehose to convert data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. Kinesis Data Firehose uses the serializer and deserializer that you specify, in addition to the column information from the AWS Glue table, to deserialize your input data from JSON and then serialize it to the Parquet or ORC format. For more information, see Kinesis Data Firehose Record Format Conversion.

" + "documentation":"

Specifies that you want Kinesis Data Firehose to convert data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. Kinesis Data Firehose uses the serializer and deserializer that you specify, in addition to the column information from the Amazon Web Services Glue table, to deserialize your input data from JSON and then serialize it to the Parquet or ORC format. For more information, see Kinesis Data Firehose Record Format Conversion.

" }, "DataTableColumns":{ "type":"string", @@ -545,7 +804,7 @@ }, "AllowForceDelete":{ "shape":"BooleanObject", - "documentation":"

Set this to true if you want to delete the delivery stream even if Kinesis Data Firehose is unable to retire the grant for the CMK. Kinesis Data Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Kinesis Data Firehose. If a failure to retire the grant happens due to an AWS KMS issue, Kinesis Data Firehose keeps retrying the delete operation.

The default value is false.

" + "documentation":"

Set this to true if you want to delete the delivery stream even if Kinesis Data Firehose is unable to retire the grant for the CMK. Kinesis Data Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Kinesis Data Firehose. If a failure to retire the grant happens due to an Amazon Web Services KMS issue, Kinesis Data Firehose keeps retrying the delete operation.

The default value is false.

" } } }, @@ -579,7 +838,7 @@ }, "DeliveryStreamARN":{ "shape":"DeliveryStreamARN", - "documentation":"

The Amazon Resource Name (ARN) of the delivery stream. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The Amazon Resource Name (ARN) of the delivery stream. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "DeliveryStreamStatus":{ "shape":"DeliveryStreamStatus", @@ -629,11 +888,11 @@ "members":{ "KeyARN":{ "shape":"AWSKMSKeyARN", - "documentation":"

If KeyType is CUSTOMER_MANAGED_CMK, this field contains the ARN of the customer managed CMK. If KeyType is AWS_OWNED_CMK, DeliveryStreamEncryptionConfiguration doesn't contain a value for KeyARN.

" + "documentation":"

If KeyType is CUSTOMER_MANAGED_CMK, this field contains the ARN of the customer managed CMK. If KeyType is AWS_OWNED_CMK, DeliveryStreamEncryptionConfiguration doesn't contain a value for KeyARN.

" }, "KeyType":{ "shape":"KeyType", - "documentation":"

Indicates the type of customer master key (CMK) that is used for encryption. The default setting is AWS_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs).

" + "documentation":"

Indicates the type of customer master key (CMK) that is used for encryption. The default setting is AWS_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs).

" }, "Status":{ "shape":"DeliveryStreamEncryptionStatus", @@ -652,11 +911,11 @@ "members":{ "KeyARN":{ "shape":"AWSKMSKeyARN", - "documentation":"

If you set KeyType to CUSTOMER_MANAGED_CMK, you must specify the Amazon Resource Name (ARN) of the CMK. If you set KeyType to AWS_OWNED_CMK, Kinesis Data Firehose uses a service-account CMK.

" + "documentation":"

If you set KeyType to CUSTOMER_MANAGED_CMK, you must specify the Amazon Resource Name (ARN) of the CMK. If you set KeyType to AWS_OWNED_CMK, Kinesis Data Firehose uses a service-account CMK.

" }, "KeyType":{ "shape":"KeyType", - "documentation":"

Indicates the type of customer master key (CMK) to use for encryption. The default setting is AWS_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType set to CUSTOMER_MANAGED_CMK, Kinesis Data Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Kinesis Data Firehose service to use the customer managed CMK to perform encryption and decryption. Kinesis Data Firehose manages that grant.

When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is encrypted with a customer managed CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement.

You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this limit, Kinesis Data Firehose throws a LimitExceededException.

To encrypt your delivery stream, use symmetric CMKs. Kinesis Data Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About Symmetric and Asymmetric CMKs in the AWS Key Management Service developer guide.

" + "documentation":"

Indicates the type of customer master key (CMK) to use for encryption. The default setting is AWS_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType set to CUSTOMER_MANAGED_CMK, Kinesis Data Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Kinesis Data Firehose service to use the customer managed CMK to perform encryption and decryption. Kinesis Data Firehose manages that grant.

When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is encrypted with a customer managed CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement.

You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this limit, Kinesis Data Firehose throws a LimitExceededException.

To encrypt your delivery stream, use symmetric CMKs. Kinesis Data Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About Symmetric and Asymmetric CMKs in the Amazon Web Services Key Management Service developer guide.

" } }, "documentation":"

Specifies the type and Amazon Resource Name (ARN) of the CMK to use for Server-Side Encryption (SSE).

" @@ -796,7 +1055,10 @@ "shape":"ElasticsearchDestinationDescription", "documentation":"

The destination in Amazon ES.

" }, - "AmazonopensearchserviceDestinationDescription":{"shape":"AmazonopensearchserviceDestinationDescription"}, + "AmazonopensearchserviceDestinationDescription":{ + "shape":"AmazonopensearchserviceDestinationDescription", + "documentation":"

The destination in Amazon OpenSearch Service.

" + }, "SplunkDestinationDescription":{ "shape":"SplunkDestinationDescription", "documentation":"

The destination in Splunk.

" @@ -804,6 +1066,10 @@ "HttpEndpointDestinationDescription":{ "shape":"HttpEndpointDestinationDescription", "documentation":"

Describes the specified HTTP endpoint destination.

" + }, + "AmazonOpenSearchServerlessDestinationDescription":{ + "shape":"AmazonOpenSearchServerlessDestinationDescription", + "documentation":"

The destination in the Serverless offering for Amazon OpenSearch Service.

" } }, "documentation":"

Describes the destination for a delivery stream.

" @@ -830,7 +1096,7 @@ "documentation":"

Specifies that the dynamic partitioning is enabled for this Kinesis Data Firehose delivery stream.

" } }, - "documentation":"

The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations. For more information, see https://docs.aws.amazon.com/firehose/latest/dev/dynamic-partitioning.html

" + "documentation":"

The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations.

" }, "ElasticsearchBufferingHints":{ "type":"structure", @@ -872,11 +1138,11 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "DomainARN":{ "shape":"ElasticsearchDomainARN", - "documentation":"

The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig after assuming the role specified in RoleARN. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

Specify either ClusterEndpoint or DomainARN.

" + "documentation":"

The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeDomain, DescribeDomains, and DescribeDomainConfig after assuming the role specified in RoleARN. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

Specify either ClusterEndpoint or DomainARN.

" }, "ClusterEndpoint":{ "shape":"ElasticsearchClusterEndpoint", @@ -904,7 +1170,7 @@ }, "S3BackupMode":{ "shape":"ElasticsearchS3BackupMode", - "documentation":"

Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with elasticsearch-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with elasticsearch-failed/ appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination. Default value is FailedDocumentsOnly.

You can't change this backup mode after you create the delivery stream.

" + "documentation":"

Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination. Default value is FailedDocumentsOnly.

You can't change this backup mode after you create the delivery stream.

" }, "S3Configuration":{ "shape":"S3DestinationConfiguration", @@ -930,11 +1196,11 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services credentials. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "DomainARN":{ "shape":"ElasticsearchDomainARN", - "documentation":"

The ARN of the Amazon ES domain. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

Kinesis Data Firehose uses either ClusterEndpoint or DomainARN to send data to Amazon ES.

" + "documentation":"

The ARN of the Amazon ES domain. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

Kinesis Data Firehose uses either ClusterEndpoint or DomainARN to send data to Amazon ES.

" }, "ClusterEndpoint":{ "shape":"ElasticsearchClusterEndpoint", @@ -946,7 +1212,7 @@ }, "TypeName":{ "shape":"ElasticsearchTypeName", - "documentation":"

The Elasticsearch type name. This applies to Elasticsearch 6.x and lower versions. For Elasticsearch 7.x, there's no value for TypeName.

" + "documentation":"

The Elasticsearch type name. This applies to Elasticsearch 6.x and lower versions. For Elasticsearch 7.x and OpenSearch Service 1.x, there's no value for TypeName.

" }, "IndexRotationPeriod":{ "shape":"ElasticsearchIndexRotationPeriod", @@ -988,11 +1254,11 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "DomainARN":{ "shape":"ElasticsearchDomainARN", - "documentation":"

The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig after assuming the IAM role specified in RoleARN. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

Specify either ClusterEndpoint or DomainARN.

" + "documentation":"

The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeDomain, DescribeDomains, and DescribeDomainConfig after assuming the IAM role specified in RoleARN. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

Specify either ClusterEndpoint or DomainARN.

" }, "ClusterEndpoint":{ "shape":"ElasticsearchClusterEndpoint", @@ -1114,11 +1380,11 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services credentials. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "BucketARN":{ "shape":"BucketARN", - "documentation":"

The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "Prefix":{ "shape":"Prefix", @@ -1162,7 +1428,7 @@ }, "DynamicPartitioningConfiguration":{ "shape":"DynamicPartitioningConfiguration", - "documentation":"

The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations. For more information, see https://docs.aws.amazon.com/firehose/latest/dev/dynamic-partitioning.html

" + "documentation":"

The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations.

" } }, "documentation":"

Describes the configuration of a destination in Amazon S3.

" @@ -1179,11 +1445,11 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services credentials. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "BucketARN":{ "shape":"BucketARN", - "documentation":"

The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "Prefix":{ "shape":"Prefix", @@ -1227,7 +1493,7 @@ }, "DynamicPartitioningConfiguration":{ "shape":"DynamicPartitioningConfiguration", - "documentation":"

The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations. For more information, see https://docs.aws.amazon.com/firehose/latest/dev/dynamic-partitioning.html

" + "documentation":"

The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations.

" } }, "documentation":"

Describes a destination in Amazon S3.

" @@ -1237,11 +1503,11 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services credentials. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "BucketARN":{ "shape":"BucketARN", - "documentation":"

The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "Prefix":{ "shape":"Prefix", @@ -1285,7 +1551,7 @@ }, "DynamicPartitioningConfiguration":{ "shape":"DynamicPartitioningConfiguration", - "documentation":"

The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations. For more information, see https://docs.aws.amazon.com/firehose/latest/dev/dynamic-partitioning.html

" + "documentation":"

The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations.

" } }, "documentation":"

Describes an update for a destination in Amazon S3.

" @@ -1637,7 +1903,7 @@ "members":{ "AWSKMSKeyARN":{ "shape":"AWSKMSKeyARN", - "documentation":"

The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The Amazon Resource Name (ARN) of the encryption key. Must belong to the same Amazon Web Services Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" } }, "documentation":"

Describes an encryption key for a destination in Amazon S3.

" @@ -1668,7 +1934,7 @@ }, "RoleARN":{ "shape":"RoleARN", - "documentation":"

The ARN of the role that provides access to the source Kinesis data stream. For more information, see AWS Identity and Access Management (IAM) ARN Format.

" + "documentation":"

The ARN of the role that provides access to the source Kinesis data stream. For more information, see Amazon Web Services Identity and Access Management (IAM) ARN Format.

" } }, "documentation":"

The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream used as the source for a delivery stream.

" @@ -1682,7 +1948,7 @@ }, "RoleARN":{ "shape":"RoleARN", - "documentation":"

The ARN of the role used by the source Kinesis data stream. For more information, see AWS Identity and Access Management (IAM) ARN Format.

" + "documentation":"

The ARN of the role used by the source Kinesis data stream. For more information, see Amazon Web Services Identity and Access Management (IAM) ARN Format.

" }, "DeliveryStartTimestamp":{ "shape":"DeliveryStartTimestamp", @@ -2028,14 +2294,14 @@ "members":{ "ParameterName":{ "shape":"ProcessorParameterName", - "documentation":"

The name of the parameter.

" + "documentation":"

The name of the parameter. Currently the following default values are supported: 3 for NumberOfRetries and 60 for the BufferIntervalInSeconds. The BufferSizeInMBs ranges between 0.2 MB and up to 3MB. The default buffering hint is 1MB for all destinations, except Splunk. For Splunk, the default buffering hint is 256 KB.

" }, "ParameterValue":{ "shape":"ProcessorParameterValue", "documentation":"

The parameter value.

" } }, - "documentation":"

Describes the processor parameter.

" + "documentation":"

Describes the processor parameter.

" }, "ProcessorParameterList":{ "type":"list", @@ -2202,7 +2468,7 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services credentials. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "ClusterJDBCURL":{ "shape":"ClusterJDBCURL", @@ -2259,7 +2525,7 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services credentials. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "ClusterJDBCURL":{ "shape":"ClusterJDBCURL", @@ -2305,7 +2571,7 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services credentials. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "ClusterJDBCURL":{ "shape":"ClusterJDBCURL", @@ -2431,11 +2697,11 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services credentials. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "BucketARN":{ "shape":"BucketARN", - "documentation":"

The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "Prefix":{ "shape":"Prefix", @@ -2476,11 +2742,11 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services credentials. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "BucketARN":{ "shape":"BucketARN", - "documentation":"

The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "Prefix":{ "shape":"Prefix", @@ -2514,11 +2780,11 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services credentials. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "BucketARN":{ "shape":"BucketARN", - "documentation":"

The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "Prefix":{ "shape":"Prefix", @@ -2552,23 +2818,23 @@ "members":{ "RoleARN":{ "shape":"NonEmptyStringWithoutWhitespace", - "documentation":"

The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.

If the SchemaConfiguration request parameter is used as part of invoking the CreateDeliveryStream API, then the RoleARN property is required and its value must be specified.

" + "documentation":"

The role that Kinesis Data Firehose can use to access Amazon Web Services Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.

If the SchemaConfiguration request parameter is used as part of invoking the CreateDeliveryStream API, then the RoleARN property is required and its value must be specified.

" }, "CatalogId":{ "shape":"NonEmptyStringWithoutWhitespace", - "documentation":"

The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.

" + "documentation":"

The ID of the Amazon Web Services Glue Data Catalog. If you don't supply this, the Amazon Web Services account ID is used by default.

" }, "DatabaseName":{ "shape":"NonEmptyStringWithoutWhitespace", - "documentation":"

Specifies the name of the AWS Glue database that contains the schema for the output data.

If the SchemaConfiguration request parameter is used as part of invoking the CreateDeliveryStream API, then the DatabaseName property is required and its value must be specified.

" + "documentation":"

Specifies the name of the Amazon Web Services Glue database that contains the schema for the output data.

If the SchemaConfiguration request parameter is used as part of invoking the CreateDeliveryStream API, then the DatabaseName property is required and its value must be specified.

" }, "TableName":{ "shape":"NonEmptyStringWithoutWhitespace", - "documentation":"

Specifies the AWS Glue table that contains the column information that constitutes your data schema.

If the SchemaConfiguration request parameter is used as part of invoking the CreateDeliveryStream API, then the TableName property is required and its value must be specified.

" + "documentation":"

Specifies the Amazon Web Services Glue table that contains the column information that constitutes your data schema.

If the SchemaConfiguration request parameter is used as part of invoking the CreateDeliveryStream API, then the TableName property is required and its value must be specified.

" }, "Region":{ "shape":"NonEmptyStringWithoutWhitespace", - "documentation":"

If you don't specify an AWS Region, the default is the current Region.

" + "documentation":"

If you don't specify an Amazon Web Services Region, the default is the current Region.

" }, "VersionId":{ "shape":"NonEmptyStringWithoutWhitespace", @@ -2939,7 +3205,10 @@ "shape":"ElasticsearchDestinationUpdate", "documentation":"

Describes an update for a destination in Amazon ES.

" }, - "AmazonopensearchserviceDestinationUpdate":{"shape":"AmazonopensearchserviceDestinationUpdate"}, + "AmazonopensearchserviceDestinationUpdate":{ + "shape":"AmazonopensearchserviceDestinationUpdate", + "documentation":"

Describes an update for a destination in Amazon OpenSearch Service.

" + }, "SplunkDestinationUpdate":{ "shape":"SplunkDestinationUpdate", "documentation":"

Describes an update for a destination in Splunk.

" @@ -2947,6 +3216,10 @@ "HttpEndpointDestinationUpdate":{ "shape":"HttpEndpointDestinationUpdate", "documentation":"

Describes an update to the specified HTTP endpoint destination.

" + }, + "AmazonOpenSearchServerlessDestinationUpdate":{ + "shape":"AmazonOpenSearchServerlessDestinationUpdate", + "documentation":"

Describes an update for a destination in the Serverless offering for Amazon OpenSearch Service.

" } } }, @@ -3014,5 +3287,5 @@ "documentation":"

The details of the VPC of the Amazon ES destination.

" } }, - "documentation":"Amazon Kinesis Data Firehose API Reference

Amazon Kinesis Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon Elasticsearch Service (Amazon ES), Amazon Redshift, and Splunk.

" + "documentation":"Amazon Kinesis Data Firehose API Reference

Amazon Kinesis Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon OpenSearch Service, Amazon Redshift, Splunk, and various other supported destinations.

" } diff --git a/botocore/data/kms/2014-11-01/endpoint-rule-set-1.json b/botocore/data/kms/2014-11-01/endpoint-rule-set-1.json index 51252582fb..4586854bd9 100644 --- a/botocore/data/kms/2014-11-01/endpoint-rule-set-1.json +++ b/botocore/data/kms/2014-11-01/endpoint-rule-set-1.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": false, + "required": true, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, diff --git a/botocore/data/kms/2014-11-01/service-2.json b/botocore/data/kms/2014-11-01/service-2.json index a6d6be972c..3741802201 100644 --- a/botocore/data/kms/2014-11-01/service-2.json +++ b/botocore/data/kms/2014-11-01/service-2.json @@ -45,7 +45,7 @@ {"shape":"KMSInternalException"}, {"shape":"CloudHsmClusterInvalidConfigurationException"} ], - "documentation":"

Connects or reconnects a custom key store to its associated CloudHSM cluster.

The custom key store must be connected before you can create KMS keys in the key store or use the KMS keys it contains. You can disconnect and reconnect a custom key store at any time.

To connect a custom key store, its associated CloudHSM cluster must have at least one active HSM. To get the number of active HSMs in a cluster, use the DescribeClusters operation. To add HSMs to the cluster, use the CreateHsm operation. Also, the kmsuser crypto user (CU) must not be logged into the cluster. This prevents KMS from using this account to log in.

The connection process can take an extended amount of time to complete; up to 20 minutes. This operation starts the connection process, but it does not wait for it to complete. When it succeeds, this operation quickly returns an HTTP 200 response and a JSON object with no properties. However, this response does not indicate that the custom key store is connected. To get the connection state of the custom key store, use the DescribeCustomKeyStores operation.

During the connection process, KMS finds the CloudHSM cluster that is associated with the custom key store, creates the connection infrastructure, connects to the cluster, logs into the CloudHSM client as the kmsuser CU, and rotates its password.

The ConnectCustomKeyStore operation might fail for various reasons. To find the reason, use the DescribeCustomKeyStores operation and see the ConnectionErrorCode in the response. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.

To fix the failure, use the DisconnectCustomKeyStore operation to disconnect the custom key store, correct the error, use the UpdateCustomKeyStore operation if necessary, and then use ConnectCustomKeyStore again.

If you are having trouble connecting or disconnecting a custom key store, see Troubleshooting a Custom Key Store in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:ConnectCustomKeyStore (IAM policy)

Related operations

" + "documentation":"

Connects or reconnects a custom key store to its backing key store. For an CloudHSM key store, ConnectCustomKeyStore connects the key store to its associated CloudHSM cluster. For an external key store, ConnectCustomKeyStore connects the key store to the external key store proxy that communicates with your external key manager.

The custom key store must be connected before you can create KMS keys in the key store or use the KMS keys it contains. You can disconnect and reconnect a custom key store at any time.

The connection process for a custom key store can take an extended amount of time to complete. This operation starts the connection process, but it does not wait for it to complete. When it succeeds, this operation quickly returns an HTTP 200 response and a JSON object with no properties. However, this response does not indicate that the custom key store is connected. To get the connection state of the custom key store, use the DescribeCustomKeyStores operation.

This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

The ConnectCustomKeyStore operation might fail for various reasons. To find the reason, use the DescribeCustomKeyStores operation and see the ConnectionErrorCode in the response. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.

To fix the failure, use the DisconnectCustomKeyStore operation to disconnect the custom key store, correct the error, use the UpdateCustomKeyStore operation if necessary, and then use ConnectCustomKeyStore again.

CloudHSM key store

During the connection process for an CloudHSM key store, KMS finds the CloudHSM cluster that is associated with the custom key store, creates the connection infrastructure, connects to the cluster, logs into the CloudHSM client as the kmsuser CU, and rotates its password.

To connect an CloudHSM key store, its associated CloudHSM cluster must have at least one active HSM. To get the number of active HSMs in a cluster, use the DescribeClusters operation. To add HSMs to the cluster, use the CreateHsm operation. Also, the kmsuser crypto user (CU) must not be logged into the cluster. This prevents KMS from using this account to log in.

If you are having trouble connecting or disconnecting a CloudHSM key store, see Troubleshooting an CloudHSM key store in the Key Management Service Developer Guide.

External key store

When you connect an external key store that uses public endpoint connectivity, KMS tests its ability to communicate with your external key manager by sending a request via the external key store proxy.

When you connect to an external key store that uses VPC endpoint service connectivity, KMS establishes the networking elements that it needs to communicate with your external key manager via the external key store proxy. This includes creating an interface endpoint to the VPC endpoint service and a private hosted zone for traffic between KMS and the VPC endpoint service.

To connect an external key store, KMS must be able to connect to the external key store proxy, the external key store proxy must be able to communicate with your external key manager, and the external key manager must be available for cryptographic operations.

If you are having trouble connecting or disconnecting an external key store, see Troubleshooting an external key store in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:ConnectCustomKeyStore (IAM policy)

Related operations

" }, "CreateAlias":{ "name":"CreateAlias", @@ -63,7 +63,7 @@ {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Creates a friendly name for a KMS key.

Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC in KMS in the Key Management Service Developer Guide.

You can use an alias to identify a KMS key in the KMS console, in the DescribeKey operation and in cryptographic operations, such as Encrypt and GenerateDataKey. You can also change the KMS key that's associated with the alias (UpdateAlias) or delete the alias (DeleteAlias) at any time. These operations don't affect the underlying KMS key.

You can associate the alias with any customer managed key in the same Amazon Web Services Region. Each alias is associated with only one KMS key at a time, but a KMS key can have multiple aliases. A valid KMS key is required. You can't create an alias without a KMS key.

The alias must be unique in the account and Region, but you can have aliases with the same name in different Regions. For detailed information about aliases, see Using aliases in the Key Management Service Developer Guide.

This operation does not return a response. To get the alias that you created, use the ListAliases operation.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on an alias in a different Amazon Web Services account.

Required permissions

For details, see Controlling access to aliases in the Key Management Service Developer Guide.

Related operations:

" + "documentation":"

Creates a friendly name for a KMS key.

Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

You can use an alias to identify a KMS key in the KMS console, in the DescribeKey operation and in cryptographic operations, such as Encrypt and GenerateDataKey. You can also change the KMS key that's associated with the alias (UpdateAlias) or delete the alias (DeleteAlias) at any time. These operations don't affect the underlying KMS key.

You can associate the alias with any customer managed key in the same Amazon Web Services Region. Each alias is associated with only one KMS key at a time, but a KMS key can have multiple aliases. A valid KMS key is required. You can't create an alias without a KMS key.

The alias must be unique in the account and Region, but you can have aliases with the same name in different Regions. For detailed information about aliases, see Using aliases in the Key Management Service Developer Guide.

This operation does not return a response. To get the alias that you created, use the ListAliases operation.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on an alias in a different Amazon Web Services account.

Required permissions

For details, see Controlling access to aliases in the Key Management Service Developer Guide.

Related operations:

" }, "CreateCustomKeyStore":{ "name":"CreateCustomKeyStore", @@ -80,9 +80,19 @@ {"shape":"KMSInternalException"}, {"shape":"CloudHsmClusterNotActiveException"}, {"shape":"IncorrectTrustAnchorException"}, - {"shape":"CloudHsmClusterInvalidConfigurationException"} - ], - "documentation":"

Creates a custom key store that is associated with an CloudHSM cluster that you own and manage.

This operation is part of the custom key store feature feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a single-tenant key store.

Before you create the custom key store, you must assemble the required elements, including an CloudHSM cluster that fulfills the requirements for a custom key store. For details about the required elements, see Assemble the Prerequisites in the Key Management Service Developer Guide.

When the operation completes successfully, it returns the ID of the new custom key store. Before you can use your new custom key store, you need to use the ConnectCustomKeyStore operation to connect the new key store to its CloudHSM cluster. Even if you are not going to use your custom key store immediately, you might want to connect it to verify that all settings are correct and then disconnect it until you are ready to use it.

For help with failures, see Troubleshooting a Custom Key Store in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:CreateCustomKeyStore (IAM policy).

Related operations:

" + {"shape":"CloudHsmClusterInvalidConfigurationException"}, + {"shape":"LimitExceededException"}, + {"shape":"XksProxyUriInUseException"}, + {"shape":"XksProxyUriEndpointInUseException"}, + {"shape":"XksProxyUriUnreachableException"}, + {"shape":"XksProxyIncorrectAuthenticationCredentialException"}, + {"shape":"XksProxyVpcEndpointServiceInUseException"}, + {"shape":"XksProxyVpcEndpointServiceNotFoundException"}, + {"shape":"XksProxyVpcEndpointServiceInvalidConfigurationException"}, + {"shape":"XksProxyInvalidResponseException"}, + {"shape":"XksProxyInvalidConfigurationException"} + ], + "documentation":"

Creates a custom key store backed by a key store that you own and manage. When you use a KMS key in a custom key store for a cryptographic operation, the cryptographic operation is actually performed in your key store using your keys. KMS supports CloudHSM key stores backed by an CloudHSM cluster and external key stores backed by an external key store proxy and external key manager outside of Amazon Web Services.

This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

Before you create the custom key store, the required elements must be in place and operational. We recommend that you use the test tools that KMS provides to verify the configuration of your external key store proxy. For details about the required elements and verification tests, see Assemble the prerequisites (for CloudHSM key stores) or Assemble the prerequisites (for external key stores) in the Key Management Service Developer Guide.

To create a custom key store, use the following parameters.

For external key stores:

Some external key managers provide a simpler method for creating an external key store. For details, see your external key manager documentation.

When creating an external key store in the KMS console, you can upload a JSON-based proxy configuration file with the desired values. You cannot use a proxy configuration with the CreateCustomKeyStore operation. However, you can use the values in the file to help you determine the correct values for the CreateCustomKeyStore parameters.

When the operation completes successfully, it returns the ID of the new custom key store. Before you can use your new custom key store, you need to use the ConnectCustomKeyStore operation to connect a new CloudHSM key store to its CloudHSM cluster, or to connect a new external key store to the external key store proxy for your external key manager. Even if you are not going to use your custom key store immediately, you might want to connect it to verify that all settings are correct and then disconnect it until you are ready to use it.

For help with failures, see Troubleshooting a custom key store in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:CreateCustomKeyStore (IAM policy).

Related operations:

" }, "CreateGrant":{ "name":"CreateGrant", @@ -122,9 +132,12 @@ {"shape":"TagException"}, {"shape":"CustomKeyStoreNotFoundException"}, {"shape":"CustomKeyStoreInvalidStateException"}, - {"shape":"CloudHsmClusterInvalidConfigurationException"} + {"shape":"CloudHsmClusterInvalidConfigurationException"}, + {"shape":"XksKeyInvalidConfigurationException"}, + {"shape":"XksKeyAlreadyInUseException"}, + {"shape":"XksKeyNotFoundException"} ], - "documentation":"

Creates a unique customer managed KMS key in your Amazon Web Services account and Region.

In addition to the required parameters, you can use the optional parameters to specify a key policy, description, tags, and other useful elements for any key type.

KMS is replacing the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

To create different types of KMS keys, use the following guidance:

Symmetric encryption KMS key

To create a symmetric encryption KMS key, you aren't required to specify any parameters. The default value for KeySpec, SYMMETRIC_DEFAULT, and the default value for KeyUsage, ENCRYPT_DECRYPT, create a symmetric encryption KMS key. For technical details, see SYMMETRIC_DEFAULT key spec in the Key Management Service Developer Guide.

If you need a key for basic encryption and decryption or you are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically used to generate data keys and data keys pairs. For details, see GenerateDataKey and GenerateDataKeyPair.

Asymmetric KMS keys

To create an asymmetric KMS key, use the KeySpec parameter to specify the type of key material in the KMS key. Then, use the KeyUsage parameter to determine whether the KMS key will be used to encrypt and decrypt or sign and verify. You can't change these properties after the KMS key is created.

Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an SM2 key pair (China Regions only). The private key in an asymmetric KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key so it can be used outside of KMS. KMS keys with RSA or SM2 key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). KMS keys with ECC key pairs can be used only to sign and verify messages. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

HMAC KMS key

To create an HMAC KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. Then set the KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage even though GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys. You can't change these properties after the KMS key is created.

HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.

HMAC KMS keys are not supported in all Amazon Web Services Regions. If you try to create an HMAC KMS key in an Amazon Web Services Region in which HMAC keys are not supported, the CreateKey operation returns an UnsupportedOperationException. For a list of Regions in which HMAC KMS keys are supported, see HMAC keys in KMS in the Key Management Service Developer Guide.

Multi-Region primary keys
Imported key material

To create a multi-Region primary key in the local Amazon Web Services Region, use the MultiRegion parameter with a value of True. To create a multi-Region replica key, that is, a KMS key with the same key ID and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its primary key to a replica key, use the UpdatePrimaryRegion operation.

You can create multi-Region KMS keys for all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't create multi-Region keys in a custom key store.

This operation supports multi-Region keys, an KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

To import your own key material, begin by creating a symmetric encryption KMS key with no key material. To do this, use the Origin parameter of CreateKey with a value of EXTERNAL. Next, use GetParametersForImport operation to get a public key and import token, and use the public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For step-by-step instructions, see Importing Key Material in the Key Management Service Developer Guide .

This feature supports only symmetric encryption KMS keys, including multi-Region symmetric encryption KMS keys. You cannot import key material into any other type of KMS key.

To create a multi-Region primary key with imported key material, use the Origin parameter of CreateKey with a value of EXTERNAL and the MultiRegion parameter with a value of True. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

Custom key store

To create a symmetric encryption KMS key in a custom key store, use the CustomKeyStoreId parameter to specify the custom key store. You must also use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the custom key store must have at least two active HSMs in different Availability Zones in the Amazon Web Services Region.

Custom key stores support only symmetric encryption KMS keys. You cannot create an HMAC KMS key or an asymmetric KMS key in a custom key store. For information about custom key stores in KMS see Custom key stores in KMS in the Key Management Service Developer Guide .

Cross-account use: No. You cannot use this operation to create a KMS key in a different Amazon Web Services account.

Required permissions: kms:CreateKey (IAM policy). To use the Tags parameter, kms:TagResource (IAM policy). For examples and information about related permissions, see Allow a user to create KMS keys in the Key Management Service Developer Guide.

Related operations:

" + "documentation":"

Creates a unique customer managed KMS key in your Amazon Web Services account and Region. You can use a KMS key in cryptographic operations, such as encryption and signing. Some Amazon Web Services services let you use KMS keys that you create and manage to protect your service resources.

A KMS key is a logical representation of a cryptographic key. In addition to the key material used in cryptographic operations, a KMS key includes metadata, such as the key ID, key policy, creation date, description, and key state. For details, see Managing keys in the Key Management Service Developer Guide

Use the parameters of CreateKey to specify the type of KMS key, the source of its key material, its key policy, description, tags, and other properties.

KMS has replaced the term customer master key (CMK) with KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

To create different types of KMS keys, use the following guidance:

Symmetric encryption KMS key

By default, CreateKey creates a symmetric encryption KMS key with key material that KMS generates. This is the basic and most widely used type of KMS key, and provides the best performance.

To create a symmetric encryption KMS key, you don't need to specify any parameters. The default value for KeySpec, SYMMETRIC_DEFAULT, the default value for KeyUsage, ENCRYPT_DECRYPT, and the default value for Origin, AWS_KMS, create a symmetric encryption KMS key with KMS key material.

If you need a key for basic encryption and decryption or you are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically used to generate data keys and data key pairs. For details, see GenerateDataKey and GenerateDataKeyPair.

Asymmetric KMS keys

To create an asymmetric KMS key, use the KeySpec parameter to specify the type of key material in the KMS key. Then, use the KeyUsage parameter to determine whether the KMS key will be used to encrypt and decrypt or sign and verify. You can't change these properties after the KMS key is created.

Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an SM2 key pair (China Regions only). The private key in an asymmetric KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key so it can be used outside of KMS. KMS keys with RSA or SM2 key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). KMS keys with ECC key pairs can be used only to sign and verify messages. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

HMAC KMS key

To create an HMAC KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. Then set the KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage even though GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys. You can't change these properties after the KMS key is created.

HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.

HMAC KMS keys are not supported in all Amazon Web Services Regions. If you try to create an HMAC KMS key in an Amazon Web Services Region in which HMAC keys are not supported, the CreateKey operation returns an UnsupportedOperationException. For a list of Regions in which HMAC KMS keys are supported, see HMAC keys in KMS in the Key Management Service Developer Guide.

Multi-Region primary keys
Imported key material

To create a multi-Region primary key in the local Amazon Web Services Region, use the MultiRegion parameter with a value of True. To create a multi-Region replica key, that is, a KMS key with the same key ID and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its primary key to a replica key, use the UpdatePrimaryRegion operation.

You can create multi-Region KMS keys for all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't create multi-Region keys in a custom key store.

This operation supports multi-Region keys, a KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

To import your own key material into a KMS key, begin by creating a symmetric encryption KMS key with no key material. To do this, use the Origin parameter of CreateKey with a value of EXTERNAL. Next, use the GetParametersForImport operation to get a public key and import token, and use the public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For step-by-step instructions, see Importing Key Material in the Key Management Service Developer Guide .

This feature supports only symmetric encryption KMS keys, including multi-Region symmetric encryption KMS keys. You cannot import key material into any other type of KMS key.

To create a multi-Region primary key with imported key material, use the Origin parameter of CreateKey with a value of EXTERNAL and the MultiRegion parameter with a value of True. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For instructions, see Importing key material into multi-Region keys. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

Custom key store

A custom key store lets you protect your Amazon Web Services resources using keys in a backing key store that you own and manage. When you request a cryptographic operation with a KMS key in a custom key store, the operation is performed in the backing key store using its cryptographic keys.

KMS supports CloudHSM key stores backed by a CloudHSM cluster and external key stores backed by an external key manager outside of Amazon Web Services. When you create a KMS key in a CloudHSM key store, KMS generates an encryption key in the CloudHSM cluster and associates it with the KMS key. When you create a KMS key in an external key store, you specify an existing encryption key in the external key manager.

Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation.

Before you create a KMS key in a custom key store, the ConnectionState of the key store must be CONNECTED. To connect the custom key store, use the ConnectCustomKeyStore operation. To find the ConnectionState, use the DescribeCustomKeyStores operation.

To create a KMS key in a custom key store, use the CustomKeyStoreId. Use the default KeySpec value, SYMMETRIC_DEFAULT, and the default KeyUsage value, ENCRYPT_DECRYPT to create a symmetric encryption key. No other key type is supported in a custom key store.

To create a KMS key in a CloudHSM key store, use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the custom key store must have at least two active HSMs in different Availability Zones in the Amazon Web Services Region.

To create a KMS key in an external key store, use the Origin parameter with a value of EXTERNAL_KEY_STORE and an XksKeyId parameter that identifies an existing external key.

Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation.

Cross-account use: No. You cannot use this operation to create a KMS key in a different Amazon Web Services account.

Required permissions: kms:CreateKey (IAM policy). To use the Tags parameter, kms:TagResource (IAM policy). For examples and information about related permissions, see Allow a user to create KMS keys in the Key Management Service Developer Guide.

Related operations:

" }, "Decrypt":{ "name":"Decrypt", @@ -146,7 +159,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Decrypts ciphertext that was encrypted by a KMS key using any of the following operations:

You can use this operation to decrypt ciphertext that was encrypted under a symmetric encryption KMS key or an asymmetric encryption KMS key. When the KMS key is asymmetric, you must specify the KMS key and the encryption algorithm that was used to encrypt the ciphertext. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

The Decrypt operation also decrypts ciphertext that was encrypted outside of KMS by the public key in an KMS asymmetric KMS key. However, it cannot decrypt ciphertext produced by other libraries, such as the Amazon Web Services Encryption SDK or Amazon S3 client-side encryption. These libraries return a ciphertext format that is incompatible with KMS.

If the ciphertext was encrypted under a symmetric encryption KMS key, the KeyId parameter is optional. KMS can get this information from metadata that it adds to the symmetric ciphertext blob. This feature adds durability to your implementation by ensuring that authorized users can decrypt ciphertext decades after it was encrypted, even if they've lost track of the key ID. However, specifying the KMS key is always recommended as a best practice. When you use the KeyId parameter to specify a KMS key, KMS only uses the KMS key you specify. If the ciphertext was encrypted under a different KMS key, the Decrypt operation fails. This practice ensures that you use the KMS key that you intend.

Whenever possible, use key policies to give users permission to call the Decrypt operation on a particular KMS key, instead of using IAM policies. Otherwise, you might create an IAM user policy that gives the user Decrypt permission on all KMS keys. This user could decrypt ciphertext that was encrypted by KMS keys in other accounts if the key policy for the cross-account KMS key permits it. If you must use an IAM policy for Decrypt permissions, limit the user to particular KMS keys or particular trusted accounts. For details, see Best practices for IAM policies in the Key Management Service Developer Guide.

Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:Decrypt (key policy)

Related operations:

" + "documentation":"

Decrypts ciphertext that was encrypted by a KMS key using any of the following operations:

You can use this operation to decrypt ciphertext that was encrypted under a symmetric encryption KMS key or an asymmetric encryption KMS key. When the KMS key is asymmetric, you must specify the KMS key and the encryption algorithm that was used to encrypt the ciphertext. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

The Decrypt operation also decrypts ciphertext that was encrypted outside of KMS by the public key in an KMS asymmetric KMS key. However, it cannot decrypt symmetric ciphertext produced by other libraries, such as the Amazon Web Services Encryption SDK or Amazon S3 client-side encryption. These libraries return a ciphertext format that is incompatible with KMS.

If the ciphertext was encrypted under a symmetric encryption KMS key, the KeyId parameter is optional. KMS can get this information from metadata that it adds to the symmetric ciphertext blob. This feature adds durability to your implementation by ensuring that authorized users can decrypt ciphertext decades after it was encrypted, even if they've lost track of the key ID. However, specifying the KMS key is always recommended as a best practice. When you use the KeyId parameter to specify a KMS key, KMS only uses the KMS key you specify. If the ciphertext was encrypted under a different KMS key, the Decrypt operation fails. This practice ensures that you use the KMS key that you intend.

Whenever possible, use key policies to give users permission to call the Decrypt operation on a particular KMS key, instead of using IAM policies. Otherwise, you might create an IAM user policy that gives the user Decrypt permission on all KMS keys. This user could decrypt ciphertext that was encrypted by KMS keys in other accounts if the key policy for the cross-account KMS key permits it. If you must use an IAM policy for Decrypt permissions, limit the user to particular KMS keys or particular trusted accounts. For details, see Best practices for IAM policies in the Key Management Service Developer Guide.

Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:Decrypt (key policy)

Related operations:

" }, "DeleteAlias":{ "name":"DeleteAlias", @@ -161,7 +174,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Deletes the specified alias.

Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC in KMS in the Key Management Service Developer Guide.

Because an alias is not a property of a KMS key, you can delete and change the aliases of a KMS key without affecting the KMS key. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all KMS keys, use the ListAliases operation.

Each KMS key can have multiple aliases. To change the alias of a KMS key, use DeleteAlias to delete the current alias and CreateAlias to create a new alias. To associate an existing alias with a different KMS key, call UpdateAlias.

Cross-account use: No. You cannot perform this operation on an alias in a different Amazon Web Services account.

Required permissions

For details, see Controlling access to aliases in the Key Management Service Developer Guide.

Related operations:

" + "documentation":"

Deletes the specified alias.

Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

Because an alias is not a property of a KMS key, you can delete and change the aliases of a KMS key without affecting the KMS key. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all KMS keys, use the ListAliases operation.

Each KMS key can have multiple aliases. To change the alias of a KMS key, use DeleteAlias to delete the current alias and CreateAlias to create a new alias. To associate an existing alias with a different KMS key, call UpdateAlias.

Cross-account use: No. You cannot perform this operation on an alias in a different Amazon Web Services account.

Required permissions

For details, see Controlling access to aliases in the Key Management Service Developer Guide.

Related operations:

" }, "DeleteCustomKeyStore":{ "name":"DeleteCustomKeyStore", @@ -177,7 +190,7 @@ {"shape":"CustomKeyStoreNotFoundException"}, {"shape":"KMSInternalException"} ], - "documentation":"

Deletes a custom key store. This operation does not delete the CloudHSM cluster that is associated with the custom key store, or affect any users or keys in the cluster.

The custom key store that you delete cannot contain any KMS keys. Before deleting the key store, verify that you will never need to use any of the KMS keys in the key store for any cryptographic operations. Then, use ScheduleKeyDeletion to delete the KMS keys from the key store. When the scheduled waiting period expires, the ScheduleKeyDeletion operation deletes the KMS keys. Then it makes a best effort to delete the key material from the associated cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups.

After all KMS keys are deleted from KMS, use DisconnectCustomKeyStore to disconnect the key store from KMS. Then, you can delete the custom key store.

Instead of deleting the custom key store, consider using DisconnectCustomKeyStore to disconnect it from KMS. While the key store is disconnected, you cannot create or use the KMS keys in the key store. But, you do not need to delete KMS keys and you can reconnect a disconnected custom key store at any time.

If the operation succeeds, it returns a JSON object with no properties.

This operation is part of the custom key store feature feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a single-tenant key store.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:DeleteCustomKeyStore (IAM policy)

Related operations:

" + "documentation":"

Deletes a custom key store. This operation does not affect any backing elements of the custom key store. It does not delete the CloudHSM cluster that is associated with a CloudHSM key store, or affect any users or keys in the cluster. For an external key store, it does not affect the external key store proxy, external key manager, or any external keys.

This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

The custom key store that you delete cannot contain any KMS keys. Before deleting the key store, verify that you will never need to use any of the KMS keys in the key store for any cryptographic operations. Then, use ScheduleKeyDeletion to delete the KMS keys from the key store. After the required waiting period expires and all KMS keys are deleted from the custom key store, use DisconnectCustomKeyStore to disconnect the key store from KMS. Then, you can delete the custom key store.

For keys in a CloudHSM key store, the ScheduleKeyDeletion operation makes a best effort to delete the key material from the associated cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups. KMS never creates, manages, or deletes cryptographic keys in the external key manager associated with an external key store. You must manage them using your external key manager tools.

Instead of deleting the custom key store, consider using the DisconnectCustomKeyStore operation to disconnect the custom key store from its backing key store. While the key store is disconnected, you cannot create or use the KMS keys in the key store. But, you do not need to delete KMS keys and you can reconnect a disconnected custom key store at any time.

If the operation succeeds, it returns a JSON object with no properties.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:DeleteCustomKeyStore (IAM policy)

Related operations:

" }, "DeleteImportedKeyMaterial":{ "name":"DeleteImportedKeyMaterial", @@ -209,7 +222,7 @@ {"shape":"InvalidMarkerException"}, {"shape":"KMSInternalException"} ], - "documentation":"

Gets information about custom key stores in the account and Region.

This operation is part of the custom key store feature feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a single-tenant key store.

By default, this operation returns information about all custom key stores in the account and Region. To get only information about a particular custom key store, use either the CustomKeyStoreName or CustomKeyStoreId parameter (but not both).

To determine whether the custom key store is connected to its CloudHSM cluster, use the ConnectionState element in the response. If an attempt to connect the custom key store failed, the ConnectionState value is FAILED and the ConnectionErrorCode element in the response indicates the cause of the failure. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.

Custom key stores have a DISCONNECTED connection state if the key store has never been connected or you use the DisconnectCustomKeyStore operation to disconnect it. If your custom key store state is CONNECTED but you are having trouble using it, make sure that its associated CloudHSM cluster is active and contains the minimum number of HSMs required for the operation, if any.

For help repairing your custom key store, see the Troubleshooting Custom Key Stores topic in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:DescribeCustomKeyStores (IAM policy)

Related operations:

" + "documentation":"

Gets information about custom key stores in the account and Region.

This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

By default, this operation returns information about all custom key stores in the account and Region. To get only information about a particular custom key store, use either the CustomKeyStoreName or CustomKeyStoreId parameter (but not both).

To determine whether the custom key store is connected to its CloudHSM cluster or external key store proxy, use the ConnectionState element in the response. If an attempt to connect the custom key store failed, the ConnectionState value is FAILED and the ConnectionErrorCode element in the response indicates the cause of the failure. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.

Custom key stores have a DISCONNECTED connection state if the key store has never been connected or you used the DisconnectCustomKeyStore operation to disconnect it. Otherwise, the connection state is CONNECTED. If your custom key store connection state is CONNECTED but you are having trouble using it, verify that the backing store is active and available. For a CloudHSM key store, verify that the associated CloudHSM cluster is active and contains the minimum number of HSMs required for the operation, if any. For an external key store, verify that the external key store proxy and its associated external key manager are reachable and enabled.

For help repairing your CloudHSM key store, see the Troubleshooting CloudHSM key stores. For help repairing your external key store, see the Troubleshooting external key stores. Both topics are in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:DescribeCustomKeyStores (IAM policy)

Related operations:

" }, "DescribeKey":{ "name":"DescribeKey", @@ -225,7 +238,7 @@ {"shape":"DependencyTimeoutException"}, {"shape":"KMSInternalException"} ], - "documentation":"

Provides detailed information about a KMS key. You can run DescribeKey on a customer managed key or an Amazon Web Services managed key.

This detailed information includes the key ARN, creation date (and deletion date, if applicable), the key state, and the origin and expiration date (if any) of the key material. It includes fields, like KeySpec, that help you distinguish different types of KMS keys. It also displays the key usage (encryption, signing, or generating and verifying MACs) and the algorithms that the KMS key supports. For KMS keys in custom key stores, it includes information about the custom key store, such as the key store ID and the CloudHSM cluster ID. For multi-Region keys, it displays the primary key and all related replica keys.

DescribeKey does not return the following information:

In general, DescribeKey is a non-mutating operation. It returns data about KMS keys, but doesn't change them. However, Amazon Web Services services use DescribeKey to create Amazon Web Services managed keys from a predefined Amazon Web Services alias with no key ID.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:DescribeKey (key policy)

Related operations:

" + "documentation":"

Provides detailed information about a KMS key. You can run DescribeKey on a customer managed key or an Amazon Web Services managed key.

This detailed information includes the key ARN, creation date (and deletion date, if applicable), the key state, and the origin and expiration date (if any) of the key material. It includes fields, like KeySpec, that help you distinguish different types of KMS keys. It also displays the key usage (encryption, signing, or generating and verifying MACs) and the algorithms that the KMS key supports. For multi-Region keys, it displays the primary key and all related replica keys. For KMS keys in CloudHSM key stores, it includes information about the custom key store, such as the key store ID and the CloudHSM cluster ID. For KMS keys in external key stores, it includes the custom key store ID and the ID and status of the associated external key.

DescribeKey does not return the following information:

In general, DescribeKey is a non-mutating operation. It returns data about KMS keys, but doesn't change them. However, Amazon Web Services services use DescribeKey to create Amazon Web Services managed keys from a predefined Amazon Web Services alias with no key ID.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:DescribeKey (key policy)

Related operations:

" }, "DisableKey":{ "name":"DisableKey", @@ -259,7 +272,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

Disables automatic rotation of the key material of the specified symmetric encryption KMS key.

Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable or disable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. The key rotation status of these KMS keys is always false. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

You can enable (EnableKeyRotation) and disable automatic rotation of the key material in customer managed KMS keys. Key material rotation of Amazon Web Services managed KMS keys is not configurable. KMS always rotates the key material for every year. Rotation of Amazon Web Services owned KMS keys varies.

In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years to every year. For details, see EnableKeyRotation.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:DisableKeyRotation (key policy)

Related operations:

" + "documentation":"

Disables automatic rotation of the key material of the specified symmetric encryption KMS key.

Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

You can enable (EnableKeyRotation) and disable automatic rotation of the key material in customer managed KMS keys. Key material rotation of Amazon Web Services managed KMS keys is not configurable. KMS always rotates the key material every year. Rotation of Amazon Web Services owned KMS keys varies.

In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years to every year. For details, see EnableKeyRotation.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:DisableKeyRotation (key policy)

Related operations:

" }, "DisconnectCustomKeyStore":{ "name":"DisconnectCustomKeyStore", @@ -274,7 +287,7 @@ {"shape":"CustomKeyStoreNotFoundException"}, {"shape":"KMSInternalException"} ], - "documentation":"

Disconnects the custom key store from its associated CloudHSM cluster. While a custom key store is disconnected, you can manage the custom key store and its KMS keys, but you cannot create or use KMS keys in the custom key store. You can reconnect the custom key store at any time.

While a custom key store is disconnected, all attempts to create KMS keys in the custom key store or to use existing KMS keys in cryptographic operations will fail. This action can prevent users from storing and accessing sensitive data.

To find the connection state of a custom key store, use the DescribeCustomKeyStores operation. To reconnect a custom key store, use the ConnectCustomKeyStore operation.

If the operation succeeds, it returns a JSON object with no properties.

This operation is part of the custom key store feature feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a single-tenant key store.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:DisconnectCustomKeyStore (IAM policy)

Related operations:

" + "documentation":"

Disconnects the custom key store from its backing key store. This operation disconnects an CloudHSM key store from its associated CloudHSM cluster or disconnects an external key store from the external key store proxy that communicates with your external key manager.

This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

While a custom key store is disconnected, you can manage the custom key store and its KMS keys, but you cannot create or use its KMS keys. You can reconnect the custom key store at any time.

While a custom key store is disconnected, all attempts to create KMS keys in the custom key store or to use existing KMS keys in cryptographic operations will fail. This action can prevent users from storing and accessing sensitive data.

When you disconnect a custom key store, its ConnectionState changes to Disconnected. To find the connection state of a custom key store, use the DescribeCustomKeyStores operation. To reconnect a custom key store, use the ConnectCustomKeyStore operation.

If the operation succeeds, it returns a JSON object with no properties.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:DisconnectCustomKeyStore (IAM policy)

Related operations:

" }, "EnableKey":{ "name":"EnableKey", @@ -309,7 +322,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

Enables automatic rotation of the key material of the specified symmetric encryption KMS key.

When you enable automatic rotation of acustomer managed KMS key, KMS rotates the key material of the KMS key one year (approximately 365 days) from the enable date and every year thereafter. You can monitor rotation of the key material for your KMS keys in CloudTrail and Amazon CloudWatch. To disable rotation of the key material in a customer managed KMS key, use the DisableKeyRotation operation.

Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable or disable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. The key rotation status of these KMS keys is always false. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

You cannot enable or disable automatic rotation Amazon Web Services managed KMS keys. KMS always rotates the key material of Amazon Web Services managed keys every year. Rotation of Amazon Web Services owned KMS keys varies.

In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years (approximately 1,095 days) to every year (approximately 365 days).

New Amazon Web Services managed keys are automatically rotated one year after they are created, and approximately every year thereafter.

Existing Amazon Web Services managed keys are automatically rotated one year after their most recent rotation, and every year thereafter.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:EnableKeyRotation (key policy)

Related operations:

" + "documentation":"

Enables automatic rotation of the key material of the specified symmetric encryption KMS key.

When you enable automatic rotation of a customer managed KMS key, KMS rotates the key material of the KMS key one year (approximately 365 days) from the enable date and every year thereafter. You can monitor rotation of the key material for your KMS keys in CloudTrail and Amazon CloudWatch. To disable rotation of the key material in a customer managed KMS key, use the DisableKeyRotation operation.

Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

You cannot enable or disable automatic rotation of Amazon Web Services managed KMS keys. KMS always rotates the key material of Amazon Web Services managed keys every year. Rotation of Amazon Web Services owned KMS keys varies.

In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years (approximately 1,095 days) to every year (approximately 365 days).

New Amazon Web Services managed keys are automatically rotated one year after they are created, and approximately every year thereafter.

Existing Amazon Web Services managed keys are automatically rotated one year after their most recent rotation, and every year thereafter.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:EnableKeyRotation (key policy)

Related operations:

" }, "Encrypt":{ "name":"Encrypt", @@ -411,7 +424,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Returns a unique symmetric data key for use outside of KMS. This operation returns a data key that is encrypted under a symmetric encryption KMS key that you specify. The bytes in the key are random; they are not related to the caller or to the KMS key.

GenerateDataKeyWithoutPlaintext is identical to the GenerateDataKey operation except that it does not return a plaintext copy of the data key.

This operation is useful for systems that need to encrypt data at some point, but not immediately. When you need to encrypt the data, you call the Decrypt operation on the encrypted copy of the key.

It's also useful in distributed systems with different levels of trust. For example, you might store encrypted data in containers. One component of your system creates new containers and stores an encrypted data key with each container. Then, a different component puts the data into the containers. That component first decrypts the data key, uses the plaintext data key to encrypt data, puts the encrypted data into the container, and then destroys the plaintext data key. In this system, the component that creates the containers never sees the plaintext data key.

To request an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operations.

To generate a data key, you must specify the symmetric encryption KMS key that is used to encrypt the data key. You cannot use an asymmetric KMS key or a key in a custom key store to generate a data key. To get the type of your KMS key, use the DescribeKey operation.

If the operation succeeds, you will find the encrypted copy of the data key in the CiphertextBlob field.

You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GenerateDataKeyWithoutPlaintext (key policy)

Related operations:

" + "documentation":"

Returns a unique symmetric data key for use outside of KMS. This operation returns a data key that is encrypted under a symmetric encryption KMS key that you specify. The bytes in the key are random; they are not related to the caller or to the KMS key.

GenerateDataKeyWithoutPlaintext is identical to the GenerateDataKey operation except that it does not return a plaintext copy of the data key.

This operation is useful for systems that need to encrypt data at some point, but not immediately. When you need to encrypt the data, you call the Decrypt operation on the encrypted copy of the key.

It's also useful in distributed systems with different levels of trust. For example, you might store encrypted data in containers. One component of your system creates new containers and stores an encrypted data key with each container. Then, a different component puts the data into the containers. That component first decrypts the data key, uses the plaintext data key to encrypt data, puts the encrypted data into the container, and then destroys the plaintext data key. In this system, the component that creates the containers never sees the plaintext data key.

To request an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operations.

To generate a data key, you must specify the symmetric encryption KMS key that is used to encrypt the data key. You cannot use an asymmetric KMS key or a key in a custom key store to generate a data key. To get the type of your KMS key, use the DescribeKey operation.

You must also specify the length of the data key. Use either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use the KeySpec parameter.

To generate an SM4 data key (China Regions only), specify a KeySpec value of AES_128 or NumberOfBytes value of 128. The symmetric encryption key used in China Regions to encrypt your data key is an SM4 encryption key.

If the operation succeeds, you will find the encrypted copy of the data key in the CiphertextBlob field.

You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GenerateDataKeyWithoutPlaintext (key policy)

Related operations:

" }, "GenerateMac":{ "name":"GenerateMac", @@ -430,7 +443,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Generates a hash-based message authentication code (HMAC) for a message using an HMAC KMS key and a MAC algorithm that the key supports. The MAC algorithm computes the HMAC for the message and the key as described in RFC 2104.

You can use the HMAC that this operation generates with the VerifyMac operation to demonstrate that the original message has not changed. Also, because a secret key is used to create the hash, you can verify that the party that generated the hash has the required secret key. This operation is part of KMS support for HMAC KMS keys. For details, see HMAC keys in KMS in the Key Management Service Developer Guide .

Best practices recommend that you limit the time during which any signing mechanism, including an HMAC, is effective. This deters an attack where the actor uses a signed message to establish validity repeatedly or long after the message is superseded. HMAC tags do not include a timestamp, but you can include a timestamp in the token or message to help you detect when its time to refresh the HMAC.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GenerateMac (key policy)

Related operations: VerifyMac

" + "documentation":"

Generates a hash-based message authentication code (HMAC) for a message using an HMAC KMS key and a MAC algorithm that the key supports. HMAC KMS keys and the HMAC algorithms that KMS uses conform to industry standards defined in RFC 2104.

You can use the value that GenerateMac returns in the VerifyMac operation to demonstrate that the original message has not changed. Also, because a secret key is used to create the hash, you can verify that the party that generated the hash has the required secret key. You can also use the raw result to implement HMAC-based algorithms such as key derivation functions. This operation is part of KMS support for HMAC KMS keys. For details, see HMAC keys in KMS in the Key Management Service Developer Guide .

Best practices recommend that you limit the time during which any signing mechanism, including an HMAC, is effective. This deters an attack where the actor uses a signed message to establish validity repeatedly or long after the message is superseded. HMAC tags do not include a timestamp, but you can include a timestamp in the token or message to help you detect when it's time to refresh the HMAC.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GenerateMac (key policy)

Related operations: VerifyMac

" }, "GenerateRandom":{ "name":"GenerateRandom", @@ -443,10 +456,11 @@ "errors":[ {"shape":"DependencyTimeoutException"}, {"shape":"KMSInternalException"}, + {"shape":"UnsupportedOperationException"}, {"shape":"CustomKeyStoreNotFoundException"}, {"shape":"CustomKeyStoreInvalidStateException"} ], - "documentation":"

Returns a random byte string that is cryptographically secure.

You must use the NumberOfBytes parameter to specify the length of the random byte string. There is no default value for string length.

By default, the random byte string is generated in KMS. To generate the byte string in the CloudHSM cluster that is associated with a custom key store, specify the custom key store ID.

Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.

For more information about entropy and random number generation, see Key Management Service Cryptographic Details.

Cross-account use: Not applicable. GenerateRandom does not use any account-specific resources, such as KMS keys.

Required permissions: kms:GenerateRandom (IAM policy)

" + "documentation":"

Returns a random byte string that is cryptographically secure.

You must use the NumberOfBytes parameter to specify the length of the random byte string. There is no default value for string length.

By default, the random byte string is generated in KMS. To generate the byte string in the CloudHSM cluster associated with an CloudHSM key store, use the CustomKeyStoreId parameter.

Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.

For more information about entropy and random number generation, see Key Management Service Cryptographic Details.

Cross-account use: Not applicable. GenerateRandom does not use any account-specific resources, such as KMS keys.

Required permissions: kms:GenerateRandom (IAM policy)

" }, "GetKeyPolicy":{ "name":"GetKeyPolicy", @@ -481,7 +495,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

Gets a Boolean value that indicates whether automatic rotation of the key material is enabled for the specified KMS key.

When you enable automatic rotation for customer managed KMS keys, KMS rotates the key material of the KMS key one year (approximately 365 days) from the enable date and every year thereafter. You can monitor rotation of the key material for your KMS keys in CloudTrail and Amazon CloudWatch.

Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable or disable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. The key rotation status of these KMS keys is always false. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key..

You can enable (EnableKeyRotation) and disable automatic rotation (DisableKeyRotation) of the key material in customer managed KMS keys. Key material rotation of Amazon Web Services managed KMS keys is not configurable. KMS always rotates the key material in Amazon Web Services managed KMS keys every year. The key rotation status for Amazon Web Services managed KMS keys is always true.

In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years to every year. For details, see EnableKeyRotation.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

Required permissions: kms:GetKeyRotationStatus (key policy)

Related operations:

" + "documentation":"

Gets a Boolean value that indicates whether automatic rotation of the key material is enabled for the specified KMS key.

When you enable automatic rotation for customer managed KMS keys, KMS rotates the key material of the KMS key one year (approximately 365 days) from the enable date and every year thereafter. You can monitor rotation of the key material for your KMS keys in CloudTrail and Amazon CloudWatch.

Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

You can enable (EnableKeyRotation) and disable automatic rotation (DisableKeyRotation) of the key material in customer managed KMS keys. Key material rotation of Amazon Web Services managed KMS keys is not configurable. KMS always rotates the key material in Amazon Web Services managed KMS keys every year. The key rotation status for Amazon Web Services managed KMS keys is always true.

In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years to every year. For details, see EnableKeyRotation.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

Required permissions: kms:GetKeyRotationStatus (key policy)

Related operations:

" }, "GetParametersForImport":{ "name":"GetParametersForImport", @@ -499,7 +513,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Returns the items you need to import key material into a symmetric encryption KMS key. For more information about importing key material into KMS, see Importing key material in the Key Management Service Developer Guide.

This operation returns a public key and an import token. Use the public key to encrypt the symmetric key material. Store the import token to send with a subsequent ImportKeyMaterial request.

You must specify the key ID of the symmetric encryption KMS key into which you will import key material. This KMS key's Origin must be EXTERNAL. You must also specify the wrapping algorithm and type of wrapping key (public key) that you will use to encrypt the key material. You cannot perform this operation on an asymmetric KMS key, an HMAC KMS key, or on any KMS key in a different Amazon Web Services account.

To import key material, you must use the public key and import token from the same response. These items are valid for 24 hours. The expiration date and time appear in the GetParametersForImport response. You cannot use an expired token in an ImportKeyMaterial request. If your key and token expire, send another GetParametersForImport request.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:GetParametersForImport (key policy)

Related operations:

" + "documentation":"

Returns the items you need to import key material into a symmetric encryption KMS key. For more information about importing key material into KMS, see Importing key material in the Key Management Service Developer Guide.

This operation returns a public key and an import token. Use the public key to encrypt the symmetric key material. Store the import token to send with a subsequent ImportKeyMaterial request.

You must specify the key ID of the symmetric encryption KMS key into which you will import key material. The KMS key Origin must be EXTERNAL. You must also specify the wrapping algorithm and type of wrapping key (public key) that you will use to encrypt the key material. You cannot perform this operation on an asymmetric KMS key, an HMAC KMS key, or on any KMS key in a different Amazon Web Services account.

To import key material, you must use the public key and import token from the same response. These items are valid for 24 hours. The expiration date and time appear in the GetParametersForImport response. You cannot use an expired token in an ImportKeyMaterial request. If your key and token expire, send another GetParametersForImport request.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:GetParametersForImport (key policy)

Related operations:

" }, "GetPublicKey":{ "name":"GetPublicKey", @@ -521,7 +535,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Returns the public key of an asymmetric KMS key. Unlike the private key of a asymmetric KMS key, which never leaves KMS unencrypted, callers with kms:GetPublicKey permission can download the public key of an asymmetric KMS key. You can share the public key to allow others to encrypt messages and verify signatures outside of KMS. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

You do not need to download the public key. Instead, you can use the public key within KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the public key within KMS, you benefit from the authentication, authorization, and logging that are part of every KMS operation. You also reduce of risk of encrypting data that cannot be decrypted. These features are not effective outside of KMS.

To verify a signature outside of KMS with an SM2 public key (China Regions only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678 as the distinguishing ID. For more information, see Offline verification with SM2 key pairs.

To help you use the public key safely outside of KMS, GetPublicKey returns important information about the public key in the response, including:

Although KMS cannot enforce these restrictions on external operations, it is crucial that you use this information to prevent the public key from being used improperly. For example, you can prevent a public signing key from being used encrypt data, or prevent a public key from being used with an encryption algorithm that is not supported by KMS. You can also avoid errors, such as using the wrong signing algorithm in a verification operation.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GetPublicKey (key policy)

Related operations: CreateKey

" + "documentation":"

Returns the public key of an asymmetric KMS key. Unlike the private key of an asymmetric KMS key, which never leaves KMS unencrypted, callers with kms:GetPublicKey permission can download the public key of an asymmetric KMS key. You can share the public key to allow others to encrypt messages and verify signatures outside of KMS. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

You do not need to download the public key. Instead, you can use the public key within KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the public key within KMS, you benefit from the authentication, authorization, and logging that are part of every KMS operation. You also reduce the risk of encrypting data that cannot be decrypted. These features are not effective outside of KMS.

To help you use the public key safely outside of KMS, GetPublicKey returns important information about the public key in the response, including:

Although KMS cannot enforce these restrictions on external operations, it is crucial that you use this information to prevent the public key from being used improperly. For example, you can prevent a public signing key from being used to encrypt data, or prevent a public key from being used with an encryption algorithm that is not supported by KMS. You can also avoid errors, such as using the wrong signing algorithm in a verification operation.

To verify a signature outside of KMS with an SM2 public key (China Regions only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678 as the distinguishing ID. For more information, see Offline verification with SM2 key pairs.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GetPublicKey (key policy)

Related operations: CreateKey

" }, "ImportKeyMaterial":{ "name":"ImportKeyMaterial", @@ -543,7 +557,7 @@ {"shape":"ExpiredImportTokenException"}, {"shape":"InvalidImportTokenException"} ], - "documentation":"

Imports key material into an existing symmetric encryption KMS key that was created without key material. After you successfully import key material into a KMS key, you can reimport the same key material into that KMS key, but you cannot import different key material.

You cannot perform this operation on an asymmetric KMS key, an HMAC KMS key, or on any KMS key in a different Amazon Web Services account. For more information about creating KMS keys with no key material and then importing key material, see Importing Key Material in the Key Management Service Developer Guide.

Before using this operation, call GetParametersForImport. Its response includes a public key and an import token. Use the public key to encrypt the key material. Then, submit the import token from the same GetParametersForImport response.

When calling this operation, you must specify the following values:

When this operation is successful, the key state of the KMS key changes from PendingImport to Enabled, and you can use the KMS key.

If this operation fails, use the exception to help determine the problem. If the error is related to the key material, the import token, or wrapping key, use GetParametersForImport to get a new public key and import token for the KMS key and repeat the import procedure. For help, see How To Import Key Material in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:ImportKeyMaterial (key policy)

Related operations:

" + "documentation":"

Imports key material into an existing symmetric encryption KMS key that was created without key material. After you successfully import key material into a KMS key, you can reimport the same key material into that KMS key, but you cannot import different key material.

You cannot perform this operation on an asymmetric KMS key, an HMAC KMS key, or on any KMS key in a different Amazon Web Services account. For more information about creating KMS keys with no key material and then importing key material, see Importing Key Material in the Key Management Service Developer Guide.

Before using this operation, call GetParametersForImport. Its response includes a public key and an import token. Use the public key to encrypt the key material. Then, submit the import token from the same GetParametersForImport response.

When calling this operation, you must specify the following values:

When this operation is successful, the key state of the KMS key changes from PendingImport to Enabled, and you can use the KMS key.

If this operation fails, use the exception to help determine the problem. If the error is related to the key material, the import token, or wrapping key, use GetParametersForImport to get a new public key and import token for the KMS key and repeat the import procedure. For help, see How To Import Key Material in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:ImportKeyMaterial (key policy)

Related operations:

" }, "ListAliases":{ "name":"ListAliases", @@ -685,7 +699,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Decrypts ciphertext and then reencrypts it entirely within KMS. You can use this operation to change the KMS key under which data is encrypted, such as when you manually rotate a KMS key or change the KMS key that protects a ciphertext. You can also use it to reencrypt ciphertext under the same KMS key, such as to change the encryption context of a ciphertext.

The ReEncrypt operation can decrypt ciphertext that was encrypted by using a KMS key in a KMS operation, such as Encrypt or GenerateDataKey. It can also decrypt ciphertext that was encrypted by using the public key of an asymmetric KMS key outside of KMS. However, it cannot decrypt ciphertext produced by other libraries, such as the Amazon Web Services Encryption SDK or Amazon S3 client-side encryption. These libraries return a ciphertext format that is incompatible with KMS.

When you use the ReEncrypt operation, you need to provide information for the decrypt operation and the subsequent encrypt operation.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. The source KMS key and destination KMS key can be in different Amazon Web Services accounts. Either or both KMS keys can be in a different account than the caller. To specify a KMS key in a different account, you must use its key ARN or alias ARN.

Required permissions:

To permit reencryption from or to a KMS key, include the \"kms:ReEncrypt*\" permission in your key policy. This permission is automatically included in the key policy when you use the console to create a KMS key. But you must include it manually when you create a KMS key programmatically or when you use the PutKeyPolicy operation to set a key policy.

Related operations:

" + "documentation":"

Decrypts ciphertext and then reencrypts it entirely within KMS. You can use this operation to change the KMS key under which data is encrypted, such as when you manually rotate a KMS key or change the KMS key that protects a ciphertext. You can also use it to reencrypt ciphertext under the same KMS key, such as to change the encryption context of a ciphertext.

The ReEncrypt operation can decrypt ciphertext that was encrypted by using a KMS key in a KMS operation, such as Encrypt or GenerateDataKey. It can also decrypt ciphertext that was encrypted by using the public key of an asymmetric KMS key outside of KMS. However, it cannot decrypt ciphertext produced by other libraries, such as the Amazon Web Services Encryption SDK or Amazon S3 client-side encryption. These libraries return a ciphertext format that is incompatible with KMS.

When you use the ReEncrypt operation, you need to provide information for the decrypt operation and the subsequent encrypt operation.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. The source KMS key and destination KMS key can be in different Amazon Web Services accounts. Either or both KMS keys can be in a different account than the caller. To specify a KMS key in a different account, you must use its key ARN or alias ARN.

Required permissions:

To permit reencryption from or to a KMS key, include the \"kms:ReEncrypt*\" permission in your key policy. This permission is automatically included in the key policy when you use the console to create a KMS key. But you must include it manually when you create a KMS key programmatically or when you use the PutKeyPolicy operation to set a key policy.

Related operations:

" }, "ReplicateKey":{ "name":"ReplicateKey", @@ -759,7 +773,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Schedules the deletion of a KMS key. By default, KMS applies a waiting period of 30 days, but you can specify a waiting period of 7-30 days. When this operation is successful, the key state of the KMS key changes to PendingDeletion and the key can't be used in any cryptographic operations. It remains in this state for the duration of the waiting period. Before the waiting period ends, you can use CancelKeyDeletion to cancel the deletion of the KMS key. After the waiting period ends, KMS deletes the KMS key, its key material, and all KMS data associated with it, including all aliases that refer to it.

Deleting a KMS key is a destructive and potentially dangerous operation. When a KMS key is deleted, all data that was encrypted under the KMS key is unrecoverable. (The only exception is a multi-Region replica key.) To prevent the use of a KMS key without deleting it, use DisableKey.

If you schedule deletion of a KMS key from a custom key store, when the waiting period expires, ScheduleKeyDeletion deletes the KMS key from KMS. Then KMS makes a best effort to delete the key material from the associated CloudHSM cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups.

You can schedule the deletion of a multi-Region primary key and its replica keys at any time. However, KMS will not delete a multi-Region primary key with existing replica keys. If you schedule the deletion of a primary key with replicas, its key state changes to PendingReplicaDeletion and it cannot be replicated or used in cryptographic operations. This status can continue indefinitely. When the last of its replica keys is deleted (not just scheduled), the key state of the primary key changes to PendingDeletion and its waiting period (PendingWindowInDays) begins. For details, see Deleting multi-Region keys in the Key Management Service Developer Guide.

For more information about scheduling a KMS key for deletion, see Deleting KMS keys in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:ScheduleKeyDeletion (key policy)

Related operations

" + "documentation":"

Schedules the deletion of a KMS key. By default, KMS applies a waiting period of 30 days, but you can specify a waiting period of 7-30 days. When this operation is successful, the key state of the KMS key changes to PendingDeletion and the key can't be used in any cryptographic operations. It remains in this state for the duration of the waiting period. Before the waiting period ends, you can use CancelKeyDeletion to cancel the deletion of the KMS key. After the waiting period ends, KMS deletes the KMS key, its key material, and all KMS data associated with it, including all aliases that refer to it.

Deleting a KMS key is a destructive and potentially dangerous operation. When a KMS key is deleted, all data that was encrypted under the KMS key is unrecoverable. (The only exception is a multi-Region replica key.) To prevent the use of a KMS key without deleting it, use DisableKey.

You can schedule the deletion of a multi-Region primary key and its replica keys at any time. However, KMS will not delete a multi-Region primary key with existing replica keys. If you schedule the deletion of a primary key with replicas, its key state changes to PendingReplicaDeletion and it cannot be replicated or used in cryptographic operations. This status can continue indefinitely. When the last of its replica keys is deleted (not just scheduled), the key state of the primary key changes to PendingDeletion and its waiting period (PendingWindowInDays) begins. For details, see Deleting multi-Region keys in the Key Management Service Developer Guide.

When KMS deletes a KMS key from a CloudHSM key store, it makes a best effort to delete the associated key material from the associated CloudHSM cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups. Deleting a KMS key from an external key store has no effect on the associated external key. However, for both types of custom key stores, deleting a KMS key is destructive and irreversible. You cannot decrypt ciphertext encrypted under the KMS key by using only its associated external key or CloudHSM key. Also, you cannot recreate a KMS key in an external key store by creating a new KMS key with the same key material.

For more information about scheduling a KMS key for deletion, see Deleting KMS keys in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:ScheduleKeyDeletion (key policy)

Related operations

" }, "Sign":{ "name":"Sign", @@ -796,7 +810,7 @@ {"shape":"LimitExceededException"}, {"shape":"TagException"} ], - "documentation":"

Adds or edits tags on a customer managed key.

Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC in KMS in the Key Management Service Developer Guide.

Each tag consists of a tag key and a tag value, both of which are case-sensitive strings. The tag value can be an empty (null) string. To add a tag, specify a new tag key and a tag value. To edit a tag, specify an existing tag key and a new tag value.

You can use this operation to tag a customer managed key, but you cannot tag an Amazon Web Services managed key, an Amazon Web Services owned key, a custom key store, or an alias.

You can also add tags to a KMS key while creating it (CreateKey) or replicating it (ReplicateKey).

For information about using tags in KMS, see Tagging keys. For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:TagResource (key policy)

Related operations

" + "documentation":"

Adds or edits tags on a customer managed key.

Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

Each tag consists of a tag key and a tag value, both of which are case-sensitive strings. The tag value can be an empty (null) string. To add a tag, specify a new tag key and a tag value. To edit a tag, specify an existing tag key and a new tag value.

You can use this operation to tag a customer managed key, but you cannot tag an Amazon Web Services managed key, an Amazon Web Services owned key, a custom key store, or an alias.

You can also add tags to a KMS key while creating it (CreateKey) or replicating it (ReplicateKey).

For information about using tags in KMS, see Tagging keys. For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:TagResource (key policy)

Related operations

" }, "UntagResource":{ "name":"UntagResource", @@ -812,7 +826,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"TagException"} ], - "documentation":"

Deletes tags from a customer managed key. To delete a tag, specify the tag key and the KMS key.

Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC in KMS in the Key Management Service Developer Guide.

When it succeeds, the UntagResource operation doesn't return any output. Also, if the specified tag key isn't found on the KMS key, it doesn't throw an exception or return a response. To confirm that the operation worked, use the ListResourceTags operation.

For information about using tags in KMS, see Tagging keys. For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:UntagResource (key policy)

Related operations

" + "documentation":"

Deletes tags from a customer managed key. To delete a tag, specify the tag key and the KMS key.

Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

When it succeeds, the UntagResource operation doesn't return any output. Also, if the specified tag key isn't found on the KMS key, it doesn't throw an exception or return a response. To confirm that the operation worked, use the ListResourceTags operation.

For information about using tags in KMS, see Tagging keys. For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:UntagResource (key policy)

Related operations

" }, "UpdateAlias":{ "name":"UpdateAlias", @@ -828,7 +842,7 @@ {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Associates an existing KMS alias with a different KMS key. Each alias is associated with only one KMS key at a time, although a KMS key can have multiple aliases. The alias and the KMS key must be in the same Amazon Web Services account and Region.

Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC in KMS in the Key Management Service Developer Guide.

The current and new KMS key must be the same type (both symmetric or both asymmetric), and they must have the same key usage (ENCRYPT_DECRYPT or SIGN_VERIFY). This restriction prevents errors in code that uses aliases. If you must assign an alias to a different type of KMS key, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.

You cannot use UpdateAlias to change an alias name. To change an alias name, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.

Because an alias is not a property of a KMS key, you can create, update, and delete the aliases of a KMS key without affecting the KMS key. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all KMS keys in the account, use the ListAliases operation.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions

For details, see Controlling access to aliases in the Key Management Service Developer Guide.

Related operations:

" + "documentation":"

Associates an existing KMS alias with a different KMS key. Each alias is associated with only one KMS key at a time, although a KMS key can have multiple aliases. The alias and the KMS key must be in the same Amazon Web Services account and Region.

Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

The current and new KMS key must be the same type (both symmetric or both asymmetric or both HMAC), and they must have the same key usage. This restriction prevents errors in code that uses aliases. If you must assign an alias to a different type of KMS key, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.

You cannot use UpdateAlias to change an alias name. To change an alias name, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.

Because an alias is not a property of a KMS key, you can create, update, and delete the aliases of a KMS key without affecting the KMS key. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all KMS keys in the account, use the ListAliases operation.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions

For details, see Controlling access to aliases in the Key Management Service Developer Guide.

Related operations:

" }, "UpdateCustomKeyStore":{ "name":"UpdateCustomKeyStore", @@ -846,9 +860,18 @@ {"shape":"CustomKeyStoreInvalidStateException"}, {"shape":"KMSInternalException"}, {"shape":"CloudHsmClusterNotActiveException"}, - {"shape":"CloudHsmClusterInvalidConfigurationException"} - ], - "documentation":"

Changes the properties of a custom key store. Use the CustomKeyStoreId parameter to identify the custom key store you want to edit. Use the remaining parameters to change the properties of the custom key store.

You can only update a custom key store that is disconnected. To disconnect the custom key store, use DisconnectCustomKeyStore. To reconnect the custom key store after the update completes, use ConnectCustomKeyStore. To find the connection state of a custom key store, use the DescribeCustomKeyStores operation.

The CustomKeyStoreId parameter is required in all commands. Use the other parameters of UpdateCustomKeyStore to edit your key store settings.

If the operation succeeds, it returns a JSON object with no properties.

This operation is part of the custom key store feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a single-tenant key store.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:UpdateCustomKeyStore (IAM policy)

Related operations:

" + {"shape":"CloudHsmClusterInvalidConfigurationException"}, + {"shape":"XksProxyUriInUseException"}, + {"shape":"XksProxyUriEndpointInUseException"}, + {"shape":"XksProxyUriUnreachableException"}, + {"shape":"XksProxyIncorrectAuthenticationCredentialException"}, + {"shape":"XksProxyVpcEndpointServiceInUseException"}, + {"shape":"XksProxyVpcEndpointServiceNotFoundException"}, + {"shape":"XksProxyVpcEndpointServiceInvalidConfigurationException"}, + {"shape":"XksProxyInvalidResponseException"}, + {"shape":"XksProxyInvalidConfigurationException"} + ], + "documentation":"

Changes the properties of a custom key store. You can use this operation to change the properties of an CloudHSM key store or an external key store.

Use the required CustomKeyStoreId parameter to identify the custom key store. Use the remaining optional parameters to change its properties. This operation does not return any property values. To verify the updated property values, use the DescribeCustomKeyStores operation.

This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

When updating the properties of an external key store, verify that the updated settings connect your key store, via the external key store proxy, to the same external key manager as the previous settings, or to a backup or snapshot of the external key manager with the same cryptographic keys. If the updated connection settings fail, you can fix them and retry, although an extended delay might disrupt Amazon Web Services services. However, if KMS permanently loses its access to cryptographic keys, ciphertext encrypted under those keys is unrecoverable.

For external key stores:

Some external key managers provide a simpler method for updating an external key store. For details, see your external key manager documentation.

When updating an external key store in the KMS console, you can upload a JSON-based proxy configuration file with the desired values. You cannot upload the proxy configuration file to the UpdateCustomKeyStore operation. However, you can use the file to help you determine the correct values for the UpdateCustomKeyStore parameters.

For an CloudHSM key store, you can use this operation to change the custom key store friendly name (NewCustomKeyStoreName), to tell KMS about a change to the kmsuser crypto user password (KeyStorePassword), or to associate the custom key store with a different, but related, CloudHSM cluster (CloudHsmClusterId). To update any property of an CloudHSM key store, the ConnectionState of the CloudHSM key store must be DISCONNECTED.

For an external key store, you can use this operation to change the custom key store friendly name (NewCustomKeyStoreName), or to tell KMS about a change to the external key store proxy authentication credentials (XksProxyAuthenticationCredential), connection method (XksProxyConnectivity), external proxy endpoint (XksProxyUriEndpoint) and path (XksProxyUriPath). For external key stores with an XksProxyConnectivity of VPC_ENDPOINT_SERVICE, you can also update the Amazon VPC endpoint service name (XksProxyVpcEndpointServiceName). To update most properties of an external key store, the ConnectionState of the external key store must be DISCONNECTED. However, you can update the CustomKeyStoreName, XksProxyAuthenticationCredential, and XksProxyUriPath of an external key store when it is in the CONNECTED or DISCONNECTED state.

If your update requires a DISCONNECTED state, before using UpdateCustomKeyStore, use the DisconnectCustomKeyStore operation to disconnect the custom key store. After the UpdateCustomKeyStore operation completes, use the ConnectCustomKeyStore to reconnect the custom key store. To find the ConnectionState of the custom key store, use the DescribeCustomKeyStores operation.

Before updating the custom key store, verify that the new values allow KMS to connect the custom key store to its backing key store. For example, before you change the XksProxyUriPath value, verify that the external key store proxy is reachable at the new path.

If the operation succeeds, it returns a JSON object with no properties.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:UpdateCustomKeyStore (IAM policy)

Related operations:

" }, "UpdateKeyDescription":{ "name":"UpdateKeyDescription", @@ -902,7 +925,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"KMSInvalidSignatureException"} ], - "documentation":"

Verifies a digital signature that was generated by the Sign operation.

Verification confirms that an authorized user signed the message with the specified KMS key and signing algorithm, and the message hasn't changed since it was signed. If the signature is verified, the value of the SignatureValid field in the response is True. If the signature verification fails, the Verify operation fails with a KMSInvalidSignatureException exception.

A digital signature is generated by using the private key in an asymmetric KMS key. The signature is verified by using the public key in the same asymmetric KMS key. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

To verify a digital signature, you can use the Verify operation. Specify the same asymmetric KMS key, message, and signing algorithm that were used to produce the signature.

You can also verify the digital signature by using the public key of the KMS key outside of KMS. Use the GetPublicKey operation to download the public key in the asymmetric KMS key and then use the public key to verify the signature outside of KMS. To verify a signature outside of KMS with an SM2 public key, you must specify the distinguishing ID. By default, KMS uses 1234567812345678 as the distinguishing ID. For more information, see Offline verification with SM2 key pairs in Key Management Service Developer Guide. The advantage of using the Verify operation is that it is performed within KMS. As a result, it's easy to call, the operation is performed within the FIPS boundary, it is logged in CloudTrail, and you can use key policy and IAM policy to determine who is authorized to use the KMS key to verify signatures.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:Verify (key policy)

Related operations: Sign

" + "documentation":"

Verifies a digital signature that was generated by the Sign operation.

Verification confirms that an authorized user signed the message with the specified KMS key and signing algorithm, and the message hasn't changed since it was signed. If the signature is verified, the value of the SignatureValid field in the response is True. If the signature verification fails, the Verify operation fails with a KMSInvalidSignatureException exception.

A digital signature is generated by using the private key in an asymmetric KMS key. The signature is verified by using the public key in the same asymmetric KMS key. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

To verify a digital signature, you can use the Verify operation. Specify the same asymmetric KMS key, message, and signing algorithm that were used to produce the signature.

You can also verify the digital signature by using the public key of the KMS key outside of KMS. Use the GetPublicKey operation to download the public key in the asymmetric KMS key and then use the public key to verify the signature outside of KMS. The advantage of using the Verify operation is that it is performed within KMS. As a result, it's easy to call, the operation is performed within the FIPS boundary, it is logged in CloudTrail, and you can use key policy and IAM policy to determine who is authorized to use the KMS key to verify signatures.

To verify a signature outside of KMS with an SM2 public key (China Regions only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678 as the distinguishing ID. For more information, see Offline verification with SM2 key pairs.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:Verify (key policy)

Related operations: Sign

" }, "VerifyMac":{ "name":"VerifyMac", @@ -922,7 +945,7 @@ {"shape":"KMSInvalidMacException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Verifies the hash-based message authentication code (HMAC) for a specified message, HMAC KMS key, and MAC algorithm. To verify the HMAC, VerifyMac computes an HMAC using the message, HMAC KMS key, and MAC algorithm that you specify, and compares the computed HMAC to the HMAC that you specify. If the HMACs are identical, the verification succeeds; otherwise, it fails.

Verification indicates that the message hasn't changed since the HMAC was calculated, and the specified key was used to generate and verify the HMAC.

This operation is part of KMS support for HMAC KMS keys. For details, see HMAC keys in KMS in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:VerifyMac (key policy)

Related operations: GenerateMac

" + "documentation":"

Verifies the hash-based message authentication code (HMAC) for a specified message, HMAC KMS key, and MAC algorithm. To verify the HMAC, VerifyMac computes an HMAC using the message, HMAC KMS key, and MAC algorithm that you specify, and compares the computed HMAC to the HMAC that you specify. If the HMACs are identical, the verification succeeds; otherwise, it fails. Verification indicates that the message hasn't changed since the HMAC was calculated, and the specified key was used to generate and verify the HMAC.

HMAC KMS keys and the HMAC algorithms that KMS uses conform to industry standards defined in RFC 2104.

This operation is part of KMS support for HMAC KMS keys. For details, see HMAC keys in KMS in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:VerifyMac (key policy)

Related operations: GenerateMac

" } }, "shapes":{ @@ -1019,7 +1042,7 @@ "members":{ "message":{"shape":"ErrorMessageType"} }, - "documentation":"

The request was rejected because the specified CloudHSM cluster is already associated with a custom key store or it shares a backup history with a cluster that is associated with a custom key store. Each custom key store must be associated with a different CloudHSM cluster.

Clusters that share a backup history have the same cluster certificate. To view the cluster certificate of a cluster, use the DescribeClusters operation.

", + "documentation":"

The request was rejected because the specified CloudHSM cluster is already associated with a CloudHSM key store in the account, or it shares a backup history with a CloudHSM key store in the account. Each CloudHSM key store in the account must be associated with a different CloudHSM cluster.

CloudHSM clusters that share a backup history have the same cluster certificate. To view the cluster certificate of a CloudHSM cluster, use the DescribeClusters operation.

", "exception":true }, "CloudHsmClusterInvalidConfigurationException":{ @@ -1027,7 +1050,7 @@ "members":{ "message":{"shape":"ErrorMessageType"} }, - "documentation":"

The request was rejected because the associated CloudHSM cluster did not meet the configuration requirements for a custom key store.

For information about the requirements for an CloudHSM cluster that is associated with a custom key store, see Assemble the Prerequisites in the Key Management Service Developer Guide. For information about creating a private subnet for an CloudHSM cluster, see Create a Private Subnet in the CloudHSM User Guide. For information about cluster security groups, see Configure a Default Security Group in the CloudHSM User Guide .

", + "documentation":"

The request was rejected because the associated CloudHSM cluster did not meet the configuration requirements for a CloudHSM key store.

For information about the requirements for a CloudHSM cluster that is associated with a CloudHSM key store, see Assemble the Prerequisites in the Key Management Service Developer Guide. For information about creating a private subnet for a CloudHSM cluster, see Create a Private Subnet in the CloudHSM User Guide. For information about cluster security groups, see Configure a Default Security Group in the CloudHSM User Guide.

", "exception":true }, "CloudHsmClusterNotActiveException":{ @@ -1035,7 +1058,7 @@ "members":{ "message":{"shape":"ErrorMessageType"} }, - "documentation":"

The request was rejected because the CloudHSM cluster that is associated with the custom key store is not active. Initialize and activate the cluster and try the command again. For detailed instructions, see Getting Started in the CloudHSM User Guide.

", + "documentation":"

The request was rejected because the CloudHSM cluster associated with the CloudHSM key store is not active. Initialize and activate the cluster and try the command again. For detailed instructions, see Getting Started in the CloudHSM User Guide.

", "exception":true }, "CloudHsmClusterNotFoundException":{ @@ -1051,7 +1074,7 @@ "members":{ "message":{"shape":"ErrorMessageType"} }, - "documentation":"

The request was rejected because the specified CloudHSM cluster has a different cluster certificate than the original cluster. You cannot use the operation to specify an unrelated cluster.

Specify a cluster that shares a backup history with the original cluster. This includes clusters that were created from a backup of the current cluster, and clusters that were created from the same backup that produced the current cluster.

Clusters that share a backup history have the same cluster certificate. To view the cluster certificate of a cluster, use the DescribeClusters operation.

", + "documentation":"

The request was rejected because the specified CloudHSM cluster has a different cluster certificate than the original cluster. You cannot use the operation to specify an unrelated cluster for a CloudHSM key store.

Specify a CloudHSM cluster that shares a backup history with the original cluster. This includes clusters that were created from a backup of the current cluster, and clusters that were created from the same backup that produced the current cluster.

CloudHSM clusters that share a backup history have the same cluster certificate. To view the cluster certificate of a CloudHSM cluster, use the DescribeClusters operation.

", "exception":true }, "ConnectCustomKeyStoreRequest":{ @@ -1081,7 +1104,15 @@ "USER_NOT_FOUND", "USER_LOGGED_IN", "SUBNET_NOT_FOUND", - "INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET" + "INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET", + "XKS_PROXY_ACCESS_DENIED", + "XKS_PROXY_NOT_REACHABLE", + "XKS_VPC_ENDPOINT_SERVICE_NOT_FOUND", + "XKS_PROXY_INVALID_RESPONSE", + "XKS_PROXY_INVALID_CONFIGURATION", + "XKS_VPC_ENDPOINT_SERVICE_INVALID_CONFIGURATION", + "XKS_PROXY_TIMED_OUT", + "XKS_PROXY_INVALID_TLS_CONFIGURATION" ] }, "ConnectionStateType":{ @@ -1117,19 +1148,43 @@ "members":{ "CustomKeyStoreName":{ "shape":"CustomKeyStoreNameType", - "documentation":"

Specifies a friendly name for the custom key store. The name must be unique in your Amazon Web Services account.

" + "documentation":"

Specifies a friendly name for the custom key store. The name must be unique in your Amazon Web Services account and Region. This parameter is required for all custom key stores.

" }, "CloudHsmClusterId":{ "shape":"CloudHsmClusterIdType", - "documentation":"

Identifies the CloudHSM cluster for the custom key store. Enter the cluster ID of any active CloudHSM cluster that is not already associated with a custom key store. To find the cluster ID, use the DescribeClusters operation.

" + "documentation":"

Identifies the CloudHSM cluster for a CloudHSM key store. This parameter is required for custom key stores with CustomKeyStoreType of AWS_CLOUDHSM.

Enter the cluster ID of any active CloudHSM cluster that is not already associated with a custom key store. To find the cluster ID, use the DescribeClusters operation.

" }, "TrustAnchorCertificate":{ "shape":"TrustAnchorCertificateType", - "documentation":"

Enter the content of the trust anchor certificate for the cluster. This is the content of the customerCA.crt file that you created when you initialized the cluster.

" + "documentation":"

Specifies the certificate for a CloudHSM key store. This parameter is required for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM.

Enter the content of the trust anchor certificate for the CloudHSM cluster. This is the content of the customerCA.crt file that you created when you initialized the cluster.

" }, "KeyStorePassword":{ "shape":"KeyStorePasswordType", - "documentation":"

Enter the password of the kmsuser crypto user (CU) account in the specified CloudHSM cluster. KMS logs into the cluster as this user to manage key material on your behalf.

The password must be a string of 7 to 32 characters. Its value is case sensitive.

This parameter tells KMS the kmsuser account password; it does not change the password in the CloudHSM cluster.

" + "documentation":"

Specifies the kmsuser password for a CloudHSM key store. This parameter is required for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM.

Enter the password of the kmsuser crypto user (CU) account in the specified CloudHSM cluster. KMS logs into the cluster as this user to manage key material on your behalf.

The password must be a string of 7 to 32 characters. Its value is case sensitive.

This parameter tells KMS the kmsuser account password; it does not change the password in the CloudHSM cluster.

" + }, + "CustomKeyStoreType":{ + "shape":"CustomKeyStoreType", + "documentation":"

Specifies the type of custom key store. The default value is AWS_CLOUDHSM.

For a custom key store backed by a CloudHSM cluster, omit the parameter or enter AWS_CLOUDHSM. For a custom key store backed by an external key manager outside of Amazon Web Services, enter EXTERNAL_KEY_STORE. You cannot change this property after the key store is created.

" + }, + "XksProxyUriEndpoint":{ + "shape":"XksProxyUriEndpointType", + "documentation":"

Specifies the endpoint that KMS uses to send requests to the external key store proxy (XKS proxy). This parameter is required for custom key stores with a CustomKeyStoreType of EXTERNAL_KEY_STORE.

The protocol must be HTTPS. KMS communicates on port 443. Do not specify the port in the XksProxyUriEndpoint value.

For external key stores with XksProxyConnectivity value of VPC_ENDPOINT_SERVICE, specify https:// followed by the private DNS name of the VPC endpoint service.

For external key stores with PUBLIC_ENDPOINT connectivity, this endpoint must be reachable before you create the custom key store. KMS connects to the external key store proxy while creating the custom key store. For external key stores with VPC_ENDPOINT_SERVICE connectivity, KMS connects when you call the ConnectCustomKeyStore operation.

The value of this parameter must begin with https://. The remainder can contain upper and lower case letters (A-Z and a-z), numbers (0-9), dots (.), and hyphens (-). Additional slashes (/ and \\) are not permitted.

Uniqueness requirements:

" + }, + "XksProxyUriPath":{ + "shape":"XksProxyUriPathType", + "documentation":"

Specifies the base path to the proxy APIs for this external key store. To find this value, see the documentation for your external key store proxy. This parameter is required for all custom key stores with a CustomKeyStoreType of EXTERNAL_KEY_STORE.

The value must start with / and must end with /kms/xks/v1 where v1 represents the version of the KMS external key store proxy API. This path can include an optional prefix between the required elements such as /prefix/kms/xks/v1.

Uniqueness requirements:

" + }, + "XksProxyVpcEndpointServiceName":{ + "shape":"XksProxyVpcEndpointServiceNameType", + "documentation":"

Specifies the name of the Amazon VPC endpoint service for interface endpoints that is used to communicate with your external key store proxy (XKS proxy). This parameter is required when the value of CustomKeyStoreType is EXTERNAL_KEY_STORE and the value of XksProxyConnectivity is VPC_ENDPOINT_SERVICE.

The Amazon VPC endpoint service must fulfill all requirements for use with an external key store.

Uniqueness requirements:

" + }, + "XksProxyAuthenticationCredential":{ + "shape":"XksProxyAuthenticationCredentialType", + "documentation":"

Specifies an authentication credential for the external key store proxy (XKS proxy). This parameter is required for all custom key stores with a CustomKeyStoreType of EXTERNAL_KEY_STORE.

The XksProxyAuthenticationCredential has two required elements: RawSecretAccessKey, a secret key, and AccessKeyId, a unique identifier for the RawSecretAccessKey. For character requirements, see XksProxyAuthenticationCredentialType.

KMS uses this authentication credential to sign requests to the external key store proxy on your behalf. This credential is unrelated to Identity and Access Management (IAM) and Amazon Web Services credentials.

This parameter doesn't set or change the authentication credentials on the XKS proxy. It just tells KMS the credential that you established on your external key store proxy. If you rotate your proxy authentication credential, use the UpdateCustomKeyStore operation to provide the new credential to KMS.

" + }, + "XksProxyConnectivity":{ + "shape":"XksProxyConnectivityType", + "documentation":"

Indicates how KMS communicates with the external key store proxy. This parameter is required for custom key stores with a CustomKeyStoreType of EXTERNAL_KEY_STORE.

If the external key store proxy uses a public endpoint, specify PUBLIC_ENDPOINT. If the external key store proxy uses an Amazon VPC endpoint service for communication with KMS, specify VPC_ENDPOINT_SERVICE. For help making this choice, see Choosing a connectivity option in the Key Management Service Developer Guide.

An Amazon VPC endpoint service keeps your communication with KMS in a private address space entirely within Amazon Web Services, but it requires more configuration, including establishing an Amazon VPC with multiple subnets, a VPC endpoint service, a network load balancer, and a verified private DNS name. A public endpoint is simpler to set up, but it might be slower and might not fulfill your security requirements. You might consider testing with a public endpoint, and then establishing a VPC endpoint service for production tasks. Note that this choice does not determine the location of the external key store proxy. Even if you choose a VPC endpoint service, the proxy can be hosted within the VPC or outside of Amazon Web Services such as in your corporate data center.

" } } }, @@ -1198,7 +1253,7 @@ "members":{ "Policy":{ "shape":"PolicyType", - "documentation":"

The key policy to attach to the KMS key. If you do not specify a key policy, KMS attaches a default key policy to the KMS key. For more information, see Default key policy in the Key Management Service Developer Guide.

If you provide a key policy, it must meet the following criteria:

A key policy document can include only the following characters:

For information about key policies, see Key policies in KMS in the Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide .

" + "documentation":"

The key policy to attach to the KMS key.

If you provide a key policy, it must meet the following criteria:

If you do not provide a key policy, KMS attaches a default key policy to the KMS key. For more information, see Default Key Policy in the Key Management Service Developer Guide.

The key policy size quota is 32 kilobytes (32768 bytes).

For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide .

" }, "Description":{ "shape":"DescriptionType", @@ -1210,21 +1265,21 @@ }, "CustomerMasterKeySpec":{ "shape":"CustomerMasterKeySpec", - "documentation":"

Instead, use the KeySpec parameter.

The KeySpec and CustomerMasterKeySpec parameters work the same way. Only the names differ. We recommend that you use KeySpec parameter in your code. However, to avoid breaking changes, KMS will support both parameters.

", + "documentation":"

Instead, use the KeySpec parameter.

The KeySpec and CustomerMasterKeySpec parameters work the same way. Only the names differ. We recommend that you use KeySpec parameter in your code. However, to avoid breaking changes, KMS supports both parameters.

", "deprecated":true, "deprecatedMessage":"This parameter has been deprecated. Instead, use the KeySpec parameter." }, "KeySpec":{ "shape":"KeySpec", - "documentation":"

Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT, creates a KMS key with a 256-bit AES-GCM key that is used for encryption and decryption, except in China Regions, where it creates a 128-bit symmetric key that uses SM4 encryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the Key Management Service Developer Guide .

The KeySpec determines whether the KMS key contains a symmetric key or an asymmetric key pair. It also determines the cryptographic algorithms that the KMS key supports. You can't change the KeySpec after the KMS key is created. To further restrict the algorithms that can be used with the KMS key, use a condition key in its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm, kms:MacAlgorithm or kms:Signing Algorithm in the Key Management Service Developer Guide .

Amazon Web Services services that are integrated with KMS use symmetric encryption KMS keys to protect your data. These services do not support asymmetric KMS keys or HMAC KMS keys.

KMS supports the following key specs for KMS keys:

" + "documentation":"

Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT, creates a KMS key with a 256-bit AES-GCM key that is used for encryption and decryption, except in China Regions, where it creates a 128-bit symmetric key that uses SM4 encryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the Key Management Service Developer Guide .

The KeySpec determines whether the KMS key contains a symmetric key or an asymmetric key pair. It also determines the algorithms that the KMS key supports. You can't change the KeySpec after the KMS key is created. To further restrict the algorithms that can be used with the KMS key, use a condition key in its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm, kms:MacAlgorithm or kms:Signing Algorithm in the Key Management Service Developer Guide .

Amazon Web Services services that are integrated with KMS use symmetric encryption KMS keys to protect your data. These services do not support asymmetric KMS keys or HMAC KMS keys.

KMS supports the following key specs for KMS keys:

" }, "Origin":{ "shape":"OriginType", - "documentation":"

The source of the key material for the KMS key. You cannot change the origin after you create the KMS key. The default is AWS_KMS, which means that KMS creates the key material.

To create a KMS key with no key material (for imported key material), set the value to EXTERNAL. For more information about importing key material into KMS, see Importing Key Material in the Key Management Service Developer Guide. This value is valid only for symmetric encryption KMS keys.

To create a KMS key in an KMS custom key store and create its key material in the associated CloudHSM cluster, set this value to AWS_CLOUDHSM. You must also use the CustomKeyStoreId parameter to identify the custom key store. This value is valid only for symmetric encryption KMS keys.

" + "documentation":"

The source of the key material for the KMS key. You cannot change the origin after you create the KMS key. The default is AWS_KMS, which means that KMS creates the key material.

To create a KMS key with no key material (for imported key material), set this value to EXTERNAL. For more information about importing key material into KMS, see Importing Key Material in the Key Management Service Developer Guide. The EXTERNAL origin value is valid only for symmetric KMS keys.

To create a KMS key in a CloudHSM key store and create its key material in the associated CloudHSM cluster, set this value to AWS_CLOUDHSM. You must also use the CustomKeyStoreId parameter to identify the CloudHSM key store. The KeySpec value must be SYMMETRIC_DEFAULT.

To create a KMS key in an external key store, set this value to EXTERNAL_KEY_STORE. You must also use the CustomKeyStoreId parameter to identify the external key store and the XksKeyId parameter to identify the associated external key. The KeySpec value must be SYMMETRIC_DEFAULT.

" }, "CustomKeyStoreId":{ "shape":"CustomKeyStoreIdType", - "documentation":"

Creates the KMS key in the specified custom key store and the key material in its associated CloudHSM cluster. To create a KMS key in a custom key store, you must also specify the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the custom key store must have at least two active HSMs, each in a different Availability Zone in the Region.

This parameter is valid only for symmetric encryption KMS keys in a single Region. You cannot create any other type of KMS key in a custom key store.

To find the ID of a custom key store, use the DescribeCustomKeyStores operation.

The response includes the custom key store ID and the ID of the CloudHSM cluster.

This operation is part of the custom key store feature feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a single-tenant key store.

" + "documentation":"

Creates the KMS key in the specified custom key store. The ConnectionState of the custom key store must be CONNECTED. To find the CustomKeyStoreID and ConnectionState use the DescribeCustomKeyStores operation.

This parameter is valid only for symmetric encryption KMS keys in a single Region. You cannot create any other type of KMS key in a custom key store.

When you create a KMS key in a CloudHSM key store, KMS generates a non-exportable 256-bit symmetric key in its associated CloudHSM cluster and associates it with the KMS key. When you create a KMS key in an external key store, you must use the XksKeyId parameter to specify an external key that serves as key material for the KMS key.

" }, "BypassPolicyLockoutSafetyCheck":{ "shape":"BooleanType", @@ -1232,11 +1287,15 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

Assigns one or more tags to the KMS key. Use this parameter to tag the KMS key when it is created. To tag an existing KMS key, use the TagResource operation.

Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC in KMS in the Key Management Service Developer Guide.

To use this parameter, you must have kms:TagResource permission in an IAM policy.

Each tag consists of a tag key and a tag value. Both the tag key and the tag value are required, but the tag value can be an empty (null) string. You cannot have more than one tag on a KMS key with the same tag key. If you specify an existing tag key with a different tag value, KMS replaces the current tag value with the specified one.

When you add tags to an Amazon Web Services resource, Amazon Web Services generates a cost allocation report with usage and costs aggregated by tags. Tags can also be used to control access to a KMS key. For details, see Tagging Keys.

" + "documentation":"

Assigns one or more tags to the KMS key. Use this parameter to tag the KMS key when it is created. To tag an existing KMS key, use the TagResource operation.

Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

To use this parameter, you must have kms:TagResource permission in an IAM policy.

Each tag consists of a tag key and a tag value. Both the tag key and the tag value are required, but the tag value can be an empty (null) string. You cannot have more than one tag on a KMS key with the same tag key. If you specify an existing tag key with a different tag value, KMS replaces the current tag value with the specified one.

When you add tags to an Amazon Web Services resource, Amazon Web Services generates a cost allocation report with usage and costs aggregated by tags. Tags can also be used to control access to a KMS key. For details, see Tagging Keys.

" }, "MultiRegion":{ "shape":"NullableBooleanType", - "documentation":"

Creates a multi-Region primary key that you can replicate into other Amazon Web Services Regions. You cannot change this value after you create the KMS key.

For a multi-Region key, set this parameter to True. For a single-Region KMS key, omit this parameter or set it to False. The default value is False.

This operation supports multi-Region keys, an KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

This value creates a primary key, not a replica. To create a replica key, use the ReplicateKey operation.

You can create a multi-Region version of a symmetric encryption KMS key, an HMAC KMS key, an asymmetric KMS key, or a KMS key with imported key material. However, you cannot create a multi-Region key in a custom key store.

" + "documentation":"

Creates a multi-Region primary key that you can replicate into other Amazon Web Services Regions. You cannot change this value after you create the KMS key.

For a multi-Region key, set this parameter to True. For a single-Region KMS key, omit this parameter or set it to False. The default value is False.

This operation supports multi-Region keys, a KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

This value creates a primary key, not a replica. To create a replica key, use the ReplicateKey operation.

You can create a symmetric or asymmetric multi-Region key, and you can create a multi-Region key with imported key material. However, you cannot create a multi-Region key in a custom key store.

" + }, + "XksKeyId":{ + "shape":"XksKeyIdType", + "documentation":"

Identifies the external key that serves as key material for the KMS key in an external key store. Specify the ID that the external key store proxy uses to refer to the external key. For help, see the documentation for your external key store proxy.

This parameter is required for a KMS key with an Origin value of EXTERNAL_KEY_STORE. It is not valid for KMS keys with any other Origin value.

The external key must be an existing 256-bit AES symmetric encryption key hosted outside of Amazon Web Services in an external key manager associated with the external key store specified by the CustomKeyStoreId parameter. This key must be enabled and configured to perform encryption and decryption. Each KMS key in an external key store must use a different external key. For details, see Requirements for a KMS key in an external key store in the Key Management Service Developer Guide.

Each KMS key in an external key store is associated with two backing keys. One is key material that KMS generates. The other is the external key specified by this parameter. When you use the KMS key in an external key store to encrypt data, the encryption operation is performed first by KMS using the KMS key material, and then by the external key manager using the specified external key, a process known as double encryption. For details, see Double encryption in the Key Management Service Developer Guide.

" } } }, @@ -1267,7 +1326,7 @@ "members":{ "message":{"shape":"ErrorMessageType"} }, - "documentation":"

The request was rejected because of the ConnectionState of the custom key store. To get the ConnectionState of a custom key store, use the DescribeCustomKeyStores operation.

This exception is thrown under the following conditions:

", + "documentation":"

The request was rejected because of the ConnectionState of the custom key store. To get the ConnectionState of a custom key store, use the DescribeCustomKeyStores operation.

This exception is thrown under the following conditions:

", "exception":true }, "CustomKeyStoreNameInUseException":{ @@ -1291,6 +1350,13 @@ "documentation":"

The request was rejected because KMS cannot find a custom key store with the specified key store name or ID.

", "exception":true }, + "CustomKeyStoreType":{ + "type":"string", + "enum":[ + "AWS_CLOUDHSM", + "EXTERNAL_KEY_STORE" + ] + }, "CustomKeyStoresList":{ "type":"list", "member":{"shape":"CustomKeyStoresListEntry"} @@ -1308,23 +1374,31 @@ }, "CloudHsmClusterId":{ "shape":"CloudHsmClusterIdType", - "documentation":"

A unique identifier for the CloudHSM cluster that is associated with the custom key store.

" + "documentation":"

A unique identifier for the CloudHSM cluster that is associated with an CloudHSM key store. This field appears only when the CustomKeyStoreType is AWS_CLOUDHSM.

" }, "TrustAnchorCertificate":{ "shape":"TrustAnchorCertificateType", - "documentation":"

The trust anchor certificate of the associated CloudHSM cluster. When you initialize the cluster, you create this certificate and save it in the customerCA.crt file.

" + "documentation":"

The trust anchor certificate of the CloudHSM cluster associated with an CloudHSM key store. When you initialize the cluster, you create this certificate and save it in the customerCA.crt file.

This field appears only when the CustomKeyStoreType is AWS_CLOUDHSM.

" }, "ConnectionState":{ "shape":"ConnectionStateType", - "documentation":"

Indicates whether the custom key store is connected to its CloudHSM cluster.

You can create and use KMS keys in your custom key stores only when its connection state is CONNECTED.

The value is DISCONNECTED if the key store has never been connected or you use the DisconnectCustomKeyStore operation to disconnect it. If the value is CONNECTED but you are having trouble using the custom key store, make sure that its associated CloudHSM cluster is active and contains at least one active HSM.

A value of FAILED indicates that an attempt to connect was unsuccessful. The ConnectionErrorCode field in the response indicates the cause of the failure. For help resolving a connection failure, see Troubleshooting a Custom Key Store in the Key Management Service Developer Guide.

" + "documentation":"

Indicates whether the custom key store is connected to its backing key store. For an CloudHSM key store, the ConnectionState indicates whether it is connected to its CloudHSM cluster. For an external key store, the ConnectionState indicates whether it is connected to the external key store proxy that communicates with your external key manager.

You can create and use KMS keys in your custom key stores only when its ConnectionState is CONNECTED.

The ConnectionState value is DISCONNECTED only if the key store has never been connected or you use the DisconnectCustomKeyStore operation to disconnect it. If the value is CONNECTED but you are having trouble using the custom key store, make sure that the backing key store is reachable and active. For an CloudHSM key store, verify that its associated CloudHSM cluster is active and contains at least one active HSM. For an external key store, verify that the external key store proxy and external key manager are connected and enabled.

A value of FAILED indicates that an attempt to connect was unsuccessful. The ConnectionErrorCode field in the response indicates the cause of the failure. For help resolving a connection failure, see Troubleshooting a custom key store in the Key Management Service Developer Guide.

" }, "ConnectionErrorCode":{ "shape":"ConnectionErrorCodeType", - "documentation":"

Describes the connection error. This field appears in the response only when the ConnectionState is FAILED. For help resolving these errors, see How to Fix a Connection Failure in Key Management Service Developer Guide.

Valid values are:

" + "documentation":"

Describes the connection error. This field appears in the response only when the ConnectionState is FAILED.

Many failures can be resolved by updating the properties of the custom key store. To update a custom key store, disconnect it (DisconnectCustomKeyStore), correct the errors (UpdateCustomKeyStore), and try to connect again (ConnectCustomKeyStore). For additional help resolving these errors, see How to Fix a Connection Failure in Key Management Service Developer Guide.

All custom key stores:

CloudHSM key stores:

External key stores:

" }, "CreationDate":{ "shape":"DateType", "documentation":"

The date and time when the custom key store was created.

" + }, + "CustomKeyStoreType":{ + "shape":"CustomKeyStoreType", + "documentation":"

Indicates the type of the custom key store. AWS_CLOUDHSM indicates a custom key store backed by an CloudHSM cluster. EXTERNAL_KEY_STORE indicates a custom key store backed by an external key store proxy and external key manager outside of Amazon Web Services.

" + }, + "XksProxyConfiguration":{ + "shape":"XksProxyConfigurationType", + "documentation":"

Configuration settings for the external key store proxy (XKS proxy). The external key store proxy translates KMS requests into a format that your external key manager can understand. The proxy configuration includes connection information that KMS requires.

This field appears only when the CustomKeyStoreType is EXTERNAL_KEY_STORE.

" } }, "documentation":"

Contains information about each custom key store in the custom key store list.

" @@ -1453,7 +1527,7 @@ "members":{ "message":{"shape":"ErrorMessageType"} }, - "documentation":"

The system timed out while trying to fulfill the request. The request can be retried.

", + "documentation":"

The system timed out while trying to fulfill the request. You can retry the request.

", "exception":true, "fault":true }, @@ -1462,11 +1536,11 @@ "members":{ "CustomKeyStoreId":{ "shape":"CustomKeyStoreIdType", - "documentation":"

Gets only information about the specified custom key store. Enter the key store ID.

By default, this operation gets information about all custom key stores in the account and Region. To limit the output to a particular custom key store, you can use either the CustomKeyStoreId or CustomKeyStoreName parameter, but not both.

" + "documentation":"

Gets only information about the specified custom key store. Enter the key store ID.

By default, this operation gets information about all custom key stores in the account and Region. To limit the output to a particular custom key store, provide either the CustomKeyStoreId or CustomKeyStoreName parameter, but not both.

" }, "CustomKeyStoreName":{ "shape":"CustomKeyStoreNameType", - "documentation":"

Gets only information about the specified custom key store. Enter the friendly name of the custom key store.

By default, this operation gets information about all custom key stores in the account and Region. To limit the output to a particular custom key store, you can use either the CustomKeyStoreId or CustomKeyStoreName parameter, but not both.

" + "documentation":"

Gets only information about the specified custom key store. Enter the friendly name of the custom key store.

By default, this operation gets information about all custom key stores in the account and Region. To limit the output to a particular custom key store, provide either the CustomKeyStoreId or CustomKeyStoreName parameter, but not both.

" }, "Limit":{ "shape":"LimitType", @@ -1582,7 +1656,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

Identifies a symmetric encryption KMS key. You cannot enable or disable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. The key rotation status of these KMS keys is always false. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

Specify the key ID or key ARN of the KMS key.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

" + "documentation":"

Identifies a symmetric encryption KMS key. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

Specify the key ID or key ARN of the KMS key.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

" } } }, @@ -1611,7 +1685,7 @@ }, "EncryptionAlgorithm":{ "shape":"EncryptionAlgorithmSpec", - "documentation":"

Specifies the encryption algorithm that KMS will use to encrypt the plaintext message. The algorithm must be compatible with the KMS key that you specify.

This parameter is required only for asymmetric KMS keys. The default value, SYMMETRIC_DEFAULT, is the algorithm used for symmetric encryption KMS keys. If you are using an asymmetric KMS key, we recommend RSAES_OAEP_SHA_256.

" + "documentation":"

Specifies the encryption algorithm that KMS will use to encrypt the plaintext message. The algorithm must be compatible with the KMS key that you specify.

This parameter is required only for asymmetric KMS keys. The default value, SYMMETRIC_DEFAULT, is the algorithm used for symmetric encryption KMS keys. If you are using an asymmetric KMS key, we recommend RSAES_OAEP_SHA_256.

The SM2PKE algorithm is only available in China Regions.

" } } }, @@ -1685,7 +1759,7 @@ }, "KeyPairSpec":{ "shape":"DataKeyPairSpec", - "documentation":"

Determines the type of data key pair that is generated.

The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions. RSA and ECC asymmetric key pairs are also available in China Regions.

" + "documentation":"

Determines the type of data key pair that is generated.

The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions.

" }, "GrantTokens":{ "shape":"GrantTokenList", @@ -1735,7 +1809,7 @@ }, "KeyPairSpec":{ "shape":"DataKeyPairSpec", - "documentation":"

Determines the type of data key pair that is generated.

The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions. RSA and ECC asymmetric key pairs are also available in China Regions.

" + "documentation":"

Determines the type of data key pair that is generated.

The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions.

" }, "GrantTokens":{ "shape":"GrantTokenList", @@ -1877,7 +1951,7 @@ "members":{ "Mac":{ "shape":"CiphertextType", - "documentation":"

The hash-based message authentication code (HMAC) for the given message, key, and MAC algorithm.

" + "documentation":"

The hash-based message authentication code (HMAC) that was generated for the specified message, HMAC KMS key, and MAC algorithm.

This is the standard, raw HMAC defined in RFC 2104.

" }, "MacAlgorithm":{ "shape":"MacAlgorithmSpec", @@ -1898,7 +1972,7 @@ }, "CustomKeyStoreId":{ "shape":"CustomKeyStoreIdType", - "documentation":"

Generates the random byte string in the CloudHSM cluster that is associated with the specified custom key store. To find the ID of a custom key store, use the DescribeCustomKeyStores operation.

" + "documentation":"

Generates the random byte string in the CloudHSM cluster that is associated with the specified CloudHSM key store. To find the ID of a custom key store, use the DescribeCustomKeyStores operation.

External key store IDs are not valid for this parameter. If you specify the ID of an external key store, GenerateRandom throws an UnsupportedOperationException.

" } } }, @@ -2026,7 +2100,7 @@ }, "CustomerMasterKeySpec":{ "shape":"CustomerMasterKeySpec", - "documentation":"

Instead, use the KeySpec field in the GetPublicKey response.

The KeySpec and CustomerMasterKeySpec fields have the same value. We recommend that you use the KeySpec field in your code. However, to avoid breaking changes, KMS will support both fields.

", + "documentation":"

Instead, use the KeySpec field in the GetPublicKey response.

The KeySpec and CustomerMasterKeySpec fields have the same value. We recommend that you use the KeySpec field in your code. However, to avoid breaking changes, KMS supports both fields.

", "deprecated":true, "deprecatedMessage":"This field has been deprecated. Instead, use the KeySpec field." }, @@ -2060,7 +2134,7 @@ "documentation":"

A list of key-value pairs that must match the encryption context in the cryptographic operation request. The grant allows the operation only when the encryption context in the request is the same as the encryption context specified in this constraint.

" } }, - "documentation":"

Use this structure to allow cryptographic operations in the grant only when the operation request includes the specified encryption context.

KMS applies the grant constraints only to cryptographic operations that support an encryption context, that is, all cryptographic operations with a symmetric encryption KMS key. Grant constraints are not applied to operations that do not support an encryption context, such as cryptographic operations with HMAC KMS keys or asymmetric KMS keys, and management operations, such as DescribeKey or RetireGrant.

In a cryptographic operation, the encryption context in the decryption operation must be an exact, case-sensitive match for the keys and values in the encryption context of the encryption operation. Only the order of the pairs can vary.

However, in a grant constraint, the key in each key-value pair is not case sensitive, but the value is case sensitive.

To avoid confusion, do not use multiple encryption context pairs that differ only by case. To require a fully case-sensitive encryption context, use the kms:EncryptionContext: and kms:EncryptionContextKeys conditions in an IAM or key policy. For details, see kms:EncryptionContext: in the Key Management Service Developer Guide .

" + "documentation":"

Use this structure to allow cryptographic operations in the grant only when the operation request includes the specified encryption context.

KMS applies the grant constraints only to cryptographic operations that support an encryption context, that is, all cryptographic operations with a symmetric KMS key. Grant constraints are not applied to operations that do not support an encryption context, such as cryptographic operations with asymmetric KMS keys and management operations, such as DescribeKey or RetireGrant.

In a cryptographic operation, the encryption context in the decryption operation must be an exact, case-sensitive match for the keys and values in the encryption context of the encryption operation. Only the order of the pairs can vary.

However, in a grant constraint, the key in each key-value pair is not case sensitive, but the value is case sensitive.

To avoid confusion, do not use multiple encryption context pairs that differ only by case. To require a fully case-sensitive encryption context, use the kms:EncryptionContext: and kms:EncryptionContextKeys conditions in an IAM or key policy. For details, see kms:EncryptionContext: in the Key Management Service Developer Guide .

" }, "GrantIdType":{ "type":"string", @@ -2177,11 +2251,11 @@ }, "ValidTo":{ "shape":"DateType", - "documentation":"

The time at which the imported key material expires. When the key material expires, KMS deletes the key material and the KMS key becomes unusable. You must omit this parameter when the ExpirationModel parameter is set to KEY_MATERIAL_DOES_NOT_EXPIRE. Otherwise it is required.

" + "documentation":"

The date and time when the imported key material expires. This parameter is required when the value of the ExpirationModel parameter is KEY_MATERIAL_EXPIRES. Otherwise it is not valid.

The value of this parameter must be a future date and time. The maximum value is 365 days from the request date.

When the key material expires, KMS deletes the key material from the KMS key. Without its key material, the KMS key is unusable. To use the KMS key in cryptographic operations, you must reimport the same key material.

You cannot change the ExpirationModel or ValidTo values for the current import after the request completes. To change either value, you must delete (DeleteImportedKeyMaterial) and reimport the key material.

" }, "ExpirationModel":{ "shape":"ExpirationModelType", - "documentation":"

Specifies whether the key material expires. The default is KEY_MATERIAL_EXPIRES, in which case you must include the ValidTo parameter. When this parameter is set to KEY_MATERIAL_DOES_NOT_EXPIRE, you must omit the ValidTo parameter.

" + "documentation":"

Specifies whether the key material expires. The default is KEY_MATERIAL_EXPIRES.

When the value of ExpirationModel is KEY_MATERIAL_EXPIRES, you must specify a value for the ValidTo parameter. When the value is KEY_MATERIAL_DOES_NOT_EXPIRE, you must omit the ValidTo parameter.

You cannot change the ExpirationModel or ValidTo values for the current import after the request completes. To change either value, you must delete (DeleteImportedKeyMaterial) and reimport the key material.

" } } }, @@ -2211,7 +2285,7 @@ "members":{ "message":{"shape":"ErrorMessageType"} }, - "documentation":"

The request was rejected because the trust anchor certificate in the request is not the trust anchor certificate for the specified CloudHSM cluster.

When you initialize the cluster, you create the trust anchor certificate and save it in the customerCA.crt file.

", + "documentation":"

The request was rejected because the trust anchor certificate in the request to create an CloudHSM key store is not the trust anchor certificate for the specified CloudHSM cluster.

When you initialize the CloudHSM cluster, you create the trust anchor certificate and save it in the customerCA.crt file.

", "exception":true }, "InvalidAliasNameException":{ @@ -2308,7 +2382,7 @@ "members":{ "message":{"shape":"ErrorMessageType"} }, - "documentation":"

The request was rejected because the state of the specified resource is not valid for this request.

For more information about how key state affects the use of a KMS key, see Key states of KMS keys in the Key Management Service Developer Guide .

", + "documentation":"

The request was rejected because the state of the specified resource is not valid for this request.

This exception means one of the following:

", "exception":true }, "KeyIdType":{ @@ -2391,11 +2465,11 @@ }, "CustomKeyStoreId":{ "shape":"CustomKeyStoreIdType", - "documentation":"

A unique identifier for the custom key store that contains the KMS key. This value is present only when the KMS key is created in a custom key store.

" + "documentation":"

A unique identifier for the custom key store that contains the KMS key. This field is present only when the KMS key is created in a custom key store.

" }, "CloudHsmClusterId":{ "shape":"CloudHsmClusterIdType", - "documentation":"

The cluster ID of the CloudHSM cluster that contains the key material for the KMS key. When you create a KMS key in a custom key store, KMS creates the key material for the KMS key in the associated CloudHSM cluster. This value is present only when the KMS key is created in a custom key store.

" + "documentation":"

The cluster ID of the CloudHSM cluster that contains the key material for the KMS key. When you create a KMS key in an CloudHSM custom key store, KMS creates the key material for the KMS key in the associated CloudHSM cluster. This field is present only when the KMS key is created in an CloudHSM key store.

" }, "ExpirationModel":{ "shape":"ExpirationModelType", @@ -2407,7 +2481,7 @@ }, "CustomerMasterKeySpec":{ "shape":"CustomerMasterKeySpec", - "documentation":"

Instead, use the KeySpec field.

The KeySpec and CustomerMasterKeySpec fields have the same value. We recommend that you use the KeySpec field in your code. However, to avoid breaking changes, KMS will support both fields.

", + "documentation":"

Instead, use the KeySpec field.

The KeySpec and CustomerMasterKeySpec fields have the same value. We recommend that you use the KeySpec field in your code. However, to avoid breaking changes, KMS supports both fields.

", "deprecated":true, "deprecatedMessage":"This field has been deprecated. Instead, use the KeySpec field." }, @@ -2438,9 +2512,13 @@ "MacAlgorithms":{ "shape":"MacAlgorithmSpecList", "documentation":"

The message authentication code (MAC) algorithm that the HMAC KMS key supports.

This value is present only when the KeyUsage of the KMS key is GENERATE_VERIFY_MAC.

" + }, + "XksKeyConfiguration":{ + "shape":"XksKeyConfigurationType", + "documentation":"

Information about the external key that is associated with a KMS key in an external key store.

For more information, see External key in the Key Management Service Developer Guide.

" } }, - "documentation":"

Contains metadata about a KMS key.

This data type is used as a response element for the CreateKey and DescribeKey operations.

" + "documentation":"

Contains metadata about a KMS key.

This data type is used as a response element for the CreateKey, DescribeKey, and ReplicateKey operations.

" }, "KeySpec":{ "type":"string", @@ -2674,7 +2752,7 @@ "members":{ "Tags":{ "shape":"TagList", - "documentation":"

A list of tags. Each tag consists of a tag key and a tag value.

Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC in KMS in the Key Management Service Developer Guide.

" + "documentation":"

A list of tags. Each tag consists of a tag key and a tag value.

Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

" }, "NextMarker":{ "shape":"MarkerType", @@ -2800,7 +2878,8 @@ "enum":[ "AWS_KMS", "EXTERNAL", - "AWS_CLOUDHSM" + "AWS_CLOUDHSM", + "EXTERNAL_KEY_STORE" ] }, "PendingWindowInDaysType":{ @@ -2859,7 +2938,7 @@ }, "Policy":{ "shape":"PolicyType", - "documentation":"

The key policy to attach to the KMS key.

The key policy must meet the following criteria:

A key policy document can include only the following characters:

For information about key policies, see Key policies in KMS in the Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide .

" + "documentation":"

The key policy to attach to the KMS key.

The key policy must meet the following criteria:

A key policy document can include only the following characters:

For information about key policies, see Key policies in KMS in the Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide .

" }, "BypassPolicyLockoutSafetyCheck":{ "shape":"BooleanType", @@ -2968,7 +3047,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

Assigns one or more tags to the replica key. Use this parameter to tag the KMS key when it is created. To tag an existing KMS key, use the TagResource operation.

Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC in KMS in the Key Management Service Developer Guide.

To use this parameter, you must have kms:TagResource permission in an IAM policy.

Tags are not a shared property of multi-Region keys. You can specify the same tags or different tags for each key in a set of related multi-Region keys. KMS does not synchronize this property.

Each tag consists of a tag key and a tag value. Both the tag key and the tag value are required, but the tag value can be an empty (null) string. You cannot have more than one tag on a KMS key with the same tag key. If you specify an existing tag key with a different tag value, KMS replaces the current tag value with the specified one.

When you add tags to an Amazon Web Services resource, Amazon Web Services generates a cost allocation report with usage and costs aggregated by tags. Tags can also be used to control access to a KMS key. For details, see Tagging Keys.

" + "documentation":"

Assigns one or more tags to the replica key. Use this parameter to tag the KMS key when it is created. To tag an existing KMS key, use the TagResource operation.

Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

To use this parameter, you must have kms:TagResource permission in an IAM policy.

Tags are not a shared property of multi-Region keys. You can specify the same tags or different tags for each key in a set of related multi-Region keys. KMS does not synchronize this property.

Each tag consists of a tag key and a tag value. Both the tag key and the tag value are required, but the tag value can be an empty (null) string. You cannot have more than one tag on a KMS key with the same tag key. If you specify an existing tag key with a different tag value, KMS replaces the current tag value with the specified one.

When you add tags to an Amazon Web Services resource, Amazon Web Services generates a cost allocation report with usage and costs aggregated by tags. Tags can also be used to control access to a KMS key. For details, see Tagging Keys.

" } } }, @@ -3228,7 +3307,7 @@ }, "TargetKeyId":{ "shape":"KeyIdType", - "documentation":"

Identifies the customer managed key to associate with the alias. You don't have permission to associate an alias with an Amazon Web Services managed key.

The KMS key must be in the same Amazon Web Services account and Region as the alias. Also, the new target KMS key must be the same type as the current target KMS key (both symmetric or both asymmetric) and they must have the same key usage.

Specify the key ID or key ARN of the KMS key.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

To verify that the alias is mapped to the correct KMS key, use ListAliases.

" + "documentation":"

Identifies the customer managed key to associate with the alias. You don't have permission to associate an alias with an Amazon Web Services managed key.

The KMS key must be in the same Amazon Web Services account and Region as the alias. Also, the new target KMS key must be the same type as the current target KMS key (both symmetric or both asymmetric or both HMAC) and they must have the same key usage.

Specify the key ID or key ARN of the KMS key.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

To verify that the alias is mapped to the correct KMS key, use ListAliases.

" } } }, @@ -3242,15 +3321,35 @@ }, "NewCustomKeyStoreName":{ "shape":"CustomKeyStoreNameType", - "documentation":"

Changes the friendly name of the custom key store to the value that you specify. The custom key store name must be unique in the Amazon Web Services account.

" + "documentation":"

Changes the friendly name of the custom key store to the value that you specify. The custom key store name must be unique in the Amazon Web Services account.

To change this value, an CloudHSM key store must be disconnected. An external key store can be connected or disconnected.

" }, "KeyStorePassword":{ "shape":"KeyStorePasswordType", - "documentation":"

Enter the current password of the kmsuser crypto user (CU) in the CloudHSM cluster that is associated with the custom key store.

This parameter tells KMS the current password of the kmsuser crypto user (CU). It does not set or change the password of any users in the CloudHSM cluster.

" + "documentation":"

Enter the current password of the kmsuser crypto user (CU) in the CloudHSM cluster that is associated with the custom key store. This parameter is valid only for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM.

This parameter tells KMS the current password of the kmsuser crypto user (CU). It does not set or change the password of any users in the CloudHSM cluster.

To change this value, the CloudHSM key store must be disconnected.

" }, "CloudHsmClusterId":{ "shape":"CloudHsmClusterIdType", - "documentation":"

Associates the custom key store with a related CloudHSM cluster.

Enter the cluster ID of the cluster that you used to create the custom key store or a cluster that shares a backup history and has the same cluster certificate as the original cluster. You cannot use this parameter to associate a custom key store with an unrelated cluster. In addition, the replacement cluster must fulfill the requirements for a cluster associated with a custom key store. To view the cluster certificate of a cluster, use the DescribeClusters operation.

" + "documentation":"

Associates the custom key store with a related CloudHSM cluster. This parameter is valid only for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM.

Enter the cluster ID of the cluster that you used to create the custom key store or a cluster that shares a backup history and has the same cluster certificate as the original cluster. You cannot use this parameter to associate a custom key store with an unrelated cluster. In addition, the replacement cluster must fulfill the requirements for a cluster associated with a custom key store. To view the cluster certificate of a cluster, use the DescribeClusters operation.

To change this value, the CloudHSM key store must be disconnected.

" + }, + "XksProxyUriEndpoint":{ + "shape":"XksProxyUriEndpointType", + "documentation":"

Changes the URI endpoint that KMS uses to connect to your external key store proxy (XKS proxy). This parameter is valid only for custom key stores with a CustomKeyStoreType of EXTERNAL_KEY_STORE.

For external key stores with an XksProxyConnectivity value of PUBLIC_ENDPOINT, the protocol must be HTTPS.

For external key stores with an XksProxyConnectivity value of VPC_ENDPOINT_SERVICE, specify https:// followed by the private DNS name associated with the VPC endpoint service. Each external key store must use a different private DNS name.

The combined XksProxyUriEndpoint and XksProxyUriPath values must be unique in the Amazon Web Services account and Region.

To change this value, the external key store must be disconnected.

" + }, + "XksProxyUriPath":{ + "shape":"XksProxyUriPathType", + "documentation":"

Changes the base path to the proxy APIs for this external key store. To find this value, see the documentation for your external key manager and external key store proxy (XKS proxy). This parameter is valid only for custom key stores with a CustomKeyStoreType of EXTERNAL_KEY_STORE.

The value must start with / and must end with /kms/xks/v1, where v1 represents the version of the KMS external key store proxy API. You can include an optional prefix between the required elements such as /example/kms/xks/v1.

The combined XksProxyUriEndpoint and XksProxyUriPath values must be unique in the Amazon Web Services account and Region.

You can change this value when the external key store is connected or disconnected.

" + }, + "XksProxyVpcEndpointServiceName":{ + "shape":"XksProxyVpcEndpointServiceNameType", + "documentation":"

Changes the name that KMS uses to identify the Amazon VPC endpoint service for your external key store proxy (XKS proxy). This parameter is valid when the CustomKeyStoreType is EXTERNAL_KEY_STORE and the XksProxyConnectivity is VPC_ENDPOINT_SERVICE.

To change this value, the external key store must be disconnected.

" + }, + "XksProxyAuthenticationCredential":{ + "shape":"XksProxyAuthenticationCredentialType", + "documentation":"

Changes the credentials that KMS uses to sign requests to the external key store proxy (XKS proxy). This parameter is valid only for custom key stores with a CustomKeyStoreType of EXTERNAL_KEY_STORE.

You must specify both the AccessKeyId and SecretAccessKey value in the authentication credential, even if you are only updating one value.

This parameter doesn't establish or change your authentication credentials on the proxy. It just tells KMS the credential that you established with your external key store proxy. For example, if you rotate the credential on your external key store proxy, you can use this parameter to update the credential in KMS.

You can change this value when the external key store is connected or disconnected.

" + }, + "XksProxyConnectivity":{ + "shape":"XksProxyConnectivityType", + "documentation":"

Changes the connectivity setting for the external key store. To indicate that the external key store proxy uses a Amazon VPC endpoint service to communicate with KMS, specify VPC_ENDPOINT_SERVICE. Otherwise, specify PUBLIC_ENDPOINT.

If you change the XksProxyConnectivity to VPC_ENDPOINT_SERVICE, you must also change the XksProxyUriEndpoint and add an XksProxyVpcEndpointServiceName value.

If you change the XksProxyConnectivity to PUBLIC_ENDPOINT, you must also change the XksProxyUriEndpoint and specify a null or empty string for the XksProxyVpcEndpointServiceName value.

To change this value, the external key store must be disconnected.

" } } }, @@ -3396,7 +3495,202 @@ "WrappingKeySpec":{ "type":"string", "enum":["RSA_2048"] + }, + "XksKeyAlreadyInUseException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "documentation":"

The request was rejected because the (XksKeyId) is already associated with a KMS key in this external key store. Each KMS key in an external key store must be associated with a different external key.

", + "exception":true + }, + "XksKeyConfigurationType":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"XksKeyIdType", + "documentation":"

The ID of the external key in its external key manager. This is the ID that the external key store proxy uses to identify the external key.

" + } + }, + "documentation":"

Information about the external key that is associated with a KMS key in an external key store.

These fields appear in a CreateKey or DescribeKey response only for a KMS key in an external key store.

The external key is a symmetric encryption key that is hosted by an external key manager outside of Amazon Web Services. When you use the KMS key in an external key store in a cryptographic operation, the cryptographic operation is performed in the external key manager using the specified external key. For more information, see External key in the Key Management Service Developer Guide.

" + }, + "XksKeyIdType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z0-9-_.]+$" + }, + "XksKeyInvalidConfigurationException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "documentation":"

The request was rejected because the external key specified by the XksKeyId parameter did not meet the configuration requirements for an external key store.

The external key must be an AES-256 symmetric key that is enabled and performs encryption and decryption.

", + "exception":true + }, + "XksKeyNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "documentation":"

The request was rejected because the external key store proxy could not find the external key. This exception is thrown when the value of the XksKeyId parameter doesn't identify a key in the external key manager associated with the external key proxy.

Verify that the XksKeyId represents an existing key in the external key manager. Use the key identifier that the external key store proxy uses to identify the key. For details, see the documentation provided with your external key store proxy or key manager.

", + "exception":true + }, + "XksProxyAuthenticationAccessKeyIdType":{ + "type":"string", + "max":30, + "min":20, + "pattern":"^[A-Z2-7]+$", + "sensitive":true + }, + "XksProxyAuthenticationCredentialType":{ + "type":"structure", + "required":[ + "AccessKeyId", + "RawSecretAccessKey" + ], + "members":{ + "AccessKeyId":{ + "shape":"XksProxyAuthenticationAccessKeyIdType", + "documentation":"

A unique identifier for the raw secret access key.

" + }, + "RawSecretAccessKey":{ + "shape":"XksProxyAuthenticationRawSecretAccessKeyType", + "documentation":"

A secret string of 43-64 characters. Valid characters are a-z, A-Z, 0-9, /, +, and =.

" + } + }, + "documentation":"

KMS uses the authentication credential to sign requests that it sends to the external key store proxy (XKS proxy) on your behalf. You establish these credentials on your external key store proxy and report them to KMS.

The XksProxyAuthenticationCredential includes two required elements.

" + }, + "XksProxyAuthenticationRawSecretAccessKeyType":{ + "type":"string", + "max":64, + "min":43, + "pattern":"^[a-zA-Z0-9\\/+=]+$", + "sensitive":true + }, + "XksProxyConfigurationType":{ + "type":"structure", + "members":{ + "Connectivity":{ + "shape":"XksProxyConnectivityType", + "documentation":"

Indicates whether the external key store proxy uses a public endpoint or an Amazon VPC endpoint service to communicate with KMS.

" + }, + "AccessKeyId":{ + "shape":"XksProxyAuthenticationAccessKeyIdType", + "documentation":"

The part of the external key store proxy authentication credential that uniquely identifies the secret access key.

" + }, + "UriEndpoint":{ + "shape":"XksProxyUriEndpointType", + "documentation":"

The URI endpoint for the external key store proxy.

If the external key store proxy has a public endpoint, it is displayed here.

If the external key store proxy uses an Amazon VPC endpoint service name, this field displays the private DNS name associated with the VPC endpoint service.

" + }, + "UriPath":{ + "shape":"XksProxyUriPathType", + "documentation":"

The path to the external key store proxy APIs.

" + }, + "VpcEndpointServiceName":{ + "shape":"XksProxyVpcEndpointServiceNameType", + "documentation":"

The Amazon VPC endpoint service used to communicate with the external key store proxy. This field appears only when the external key store proxy uses an Amazon VPC endpoint service to communicate with KMS.

" + } + }, + "documentation":"

Detailed information about the external key store proxy (XKS proxy). Your external key store proxy translates KMS requests into a format that your external key manager can understand. These fields appear in a DescribeCustomKeyStores response only when the CustomKeyStoreType is EXTERNAL_KEY_STORE.

" + }, + "XksProxyConnectivityType":{ + "type":"string", + "enum":[ + "PUBLIC_ENDPOINT", + "VPC_ENDPOINT_SERVICE" + ] + }, + "XksProxyIncorrectAuthenticationCredentialException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "documentation":"

The request was rejected because the proxy credentials failed to authenticate to the specified external key store proxy. The specified external key store proxy rejected a status request from KMS due to invalid credentials. This can indicate an error in the credentials or in the identification of the external key store proxy.

", + "exception":true + }, + "XksProxyInvalidConfigurationException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "documentation":"

The request was rejected because the Amazon VPC endpoint service configuration does not fulfill the requirements for an external key store proxy. For details, see the exception message.

", + "exception":true + }, + "XksProxyInvalidResponseException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "documentation":"

KMS cannot interpret the response it received from the external key store proxy. The problem might be a poorly constructed response, but it could also be a transient network issue. If you see this error repeatedly, report it to the proxy vendor.

", + "exception":true + }, + "XksProxyUriEndpointInUseException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "documentation":"

The request was rejected because the concatenation of the XksProxyUriEndpoint is already associated with an external key store in the Amazon Web Services account and Region. Each external key store in an account and Region must use a unique external key store proxy address.

", + "exception":true + }, + "XksProxyUriEndpointType":{ + "type":"string", + "max":128, + "min":10, + "pattern":"^https://[a-zA-Z0-9.-]+$" + }, + "XksProxyUriInUseException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "documentation":"

The request was rejected because the concatenation of the XksProxyUriEndpoint and XksProxyUriPath is already associated with an external key store in the Amazon Web Services account and Region. Each external key store in an account and Region must use a unique external key store proxy API address.

", + "exception":true + }, + "XksProxyUriPathType":{ + "type":"string", + "max":128, + "min":10, + "pattern":"^(/[a-zA-Z0-9\\/_-]+/kms/xks/v\\d{1,2})$|^(/kms/xks/v\\d{1,2})$" + }, + "XksProxyUriUnreachableException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "documentation":"

KMS was unable to reach the specified XksProxyUriPath. The path must be reachable before you create the external key store or update its settings.

This exception is also thrown when the external key store proxy response to a GetHealthStatus request indicates that all external key manager instances are unavailable.

", + "exception":true + }, + "XksProxyVpcEndpointServiceInUseException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "documentation":"

The request was rejected because the specified Amazon VPC endpoint service is already associated with an external key store in the Amazon Web Services account and Region. Each external key store in an Amazon Web Services account and Region must use a different Amazon VPC endpoint service.

", + "exception":true + }, + "XksProxyVpcEndpointServiceInvalidConfigurationException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "documentation":"

The request was rejected because the Amazon VPC endpoint service configuration does not fulfill the requirements for an external key store proxy. For details, see the exception message and review the requirements for Amazon VPC endpoint service connectivity for an external key store.

", + "exception":true + }, + "XksProxyVpcEndpointServiceNameType":{ + "type":"string", + "max":64, + "min":20, + "pattern":"^com\\.amazonaws\\.vpce\\.([a-z]+-){2,3}\\d+\\.vpce-svc-[0-9a-z]+$" + }, + "XksProxyVpcEndpointServiceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "documentation":"

The request was rejected because KMS could not find the specified VPC endpoint service. Use DescribeCustomKeyStores to verify the VPC endpoint service name for the external key store. Also, confirm that the Allow principals list for the VPC endpoint service includes the KMS service principal for the Region, such as cks.kms.us-east-1.amazonaws.com.

", + "exception":true } }, - "documentation":"Key Management Service

Key Management Service (KMS) is an encryption and key management web service. This guide describes the KMS operations that you can call programmatically. For general information about KMS, see the Key Management Service Developer Guide .

KMS is replacing the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

Amazon Web Services provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .Net, macOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to KMS and other Amazon Web Services services. For example, the SDKs take care of tasks such as signing requests (see below), managing errors, and retrying requests automatically. For more information about the Amazon Web Services SDKs, including how to download and install them, see Tools for Amazon Web Services.

We recommend that you use the Amazon Web Services SDKs to make programmatic API calls to KMS.

If you need to use FIPS 140-2 validated cryptographic modules when communicating with Amazon Web Services, use the FIPS endpoint in your preferred Amazon Web Services Region. For more information about the available FIPS endpoints, see Service endpoints in the Key Management Service topic of the Amazon Web Services General Reference.

All KMS API calls must be signed and be transmitted using Transport Layer Security (TLS). KMS recommends you always use the latest supported TLS version. Clients must also support cipher suites with Perfect Forward Secrecy (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support these modes.

Signing Requests

Requests must be signed by using an access key ID and a secret access key. We strongly recommend that you do not use your Amazon Web Services account (root) access key ID and secret key for everyday work with KMS. Instead, use the access key ID and secret access key for an IAM user. You can also use the Amazon Web Services Security Token Service to generate temporary security credentials that you can use to sign requests.

All KMS operations require Signature Version 4.

Logging API Requests

KMS supports CloudTrail, a service that logs Amazon Web Services API calls and related events for your Amazon Web Services account and delivers them to an Amazon S3 bucket that you specify. By using the information collected by CloudTrail, you can determine what requests were made to KMS, who made the request, when it was made, and so on. To learn more about CloudTrail, including how to turn it on and find your log files, see the CloudTrail User Guide.

Additional Resources

For more information about credentials and request signing, see the following:

Commonly Used API Operations

Of the API operations discussed in this guide, the following will prove the most useful for most applications. You will likely perform operations other than these, such as creating keys and assigning policies, by using the console.

" + "documentation":"Key Management Service

Key Management Service (KMS) is an encryption and key management web service. This guide describes the KMS operations that you can call programmatically. For general information about KMS, see the Key Management Service Developer Guide .

KMS has replaced the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

Amazon Web Services provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .Net, macOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to KMS and other Amazon Web Services services. For example, the SDKs take care of tasks such as signing requests (see below), managing errors, and retrying requests automatically. For more information about the Amazon Web Services SDKs, including how to download and install them, see Tools for Amazon Web Services.

We recommend that you use the Amazon Web Services SDKs to make programmatic API calls to KMS.

If you need to use FIPS 140-2 validated cryptographic modules when communicating with Amazon Web Services, use the FIPS endpoint in your preferred Amazon Web Services Region. For more information about the available FIPS endpoints, see Service endpoints in the Key Management Service topic of the Amazon Web Services General Reference.

All KMS API calls must be signed and be transmitted using Transport Layer Security (TLS). KMS recommends you always use the latest supported TLS version. Clients must also support cipher suites with Perfect Forward Secrecy (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support these modes.

Signing Requests

Requests must be signed by using an access key ID and a secret access key. We strongly recommend that you do not use your Amazon Web Services account (root) access key ID and secret access key for everyday work with KMS. Instead, use the access key ID and secret access key for an IAM user. You can also use the Amazon Web Services Security Token Service to generate temporary security credentials that you can use to sign requests.

All KMS operations require Signature Version 4.

Logging API Requests

KMS supports CloudTrail, a service that logs Amazon Web Services API calls and related events for your Amazon Web Services account and delivers them to an Amazon S3 bucket that you specify. By using the information collected by CloudTrail, you can determine what requests were made to KMS, who made the request, when it was made, and so on. To learn more about CloudTrail, including how to turn it on and find your log files, see the CloudTrail User Guide.

Additional Resources

For more information about credentials and request signing, see the following:

Commonly Used API Operations

Of the API operations discussed in this guide, the following will prove the most useful for most applications. You will likely perform operations other than these, such as creating keys and assigning policies, by using the console.

" } diff --git a/botocore/data/omics/2022-11-28/endpoint-rule-set-1.json b/botocore/data/omics/2022-11-28/endpoint-rule-set-1.json new file mode 100644 index 0000000000..8c1b3668fb --- /dev/null +++ b/botocore/data/omics/2022-11-28/endpoint-rule-set-1.json @@ -0,0 +1,309 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": true, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true 
+ ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://omics-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://omics-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + 
], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://omics.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://omics.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] +} \ No newline at end of file diff --git a/botocore/data/omics/2022-11-28/paginators-1.json b/botocore/data/omics/2022-11-28/paginators-1.json new file mode 100644 index 0000000000..ea92fb1c53 --- /dev/null +++ b/botocore/data/omics/2022-11-28/paginators-1.json @@ -0,0 +1,100 @@ +{ + "pagination": { + "ListAnnotationImportJobs": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "annotationImportJobs" + }, + "ListAnnotationStores": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "annotationStores" + }, + "ListReadSetActivationJobs": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "activationJobs" + }, + "ListReadSetExportJobs": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "exportJobs" + }, + "ListReadSetImportJobs": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "importJobs" + }, + "ListReadSets": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "readSets" + }, + 
"ListReferenceImportJobs": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "importJobs" + }, + "ListReferenceStores": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "referenceStores" + }, + "ListReferences": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "references" + }, + "ListRunGroups": { + "input_token": "startingToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "items" + }, + "ListRunTasks": { + "input_token": "startingToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "items" + }, + "ListRuns": { + "input_token": "startingToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "items" + }, + "ListSequenceStores": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "sequenceStores" + }, + "ListVariantImportJobs": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "variantImportJobs" + }, + "ListVariantStores": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "variantStores" + }, + "ListWorkflows": { + "input_token": "startingToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "items" + } + } +} diff --git a/botocore/data/omics/2022-11-28/service-2.json b/botocore/data/omics/2022-11-28/service-2.json new file mode 100644 index 0000000000..1718af1efc --- /dev/null +++ b/botocore/data/omics/2022-11-28/service-2.json @@ -0,0 +1,7338 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2022-11-28", + "endpointPrefix":"omics", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon Omics", + "serviceId":"Omics", + "signatureVersion":"v4", + "signingName":"omics", + 
"uid":"omics-2022-11-28" + }, + "operations":{ + "BatchDeleteReadSet":{ + "name":"BatchDeleteReadSet", + "http":{ + "method":"POST", + "requestUri":"/sequencestore/{sequenceStoreId}/readset/batch/delete", + "responseCode":200 + }, + "input":{"shape":"BatchDeleteReadSetRequest"}, + "output":{"shape":"BatchDeleteReadSetResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Deletes one or more read sets.

", + "endpoint":{"hostPrefix":"control-storage-"}, + "idempotent":true + }, + "CancelAnnotationImportJob":{ + "name":"CancelAnnotationImportJob", + "http":{ + "method":"DELETE", + "requestUri":"/import/annotation/{jobId}", + "responseCode":200 + }, + "input":{"shape":"CancelAnnotationImportRequest"}, + "output":{"shape":"CancelAnnotationImportResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Cancels an annotation import job.

", + "endpoint":{"hostPrefix":"analytics-"}, + "idempotent":true + }, + "CancelRun":{ + "name":"CancelRun", + "http":{ + "method":"POST", + "requestUri":"/run/{id}/cancel", + "responseCode":202 + }, + "input":{"shape":"CancelRunRequest"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Cancels a run.

", + "endpoint":{"hostPrefix":"workflows-"} + }, + "CancelVariantImportJob":{ + "name":"CancelVariantImportJob", + "http":{ + "method":"DELETE", + "requestUri":"/import/variant/{jobId}", + "responseCode":200 + }, + "input":{"shape":"CancelVariantImportRequest"}, + "output":{"shape":"CancelVariantImportResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Cancels a variant import job.

", + "endpoint":{"hostPrefix":"analytics-"}, + "idempotent":true + }, + "CreateAnnotationStore":{ + "name":"CreateAnnotationStore", + "http":{ + "method":"POST", + "requestUri":"/annotationStore", + "responseCode":200 + }, + "input":{"shape":"CreateAnnotationStoreRequest"}, + "output":{"shape":"CreateAnnotationStoreResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates an annotation store.

", + "endpoint":{"hostPrefix":"analytics-"} + }, + "CreateReferenceStore":{ + "name":"CreateReferenceStore", + "http":{ + "method":"POST", + "requestUri":"/referencestore", + "responseCode":200 + }, + "input":{"shape":"CreateReferenceStoreRequest"}, + "output":{"shape":"CreateReferenceStoreResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Creates a reference store.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "CreateRunGroup":{ + "name":"CreateRunGroup", + "http":{ + "method":"POST", + "requestUri":"/runGroup", + "responseCode":201 + }, + "input":{"shape":"CreateRunGroupRequest"}, + "output":{"shape":"CreateRunGroupResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Creates a run group.

", + "endpoint":{"hostPrefix":"workflows-"} + }, + "CreateSequenceStore":{ + "name":"CreateSequenceStore", + "http":{ + "method":"POST", + "requestUri":"/sequencestore", + "responseCode":200 + }, + "input":{"shape":"CreateSequenceStoreRequest"}, + "output":{"shape":"CreateSequenceStoreResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Creates a sequence store.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "CreateVariantStore":{ + "name":"CreateVariantStore", + "http":{ + "method":"POST", + "requestUri":"/variantStore", + "responseCode":200 + }, + "input":{"shape":"CreateVariantStoreRequest"}, + "output":{"shape":"CreateVariantStoreResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates a variant store.

", + "endpoint":{"hostPrefix":"analytics-"} + }, + "CreateWorkflow":{ + "name":"CreateWorkflow", + "http":{ + "method":"POST", + "requestUri":"/workflow", + "responseCode":201 + }, + "input":{"shape":"CreateWorkflowRequest"}, + "output":{"shape":"CreateWorkflowResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Creates a workflow.

", + "endpoint":{"hostPrefix":"workflows-"} + }, + "DeleteAnnotationStore":{ + "name":"DeleteAnnotationStore", + "http":{ + "method":"DELETE", + "requestUri":"/annotationStore/{name}", + "responseCode":200 + }, + "input":{"shape":"DeleteAnnotationStoreRequest"}, + "output":{"shape":"DeleteAnnotationStoreResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Deletes an annotation store.

", + "endpoint":{"hostPrefix":"analytics-"}, + "idempotent":true + }, + "DeleteReference":{ + "name":"DeleteReference", + "http":{ + "method":"DELETE", + "requestUri":"/referencestore/{referenceStoreId}/reference/{id}", + "responseCode":200 + }, + "input":{"shape":"DeleteReferenceRequest"}, + "output":{"shape":"DeleteReferenceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Deletes a genome reference.

", + "endpoint":{"hostPrefix":"control-storage-"}, + "idempotent":true + }, + "DeleteReferenceStore":{ + "name":"DeleteReferenceStore", + "http":{ + "method":"DELETE", + "requestUri":"/referencestore/{id}", + "responseCode":200 + }, + "input":{"shape":"DeleteReferenceStoreRequest"}, + "output":{"shape":"DeleteReferenceStoreResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Deletes a genome reference store.

", + "endpoint":{"hostPrefix":"control-storage-"}, + "idempotent":true + }, + "DeleteRun":{ + "name":"DeleteRun", + "http":{ + "method":"DELETE", + "requestUri":"/run/{id}", + "responseCode":202 + }, + "input":{"shape":"DeleteRunRequest"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Deletes a workflow run.

", + "endpoint":{"hostPrefix":"workflows-"}, + "idempotent":true + }, + "DeleteRunGroup":{ + "name":"DeleteRunGroup", + "http":{ + "method":"DELETE", + "requestUri":"/runGroup/{id}", + "responseCode":202 + }, + "input":{"shape":"DeleteRunGroupRequest"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Deletes a workflow run group.

", + "endpoint":{"hostPrefix":"workflows-"}, + "idempotent":true + }, + "DeleteSequenceStore":{ + "name":"DeleteSequenceStore", + "http":{ + "method":"DELETE", + "requestUri":"/sequencestore/{id}", + "responseCode":200 + }, + "input":{"shape":"DeleteSequenceStoreRequest"}, + "output":{"shape":"DeleteSequenceStoreResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Deletes a sequence store.

", + "endpoint":{"hostPrefix":"control-storage-"}, + "idempotent":true + }, + "DeleteVariantStore":{ + "name":"DeleteVariantStore", + "http":{ + "method":"DELETE", + "requestUri":"/variantStore/{name}", + "responseCode":200 + }, + "input":{"shape":"DeleteVariantStoreRequest"}, + "output":{"shape":"DeleteVariantStoreResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Deletes a variant store.

", + "endpoint":{"hostPrefix":"analytics-"}, + "idempotent":true + }, + "DeleteWorkflow":{ + "name":"DeleteWorkflow", + "http":{ + "method":"DELETE", + "requestUri":"/workflow/{id}", + "responseCode":202 + }, + "input":{"shape":"DeleteWorkflowRequest"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Deletes a workflow.

", + "endpoint":{"hostPrefix":"workflows-"}, + "idempotent":true + }, + "GetAnnotationImportJob":{ + "name":"GetAnnotationImportJob", + "http":{ + "method":"GET", + "requestUri":"/import/annotation/{jobId}", + "responseCode":200 + }, + "input":{"shape":"GetAnnotationImportRequest"}, + "output":{"shape":"GetAnnotationImportResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Gets information about an annotation import job.

", + "endpoint":{"hostPrefix":"analytics-"} + }, + "GetAnnotationStore":{ + "name":"GetAnnotationStore", + "http":{ + "method":"GET", + "requestUri":"/annotationStore/{name}", + "responseCode":200 + }, + "input":{"shape":"GetAnnotationStoreRequest"}, + "output":{"shape":"GetAnnotationStoreResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Gets information about an annotation store.

", + "endpoint":{"hostPrefix":"analytics-"} + }, + "GetReadSet":{ + "name":"GetReadSet", + "http":{ + "method":"GET", + "requestUri":"/sequencestore/{sequenceStoreId}/readset/{id}", + "responseCode":200 + }, + "input":{"shape":"GetReadSetRequest"}, + "output":{"shape":"GetReadSetResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"RangeNotSatisfiableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Gets a file from a read set.

", + "endpoint":{"hostPrefix":"storage-"} + }, + "GetReadSetActivationJob":{ + "name":"GetReadSetActivationJob", + "http":{ + "method":"GET", + "requestUri":"/sequencestore/{sequenceStoreId}/activationjob/{id}", + "responseCode":200 + }, + "input":{"shape":"GetReadSetActivationJobRequest"}, + "output":{"shape":"GetReadSetActivationJobResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Gets information about a read set activation job.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "GetReadSetExportJob":{ + "name":"GetReadSetExportJob", + "http":{ + "method":"GET", + "requestUri":"/sequencestore/{sequenceStoreId}/exportjob/{id}", + "responseCode":200 + }, + "input":{"shape":"GetReadSetExportJobRequest"}, + "output":{"shape":"GetReadSetExportJobResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Gets information about a read set export job.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "GetReadSetImportJob":{ + "name":"GetReadSetImportJob", + "http":{ + "method":"GET", + "requestUri":"/sequencestore/{sequenceStoreId}/importjob/{id}", + "responseCode":200 + }, + "input":{"shape":"GetReadSetImportJobRequest"}, + "output":{"shape":"GetReadSetImportJobResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Gets information about a read set import job.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "GetReadSetMetadata":{ + "name":"GetReadSetMetadata", + "http":{ + "method":"GET", + "requestUri":"/sequencestore/{sequenceStoreId}/readset/{id}/metadata", + "responseCode":200 + }, + "input":{"shape":"GetReadSetMetadataRequest"}, + "output":{"shape":"GetReadSetMetadataResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Gets details about a read set.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "GetReference":{ + "name":"GetReference", + "http":{ + "method":"GET", + "requestUri":"/referencestore/{referenceStoreId}/reference/{id}", + "responseCode":200 + }, + "input":{"shape":"GetReferenceRequest"}, + "output":{"shape":"GetReferenceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"RangeNotSatisfiableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Gets a reference file.

", + "endpoint":{"hostPrefix":"storage-"} + }, + "GetReferenceImportJob":{ + "name":"GetReferenceImportJob", + "http":{ + "method":"GET", + "requestUri":"/referencestore/{referenceStoreId}/importjob/{id}", + "responseCode":200 + }, + "input":{"shape":"GetReferenceImportJobRequest"}, + "output":{"shape":"GetReferenceImportJobResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Gets information about a reference import job.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "GetReferenceMetadata":{ + "name":"GetReferenceMetadata", + "http":{ + "method":"GET", + "requestUri":"/referencestore/{referenceStoreId}/reference/{id}/metadata", + "responseCode":200 + }, + "input":{"shape":"GetReferenceMetadataRequest"}, + "output":{"shape":"GetReferenceMetadataResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Gets information about a genome reference's metadata.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "GetReferenceStore":{ + "name":"GetReferenceStore", + "http":{ + "method":"GET", + "requestUri":"/referencestore/{id}", + "responseCode":200 + }, + "input":{"shape":"GetReferenceStoreRequest"}, + "output":{"shape":"GetReferenceStoreResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Gets information about a reference store.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "GetRun":{ + "name":"GetRun", + "http":{ + "method":"GET", + "requestUri":"/run/{id}", + "responseCode":200 + }, + "input":{"shape":"GetRunRequest"}, + "output":{"shape":"GetRunResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Gets information about a workflow run.

", + "endpoint":{"hostPrefix":"workflows-"} + }, + "GetRunGroup":{ + "name":"GetRunGroup", + "http":{ + "method":"GET", + "requestUri":"/runGroup/{id}", + "responseCode":200 + }, + "input":{"shape":"GetRunGroupRequest"}, + "output":{"shape":"GetRunGroupResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Gets information about a workflow run group.

", + "endpoint":{"hostPrefix":"workflows-"} + }, + "GetRunTask":{ + "name":"GetRunTask", + "http":{ + "method":"GET", + "requestUri":"/run/{id}/task/{taskId}", + "responseCode":200 + }, + "input":{"shape":"GetRunTaskRequest"}, + "output":{"shape":"GetRunTaskResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Gets information about a workflow run task.

", + "endpoint":{"hostPrefix":"workflows-"} + }, + "GetSequenceStore":{ + "name":"GetSequenceStore", + "http":{ + "method":"GET", + "requestUri":"/sequencestore/{id}", + "responseCode":200 + }, + "input":{"shape":"GetSequenceStoreRequest"}, + "output":{"shape":"GetSequenceStoreResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Gets information about a sequence store.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "GetVariantImportJob":{ + "name":"GetVariantImportJob", + "http":{ + "method":"GET", + "requestUri":"/import/variant/{jobId}", + "responseCode":200 + }, + "input":{"shape":"GetVariantImportRequest"}, + "output":{"shape":"GetVariantImportResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Gets information about a variant import job.

", + "endpoint":{"hostPrefix":"analytics-"} + }, + "GetVariantStore":{ + "name":"GetVariantStore", + "http":{ + "method":"GET", + "requestUri":"/variantStore/{name}", + "responseCode":200 + }, + "input":{"shape":"GetVariantStoreRequest"}, + "output":{"shape":"GetVariantStoreResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Gets information about a variant store.

", + "endpoint":{"hostPrefix":"analytics-"} + }, + "GetWorkflow":{ + "name":"GetWorkflow", + "http":{ + "method":"GET", + "requestUri":"/workflow/{id}", + "responseCode":200 + }, + "input":{"shape":"GetWorkflowRequest"}, + "output":{"shape":"GetWorkflowResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Gets information about a workflow.

", + "endpoint":{"hostPrefix":"workflows-"} + }, + "ListAnnotationImportJobs":{ + "name":"ListAnnotationImportJobs", + "http":{ + "method":"POST", + "requestUri":"/import/annotations", + "responseCode":200 + }, + "input":{"shape":"ListAnnotationImportJobsRequest"}, + "output":{"shape":"ListAnnotationImportJobsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves a list of annotation import jobs.

", + "endpoint":{"hostPrefix":"analytics-"} + }, + "ListAnnotationStores":{ + "name":"ListAnnotationStores", + "http":{ + "method":"POST", + "requestUri":"/annotationStores", + "responseCode":200 + }, + "input":{"shape":"ListAnnotationStoresRequest"}, + "output":{"shape":"ListAnnotationStoresResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves a list of annotation stores.

", + "endpoint":{"hostPrefix":"analytics-"} + }, + "ListReadSetActivationJobs":{ + "name":"ListReadSetActivationJobs", + "http":{ + "method":"POST", + "requestUri":"/sequencestore/{sequenceStoreId}/activationjobs", + "responseCode":200 + }, + "input":{"shape":"ListReadSetActivationJobsRequest"}, + "output":{"shape":"ListReadSetActivationJobsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Retrieves a list of read set activation jobs.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "ListReadSetExportJobs":{ + "name":"ListReadSetExportJobs", + "http":{ + "method":"POST", + "requestUri":"/sequencestore/{sequenceStoreId}/exportjobs", + "responseCode":200 + }, + "input":{"shape":"ListReadSetExportJobsRequest"}, + "output":{"shape":"ListReadSetExportJobsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Retrieves a list of read set export jobs.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "ListReadSetImportJobs":{ + "name":"ListReadSetImportJobs", + "http":{ + "method":"POST", + "requestUri":"/sequencestore/{sequenceStoreId}/importjobs", + "responseCode":200 + }, + "input":{"shape":"ListReadSetImportJobsRequest"}, + "output":{"shape":"ListReadSetImportJobsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Retrieves a list of read set import jobs.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "ListReadSets":{ + "name":"ListReadSets", + "http":{ + "method":"POST", + "requestUri":"/sequencestore/{sequenceStoreId}/readsets", + "responseCode":200 + }, + "input":{"shape":"ListReadSetsRequest"}, + "output":{"shape":"ListReadSetsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Retrieves a list of read sets.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "ListReferenceImportJobs":{ + "name":"ListReferenceImportJobs", + "http":{ + "method":"POST", + "requestUri":"/referencestore/{referenceStoreId}/importjobs", + "responseCode":200 + }, + "input":{"shape":"ListReferenceImportJobsRequest"}, + "output":{"shape":"ListReferenceImportJobsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Retrieves a list of reference import jobs.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "ListReferenceStores":{ + "name":"ListReferenceStores", + "http":{ + "method":"POST", + "requestUri":"/referencestores", + "responseCode":200 + }, + "input":{"shape":"ListReferenceStoresRequest"}, + "output":{"shape":"ListReferenceStoresResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Retrieves a list of reference stores.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "ListReferences":{ + "name":"ListReferences", + "http":{ + "method":"POST", + "requestUri":"/referencestore/{referenceStoreId}/references", + "responseCode":200 + }, + "input":{"shape":"ListReferencesRequest"}, + "output":{"shape":"ListReferencesResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Retrieves a list of references.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "ListRunGroups":{ + "name":"ListRunGroups", + "http":{ + "method":"GET", + "requestUri":"/runGroup", + "responseCode":200 + }, + "input":{"shape":"ListRunGroupsRequest"}, + "output":{"shape":"ListRunGroupsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Retrieves a list of run groups.

", + "endpoint":{"hostPrefix":"workflows-"} + }, + "ListRunTasks":{ + "name":"ListRunTasks", + "http":{ + "method":"GET", + "requestUri":"/run/{id}/task", + "responseCode":200 + }, + "input":{"shape":"ListRunTasksRequest"}, + "output":{"shape":"ListRunTasksResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Retrieves a list of tasks for a run.

", + "endpoint":{"hostPrefix":"workflows-"} + }, + "ListRuns":{ + "name":"ListRuns", + "http":{ + "method":"GET", + "requestUri":"/run", + "responseCode":200 + }, + "input":{"shape":"ListRunsRequest"}, + "output":{"shape":"ListRunsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Retrieves a list of runs.

", + "endpoint":{"hostPrefix":"workflows-"} + }, + "ListSequenceStores":{ + "name":"ListSequenceStores", + "http":{ + "method":"POST", + "requestUri":"/sequencestores", + "responseCode":200 + }, + "input":{"shape":"ListSequenceStoresRequest"}, + "output":{"shape":"ListSequenceStoresResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Retrieves a list of sequence stores.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Retrieves a list of tags for a resource.

", + "endpoint":{"hostPrefix":"tags-"} + }, + "ListVariantImportJobs":{ + "name":"ListVariantImportJobs", + "http":{ + "method":"POST", + "requestUri":"/import/variants", + "responseCode":200 + }, + "input":{"shape":"ListVariantImportJobsRequest"}, + "output":{"shape":"ListVariantImportJobsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves a list of variant import jobs.

", + "endpoint":{"hostPrefix":"analytics-"} + }, + "ListVariantStores":{ + "name":"ListVariantStores", + "http":{ + "method":"POST", + "requestUri":"/variantStores", + "responseCode":200 + }, + "input":{"shape":"ListVariantStoresRequest"}, + "output":{"shape":"ListVariantStoresResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves a list of variant stores.

", + "endpoint":{"hostPrefix":"analytics-"} + }, + "ListWorkflows":{ + "name":"ListWorkflows", + "http":{ + "method":"GET", + "requestUri":"/workflow", + "responseCode":200 + }, + "input":{"shape":"ListWorkflowsRequest"}, + "output":{"shape":"ListWorkflowsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Retrieves a list of workflows.

", + "endpoint":{"hostPrefix":"workflows-"} + }, + "StartAnnotationImportJob":{ + "name":"StartAnnotationImportJob", + "http":{ + "method":"POST", + "requestUri":"/import/annotation", + "responseCode":200 + }, + "input":{"shape":"StartAnnotationImportRequest"}, + "output":{"shape":"StartAnnotationImportResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Starts an annotation import job.

", + "endpoint":{"hostPrefix":"analytics-"} + }, + "StartReadSetActivationJob":{ + "name":"StartReadSetActivationJob", + "http":{ + "method":"POST", + "requestUri":"/sequencestore/{sequenceStoreId}/activationjob", + "responseCode":200 + }, + "input":{"shape":"StartReadSetActivationJobRequest"}, + "output":{"shape":"StartReadSetActivationJobResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Starts a read set activation job.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "StartReadSetExportJob":{ + "name":"StartReadSetExportJob", + "http":{ + "method":"POST", + "requestUri":"/sequencestore/{sequenceStoreId}/exportjob", + "responseCode":200 + }, + "input":{"shape":"StartReadSetExportJobRequest"}, + "output":{"shape":"StartReadSetExportJobResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Starts a read set export job.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "StartReadSetImportJob":{ + "name":"StartReadSetImportJob", + "http":{ + "method":"POST", + "requestUri":"/sequencestore/{sequenceStoreId}/importjob", + "responseCode":200 + }, + "input":{"shape":"StartReadSetImportJobRequest"}, + "output":{"shape":"StartReadSetImportJobResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Starts a read set import job.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "StartReferenceImportJob":{ + "name":"StartReferenceImportJob", + "http":{ + "method":"POST", + "requestUri":"/referencestore/{referenceStoreId}/importjob", + "responseCode":200 + }, + "input":{"shape":"StartReferenceImportJobRequest"}, + "output":{"shape":"StartReferenceImportJobResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Starts a reference import job.

", + "endpoint":{"hostPrefix":"control-storage-"} + }, + "StartRun":{ + "name":"StartRun", + "http":{ + "method":"POST", + "requestUri":"/run", + "responseCode":201 + }, + "input":{"shape":"StartRunRequest"}, + "output":{"shape":"StartRunResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Starts a run.

", + "endpoint":{"hostPrefix":"workflows-"} + }, + "StartVariantImportJob":{ + "name":"StartVariantImportJob", + "http":{ + "method":"POST", + "requestUri":"/import/variant", + "responseCode":200 + }, + "input":{"shape":"StartVariantImportRequest"}, + "output":{"shape":"StartVariantImportResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Starts a variant import job.

", + "endpoint":{"hostPrefix":"analytics-"} + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}", + "responseCode":204 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Tags a resource.

", + "endpoint":{"hostPrefix":"tags-"} + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}", + "responseCode":204 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Removes tags from a resource.

", + "endpoint":{"hostPrefix":"tags-"}, + "idempotent":true + }, + "UpdateAnnotationStore":{ + "name":"UpdateAnnotationStore", + "http":{ + "method":"POST", + "requestUri":"/annotationStore/{name}", + "responseCode":200 + }, + "input":{"shape":"UpdateAnnotationStoreRequest"}, + "output":{"shape":"UpdateAnnotationStoreResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Updates an annotation store.

", + "endpoint":{"hostPrefix":"analytics-"} + }, + "UpdateRunGroup":{ + "name":"UpdateRunGroup", + "http":{ + "method":"POST", + "requestUri":"/runGroup/{id}", + "responseCode":202 + }, + "input":{"shape":"UpdateRunGroupRequest"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Updates a run group.

", + "endpoint":{"hostPrefix":"workflows-"} + }, + "UpdateVariantStore":{ + "name":"UpdateVariantStore", + "http":{ + "method":"POST", + "requestUri":"/variantStore/{name}", + "responseCode":200 + }, + "input":{"shape":"UpdateVariantStoreRequest"}, + "output":{"shape":"UpdateVariantStoreResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Updates a variant store.

", + "endpoint":{"hostPrefix":"analytics-"} + }, + "UpdateWorkflow":{ + "name":"UpdateWorkflow", + "http":{ + "method":"POST", + "requestUri":"/workflow/{id}", + "responseCode":202 + }, + "input":{"shape":"UpdateWorkflowRequest"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

Updates a workflow.

", + "endpoint":{"hostPrefix":"workflows-"} + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

You do not have sufficient access to perform this action.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "ActivateReadSetFilter":{ + "type":"structure", + "members":{ + "createdAfter":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The filter's start date.

" + }, + "createdBefore":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The filter's end date.

" + }, + "status":{ + "shape":"ReadSetActivationJobStatus", + "documentation":"

The filter's status.

" + } + }, + "documentation":"

A read set activation job filter.

" + }, + "ActivateReadSetJobItem":{ + "type":"structure", + "required":[ + "creationTime", + "id", + "sequenceStoreId", + "status" + ], + "members":{ + "completionTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the job completed.

" + }, + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the job was created.

" + }, + "id":{ + "shape":"ActivationJobId", + "documentation":"

The job's ID.

" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The job's sequence store ID.

" + }, + "status":{ + "shape":"ReadSetActivationJobStatus", + "documentation":"

The job's status.

" + } + }, + "documentation":"

A read set activation job.

" + }, + "ActivateReadSetJobList":{ + "type":"list", + "member":{"shape":"ActivateReadSetJobItem"} + }, + "ActivateReadSetSourceItem":{ + "type":"structure", + "required":[ + "readSetId", + "status" + ], + "members":{ + "readSetId":{ + "shape":"ReadSetId", + "documentation":"

The source's read set ID.

" + }, + "status":{ + "shape":"ReadSetActivationJobItemStatus", + "documentation":"

The source's status.

" + }, + "statusMessage":{ + "shape":"JobStatusMessage", + "documentation":"

The source's status message.

" + } + }, + "documentation":"

A source for a read set activation job.

" + }, + "ActivateReadSetSourceList":{ + "type":"list", + "member":{"shape":"ActivateReadSetSourceItem"} + }, + "ActivationJobId":{ + "type":"string", + "max":36, + "min":10, + "pattern":"^[0-9]+$" + }, + "AnnotationImportItemDetail":{ + "type":"structure", + "required":[ + "jobStatus", + "source" + ], + "members":{ + "jobStatus":{ + "shape":"JobStatus", + "documentation":"

The item's job status.

" + }, + "source":{ + "shape":"S3Uri", + "documentation":"

The source file's location in Amazon S3.

" + } + }, + "documentation":"

Details about an imported annotation item.

" + }, + "AnnotationImportItemDetails":{ + "type":"list", + "member":{"shape":"AnnotationImportItemDetail"}, + "max":1, + "min":1 + }, + "AnnotationImportItemSource":{ + "type":"structure", + "required":["source"], + "members":{ + "source":{ + "shape":"S3Uri", + "documentation":"

The source file's location in Amazon S3.

" + } + }, + "documentation":"

A source for an annotation import job.

" + }, + "AnnotationImportItemSources":{ + "type":"list", + "member":{"shape":"AnnotationImportItemSource"}, + "max":1, + "min":1 + }, + "AnnotationImportJobItem":{ + "type":"structure", + "required":[ + "creationTime", + "destinationName", + "id", + "roleArn", + "status", + "updateTime" + ], + "members":{ + "completionTime":{ + "shape":"CompletionTime", + "documentation":"

When the job completed.

" + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"

When the job was created.

" + }, + "destinationName":{ + "shape":"String", + "documentation":"

The job's destination annotation store.

" + }, + "id":{ + "shape":"String", + "documentation":"

The job's ID.

" + }, + "roleArn":{ + "shape":"Arn", + "documentation":"

The job's service role ARN.

" + }, + "runLeftNormalization":{ + "shape":"RunLeftNormalization", + "documentation":"

The job's left normalization setting.

" + }, + "status":{ + "shape":"JobStatus", + "documentation":"

The job's status.

" + }, + "updateTime":{ + "shape":"UpdateTime", + "documentation":"

When the job was updated.

" + } + }, + "documentation":"

An annotation import job.

" + }, + "AnnotationImportJobItems":{ + "type":"list", + "member":{"shape":"AnnotationImportJobItem"} + }, + "AnnotationStoreItem":{ + "type":"structure", + "required":[ + "creationTime", + "description", + "id", + "name", + "reference", + "sseConfig", + "status", + "statusMessage", + "storeArn", + "storeFormat", + "storeSizeBytes", + "updateTime" + ], + "members":{ + "creationTime":{ + "shape":"CreationTime", + "documentation":"

The store's creation time.

" + }, + "description":{ + "shape":"StoreDescription", + "documentation":"

The store's description.

" + }, + "id":{ + "shape":"ResourceId", + "documentation":"

The store's ID.

" + }, + "name":{ + "shape":"String", + "documentation":"

The store's name.

" + }, + "reference":{ + "shape":"ReferenceItem", + "documentation":"

The store's genome reference.

" + }, + "sseConfig":{ + "shape":"SseConfig", + "documentation":"

The store's server-side encryption (SSE) settings.

" + }, + "status":{ + "shape":"StoreStatus", + "documentation":"

The store's status.

" + }, + "statusMessage":{ + "shape":"StatusMessage", + "documentation":"

The store's status message.

" + }, + "storeArn":{ + "shape":"Arn", + "documentation":"

The store's ARN.

" + }, + "storeFormat":{ + "shape":"StoreFormat", + "documentation":"

The store's file format.

" + }, + "storeSizeBytes":{ + "shape":"Long", + "documentation":"

The store's size in bytes.

" + }, + "updateTime":{ + "shape":"UpdateTime", + "documentation":"

When the store was updated.

" + } + }, + "documentation":"

An annotation store.

" + }, + "AnnotationStoreItems":{ + "type":"list", + "member":{"shape":"AnnotationStoreItem"} + }, + "AnnotationType":{ + "type":"string", + "enum":[ + "GENERIC", + "CHR_POS", + "CHR_POS_REF_ALT", + "CHR_START_END_ONE_BASE", + "CHR_START_END_REF_ALT_ONE_BASE", + "CHR_START_END_ZERO_BASE", + "CHR_START_END_REF_ALT_ZERO_BASE" + ] + }, + "Arn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:([^: ]*):([^: ]*):([^: ]*):([0-9]{12}):([^: ]*)$" + }, + "BatchDeleteReadSetRequest":{ + "type":"structure", + "required":[ + "ids", + "sequenceStoreId" + ], + "members":{ + "ids":{ + "shape":"ReadSetIdList", + "documentation":"

The read sets' IDs.

" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The read sets' sequence store ID.

", + "location":"uri", + "locationName":"sequenceStoreId" + } + } + }, + "BatchDeleteReadSetResponse":{ + "type":"structure", + "members":{ + "errors":{ + "shape":"ReadSetBatchErrorList", + "documentation":"

Errors returned by individual delete operations.

" + } + } + }, + "Blob":{"type":"blob"}, + "Boolean":{ + "type":"boolean", + "box":true + }, + "CancelAnnotationImportRequest":{ + "type":"structure", + "required":["jobId"], + "members":{ + "jobId":{ + "shape":"ResourceId", + "documentation":"

The job's ID.

", + "location":"uri", + "locationName":"jobId" + } + } + }, + "CancelAnnotationImportResponse":{ + "type":"structure", + "members":{ + } + }, + "CancelRunRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"RunId", + "documentation":"

The run's ID.

", + "location":"uri", + "locationName":"id" + } + } + }, + "CancelVariantImportRequest":{ + "type":"structure", + "required":["jobId"], + "members":{ + "jobId":{ + "shape":"ResourceId", + "documentation":"

The job's ID.

", + "location":"uri", + "locationName":"jobId" + } + } + }, + "CancelVariantImportResponse":{ + "type":"structure", + "members":{ + } + }, + "ClientToken":{ + "type":"string", + "max":127, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "CommentChar":{ + "type":"string", + "max":1, + "min":1 + }, + "CompletionTime":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "ConflictException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The request cannot be applied to the target resource in its current state.

", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "CreateAnnotationStoreRequest":{ + "type":"structure", + "required":["storeFormat"], + "members":{ + "description":{ + "shape":"StoreDescription", + "documentation":"

A description for the store.

" + }, + "name":{ + "shape":"CreateAnnotationStoreRequestNameString", + "documentation":"

A name for the store.

" + }, + "reference":{ + "shape":"ReferenceItem", + "documentation":"

The genome reference for the store's annotations.

" + }, + "sseConfig":{ + "shape":"SseConfig", + "documentation":"

Server-side encryption (SSE) settings for the store.

" + }, + "storeFormat":{ + "shape":"StoreFormat", + "documentation":"

The annotation file format of the store.

" + }, + "storeOptions":{ + "shape":"StoreOptions", + "documentation":"

File parsing options for the annotation store.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

Tags for the store.

" + } + } + }, + "CreateAnnotationStoreRequestNameString":{ + "type":"string", + "pattern":"^([a-z]){1}([a-z0-9_]){2,254}$" + }, + "CreateAnnotationStoreResponse":{ + "type":"structure", + "required":[ + "creationTime", + "id", + "name", + "status" + ], + "members":{ + "creationTime":{ + "shape":"CreationTime", + "documentation":"

When the store was created.

" + }, + "id":{ + "shape":"ResourceId", + "documentation":"

The store's ID.

" + }, + "name":{ + "shape":"String", + "documentation":"

The store's name.

" + }, + "reference":{ + "shape":"ReferenceItem", + "documentation":"

The store's genome reference.

" + }, + "status":{ + "shape":"StoreStatus", + "documentation":"

The store's status.

" + }, + "storeFormat":{ + "shape":"StoreFormat", + "documentation":"

The annotation file format of the store.

" + }, + "storeOptions":{ + "shape":"StoreOptions", + "documentation":"

The store's file parsing options.

" + } + } + }, + "CreateReferenceStoreRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

To ensure that requests don't run multiple times, specify a unique token for each request.

" + }, + "description":{ + "shape":"ReferenceStoreDescription", + "documentation":"

A description for the store.

" + }, + "name":{ + "shape":"ReferenceStoreName", + "documentation":"

A name for the store.

" + }, + "sseConfig":{ + "shape":"SseConfig", + "documentation":"

Server-side encryption (SSE) settings for the store.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

Tags for the store.

" + } + } + }, + "CreateReferenceStoreResponse":{ + "type":"structure", + "required":[ + "arn", + "creationTime", + "id" + ], + "members":{ + "arn":{ + "shape":"ReferenceStoreArn", + "documentation":"

The store's ARN.

" + }, + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the store was created.

" + }, + "description":{ + "shape":"ReferenceStoreDescription", + "documentation":"

The store's description.

" + }, + "id":{ + "shape":"ReferenceStoreId", + "documentation":"

The store's ID.

" + }, + "name":{ + "shape":"ReferenceStoreName", + "documentation":"

The store's name.

" + }, + "sseConfig":{ + "shape":"SseConfig", + "documentation":"

The store's SSE settings.

" + } + } + }, + "CreateRunGroupRequest":{ + "type":"structure", + "required":["requestId"], + "members":{ + "maxCpus":{ + "shape":"CreateRunGroupRequestMaxCpusInteger", + "documentation":"

The maximum number of CPUs to use in the group.

" + }, + "maxDuration":{ + "shape":"CreateRunGroupRequestMaxDurationInteger", + "documentation":"

A max duration for the group.

" + }, + "maxRuns":{ + "shape":"CreateRunGroupRequestMaxRunsInteger", + "documentation":"

The maximum number of concurrent runs for the group.

" + }, + "name":{ + "shape":"RunGroupName", + "documentation":"

A name for the group.

" + }, + "requestId":{ + "shape":"RunGroupRequestId", + "documentation":"

A request ID for the group.

", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagMap", + "documentation":"

Tags for the group.

" + } + } + }, + "CreateRunGroupRequestMaxCpusInteger":{ + "type":"integer", + "box":true, + "max":100000, + "min":1 + }, + "CreateRunGroupRequestMaxDurationInteger":{ + "type":"integer", + "box":true, + "max":100000, + "min":1 + }, + "CreateRunGroupRequestMaxRunsInteger":{ + "type":"integer", + "box":true, + "max":100000, + "min":1 + }, + "CreateRunGroupResponse":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"RunGroupArn", + "documentation":"

The group's ARN.

" + }, + "id":{ + "shape":"RunGroupId", + "documentation":"

The group's ID.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

Tags for the run group.

" + } + } + }, + "CreateSequenceStoreRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

To ensure that requests don't run multiple times, specify a unique token for each request.

" + }, + "description":{ + "shape":"SequenceStoreDescription", + "documentation":"

A description for the store.

" + }, + "name":{ + "shape":"SequenceStoreName", + "documentation":"

A name for the store.

" + }, + "sseConfig":{ + "shape":"SseConfig", + "documentation":"

Server-side encryption (SSE) settings for the store.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

Tags for the store.

" + } + } + }, + "CreateSequenceStoreResponse":{ + "type":"structure", + "required":[ + "arn", + "creationTime", + "id" + ], + "members":{ + "arn":{ + "shape":"SequenceStoreArn", + "documentation":"

The store's ARN.

" + }, + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the store was created.

" + }, + "description":{ + "shape":"SequenceStoreDescription", + "documentation":"

The store's description.

" + }, + "id":{ + "shape":"SequenceStoreId", + "documentation":"

The store's ID.

" + }, + "name":{ + "shape":"SequenceStoreName", + "documentation":"

The store's name.

" + }, + "sseConfig":{ + "shape":"SseConfig", + "documentation":"

The store's SSE settings.

" + } + } + }, + "CreateVariantStoreRequest":{ + "type":"structure", + "required":["reference"], + "members":{ + "description":{ + "shape":"StoreDescription", + "documentation":"

A description for the store.

" + }, + "name":{ + "shape":"CreateVariantStoreRequestNameString", + "documentation":"

A name for the store.

" + }, + "reference":{ + "shape":"ReferenceItem", + "documentation":"

The genome reference for the store's variants.

" + }, + "sseConfig":{ + "shape":"SseConfig", + "documentation":"

Server-side encryption (SSE) settings for the store.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

Tags for the store.

" + } + } + }, + "CreateVariantStoreRequestNameString":{ + "type":"string", + "pattern":"^([a-z]){1}([a-z0-9_]){2,254}$" + }, + "CreateVariantStoreResponse":{ + "type":"structure", + "required":[ + "creationTime", + "id", + "name", + "status" + ], + "members":{ + "creationTime":{ + "shape":"CreationTime", + "documentation":"

When the store was created.

" + }, + "id":{ + "shape":"ResourceId", + "documentation":"

The store's ID.

" + }, + "name":{ + "shape":"String", + "documentation":"

The store's name.

" + }, + "reference":{ + "shape":"ReferenceItem", + "documentation":"

The store's genome reference.

" + }, + "status":{ + "shape":"StoreStatus", + "documentation":"

The store's status.

" + } + } + }, + "CreateWorkflowRequest":{ + "type":"structure", + "required":["requestId"], + "members":{ + "definitionUri":{ + "shape":"WorkflowDefinition", + "documentation":"

The URI of a definition for the workflow.

" + }, + "definitionZip":{ + "shape":"Blob", + "documentation":"

A ZIP archive for the workflow.

" + }, + "description":{ + "shape":"WorkflowDescription", + "documentation":"

A description for the workflow.

" + }, + "engine":{ + "shape":"WorkflowEngine", + "documentation":"

An engine for the workflow.

" + }, + "main":{ + "shape":"WorkflowMain", + "documentation":"

The path of the main definition file for the workflow.

" + }, + "name":{ + "shape":"WorkflowName", + "documentation":"

A name for the workflow.

" + }, + "parameterTemplate":{ + "shape":"WorkflowParameterTemplate", + "documentation":"

A parameter template for the workflow.

" + }, + "requestId":{ + "shape":"WorkflowRequestId", + "documentation":"

A request ID for the workflow.

", + "idempotencyToken":true + }, + "storageCapacity":{ + "shape":"CreateWorkflowRequestStorageCapacityInteger", + "documentation":"

A storage capacity for the workflow.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

Tags for the workflow.

" + } + } + }, + "CreateWorkflowRequestStorageCapacityInteger":{ + "type":"integer", + "box":true, + "max":100000, + "min":0 + }, + "CreateWorkflowResponse":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"WorkflowArn", + "documentation":"

The workflow's ARN.

" + }, + "id":{ + "shape":"WorkflowId", + "documentation":"

The workflow's ID.

" + }, + "status":{ + "shape":"WorkflowStatus", + "documentation":"

The workflow's status.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The workflow's tags.

" + } + } + }, + "CreationTime":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "DeleteAnnotationStoreRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "force":{ + "shape":"PrimitiveBoolean", + "documentation":"

Whether to force deletion.

", + "location":"querystring", + "locationName":"force" + }, + "name":{ + "shape":"String", + "documentation":"

The store's name.

", + "location":"uri", + "locationName":"name" + } + } + }, + "DeleteAnnotationStoreResponse":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"StoreStatus", + "documentation":"

The store's status.

" + } + } + }, + "DeleteReferenceRequest":{ + "type":"structure", + "required":[ + "id", + "referenceStoreId" + ], + "members":{ + "id":{ + "shape":"ReferenceId", + "documentation":"

The reference's ID.

", + "location":"uri", + "locationName":"id" + }, + "referenceStoreId":{ + "shape":"ReferenceStoreId", + "documentation":"

The reference's store ID.

", + "location":"uri", + "locationName":"referenceStoreId" + } + } + }, + "DeleteReferenceResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteReferenceStoreRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"ReferenceStoreId", + "documentation":"

The store's ID.

", + "location":"uri", + "locationName":"id" + } + } + }, + "DeleteReferenceStoreResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteRunGroupRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"RunGroupId", + "documentation":"

The run group's ID.

", + "location":"uri", + "locationName":"id" + } + } + }, + "DeleteRunRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"RunId", + "documentation":"

The run's ID.

", + "location":"uri", + "locationName":"id" + } + } + }, + "DeleteSequenceStoreRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"SequenceStoreId", + "documentation":"

The sequence store's ID.

", + "location":"uri", + "locationName":"id" + } + } + }, + "DeleteSequenceStoreResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteVariantStoreRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "force":{ + "shape":"PrimitiveBoolean", + "documentation":"

Whether to force deletion.

", + "location":"querystring", + "locationName":"force" + }, + "name":{ + "shape":"String", + "documentation":"

The store's name.

", + "location":"uri", + "locationName":"name" + } + } + }, + "DeleteVariantStoreResponse":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"StoreStatus", + "documentation":"

The store's status.

" + } + } + }, + "DeleteWorkflowRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"WorkflowId", + "documentation":"

The workflow's ID.

", + "location":"uri", + "locationName":"id" + } + } + }, + "Encoding":{ + "type":"string", + "max":20, + "min":1 + }, + "EncryptionType":{ + "type":"string", + "enum":["KMS"] + }, + "EscapeChar":{ + "type":"string", + "max":1, + "min":1 + }, + "EscapeQuotes":{"type":"boolean"}, + "ExportJobId":{ + "type":"string", + "max":36, + "min":10, + "pattern":"^[0-9]+$" + }, + "ExportReadSet":{ + "type":"structure", + "required":["readSetId"], + "members":{ + "readSetId":{ + "shape":"ReadSetId", + "documentation":"

The set's ID.

" + } + }, + "documentation":"

A read set.

" + }, + "ExportReadSetDetail":{ + "type":"structure", + "required":[ + "id", + "status" + ], + "members":{ + "id":{ + "shape":"ReadSetId", + "documentation":"

The set's ID.

" + }, + "status":{ + "shape":"ReadSetExportJobItemStatus", + "documentation":"

The set's status.

" + }, + "statusMessage":{ + "shape":"JobStatusMessage", + "documentation":"

The set's status message.

" + } + }, + "documentation":"

Details about a read set.

" + }, + "ExportReadSetDetailList":{ + "type":"list", + "member":{"shape":"ExportReadSetDetail"} + }, + "ExportReadSetFilter":{ + "type":"structure", + "members":{ + "createdAfter":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The filter's start date.

" + }, + "createdBefore":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The filter's end date.

" + }, + "status":{ + "shape":"ReadSetExportJobStatus", + "documentation":"

A status to filter on.

" + } + }, + "documentation":"

A read set export job filter.

" + }, + "ExportReadSetJobDetail":{ + "type":"structure", + "required":[ + "creationTime", + "destination", + "id", + "sequenceStoreId", + "status" + ], + "members":{ + "completionTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the job completed.

" + }, + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the job was created.

" + }, + "destination":{ + "shape":"S3Destination", + "documentation":"

The job's destination in Amazon S3.

" + }, + "id":{ + "shape":"ExportJobId", + "documentation":"

The job's ID.

" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The job's sequence store ID.

" + }, + "status":{ + "shape":"ReadSetExportJobStatus", + "documentation":"

The job's status.

" + } + }, + "documentation":"

Details about a read set export job.

" + }, + "ExportReadSetJobDetailList":{ + "type":"list", + "member":{"shape":"ExportReadSetJobDetail"} + }, + "FileInformation":{ + "type":"structure", + "members":{ + "contentLength":{ + "shape":"FileInformationContentLengthLong", + "documentation":"

The file's content length.

" + }, + "partSize":{ + "shape":"FileInformationPartSizeLong", + "documentation":"

The file's part size.

" + }, + "totalParts":{ + "shape":"FileInformationTotalPartsInteger", + "documentation":"

The file's total parts.

" + } + }, + "documentation":"

Details about a file.

" + }, + "FileInformationContentLengthLong":{ + "type":"long", + "box":true, + "max":5497558138880, + "min":1 + }, + "FileInformationPartSizeLong":{ + "type":"long", + "box":true, + "max":5368709120, + "min":1 + }, + "FileInformationTotalPartsInteger":{ + "type":"integer", + "box":true, + "max":10000, + "min":1 + }, + "FileType":{ + "type":"string", + "enum":[ + "FASTQ", + "BAM", + "CRAM" + ] + }, + "FormatOptions":{ + "type":"structure", + "members":{ + "tsvOptions":{ + "shape":"TsvOptions", + "documentation":"

Options for a TSV file.

" + }, + "vcfOptions":{ + "shape":"VcfOptions", + "documentation":"

Options for a VCF file.

" + } + }, + "documentation":"

Formatting options for a file.

", + "union":true + }, + "FormatToHeader":{ + "type":"map", + "key":{"shape":"FormatToHeaderKey"}, + "value":{"shape":"FormatToHeaderValueString"} + }, + "FormatToHeaderKey":{ + "type":"string", + "enum":[ + "CHR", + "START", + "END", + "REF", + "ALT", + "POS" + ] + }, + "FormatToHeaderValueString":{ + "type":"string", + "max":1000, + "min":0 + }, + "GeneratedFrom":{ + "type":"string", + "max":127, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "GetAnnotationImportRequest":{ + "type":"structure", + "required":["jobId"], + "members":{ + "jobId":{ + "shape":"ResourceId", + "documentation":"

The job's ID.

", + "location":"uri", + "locationName":"jobId" + } + } + }, + "GetAnnotationImportResponse":{ + "type":"structure", + "required":[ + "completionTime", + "creationTime", + "destinationName", + "formatOptions", + "id", + "items", + "roleArn", + "runLeftNormalization", + "status", + "statusMessage", + "updateTime" + ], + "members":{ + "completionTime":{ + "shape":"CompletionTime", + "documentation":"

When the job completed.

" + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"

When the job was created.

" + }, + "destinationName":{ + "shape":"StoreName", + "documentation":"

The job's destination annotation store.

" + }, + "formatOptions":{"shape":"FormatOptions"}, + "id":{ + "shape":"ResourceId", + "documentation":"

The job's ID.

" + }, + "items":{ + "shape":"AnnotationImportItemDetails", + "documentation":"

The job's imported items.

" + }, + "roleArn":{ + "shape":"Arn", + "documentation":"

The job's service role ARN.

" + }, + "runLeftNormalization":{ + "shape":"RunLeftNormalization", + "documentation":"

The job's left normalization setting.

" + }, + "status":{ + "shape":"JobStatus", + "documentation":"

The job's status.

" + }, + "statusMessage":{ + "shape":"JobStatusMsg", + "documentation":"

The job's status message.

" + }, + "updateTime":{ + "shape":"UpdateTime", + "documentation":"

When the job was updated.

" + } + } + }, + "GetAnnotationStoreRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"String", + "documentation":"

The store's name.

", + "location":"uri", + "locationName":"name" + } + } + }, + "GetAnnotationStoreResponse":{ + "type":"structure", + "required":[ + "creationTime", + "description", + "id", + "name", + "reference", + "sseConfig", + "status", + "statusMessage", + "storeArn", + "storeSizeBytes", + "tags", + "updateTime" + ], + "members":{ + "creationTime":{ + "shape":"CreationTime", + "documentation":"

When the store was created.

" + }, + "description":{ + "shape":"StoreDescription", + "documentation":"

The store's description.

" + }, + "id":{ + "shape":"ResourceId", + "documentation":"

The store's ID.

" + }, + "name":{ + "shape":"String", + "documentation":"

The store's name.

" + }, + "reference":{ + "shape":"ReferenceItem", + "documentation":"

The store's genome reference.

" + }, + "sseConfig":{ + "shape":"SseConfig", + "documentation":"

The store's server-side encryption (SSE) settings.

" + }, + "status":{ + "shape":"StoreStatus", + "documentation":"

The store's status.

" + }, + "statusMessage":{ + "shape":"StatusMessage", + "documentation":"

A status message.

" + }, + "storeArn":{ + "shape":"Arn", + "documentation":"

The store's ARN.

" + }, + "storeFormat":{ + "shape":"StoreFormat", + "documentation":"

The store's annotation file format.

" + }, + "storeOptions":{ + "shape":"StoreOptions", + "documentation":"

The store's parsing options.

" + }, + "storeSizeBytes":{ + "shape":"Long", + "documentation":"

The store's size in bytes.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The store's tags.

" + }, + "updateTime":{ + "shape":"UpdateTime", + "documentation":"

When the store was updated.

" + } + } + }, + "GetReadSetActivationJobRequest":{ + "type":"structure", + "required":[ + "id", + "sequenceStoreId" + ], + "members":{ + "id":{ + "shape":"ActivationJobId", + "documentation":"

The job's ID.

", + "location":"uri", + "locationName":"id" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The job's sequence store ID.

", + "location":"uri", + "locationName":"sequenceStoreId" + } + } + }, + "GetReadSetActivationJobResponse":{ + "type":"structure", + "required":[ + "creationTime", + "id", + "sequenceStoreId", + "status" + ], + "members":{ + "completionTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the job completed.

" + }, + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the job was created.

" + }, + "id":{ + "shape":"ActivationJobId", + "documentation":"

The job's ID.

" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The job's sequence store ID.

" + }, + "sources":{ + "shape":"ActivateReadSetSourceList", + "documentation":"

The job's sources.

" + }, + "status":{ + "shape":"ReadSetActivationJobStatus", + "documentation":"

The job's status.

" + }, + "statusMessage":{ + "shape":"JobStatusMessage", + "documentation":"

The job's status message.

" + } + } + }, + "GetReadSetExportJobRequest":{ + "type":"structure", + "required":[ + "id", + "sequenceStoreId" + ], + "members":{ + "id":{ + "shape":"ExportJobId", + "documentation":"

The job's ID.

", + "location":"uri", + "locationName":"id" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The job's sequence store ID.

", + "location":"uri", + "locationName":"sequenceStoreId" + } + } + }, + "GetReadSetExportJobResponse":{ + "type":"structure", + "required":[ + "creationTime", + "destination", + "id", + "sequenceStoreId", + "status" + ], + "members":{ + "completionTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the job completed.

" + }, + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the job was created.

" + }, + "destination":{ + "shape":"S3Destination", + "documentation":"

The job's destination in Amazon S3.

" + }, + "id":{ + "shape":"ExportJobId", + "documentation":"

The job's ID.

" + }, + "readSets":{ + "shape":"ExportReadSetDetailList", + "documentation":"

The job's read sets.

" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The job's sequence store ID.

" + }, + "status":{ + "shape":"ReadSetExportJobStatus", + "documentation":"

The job's status.

" + }, + "statusMessage":{ + "shape":"JobStatusMessage", + "documentation":"

The job's status message.

" + } + } + }, + "GetReadSetImportJobRequest":{ + "type":"structure", + "required":[ + "id", + "sequenceStoreId" + ], + "members":{ + "id":{ + "shape":"ImportJobId", + "documentation":"

The job's ID.

", + "location":"uri", + "locationName":"id" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The job's sequence store ID.

", + "location":"uri", + "locationName":"sequenceStoreId" + } + } + }, + "GetReadSetImportJobResponse":{ + "type":"structure", + "required":[ + "creationTime", + "id", + "roleArn", + "sequenceStoreId", + "sources", + "status" + ], + "members":{ + "completionTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the job completed.

" + }, + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the job was created.

" + }, + "id":{ + "shape":"ImportJobId", + "documentation":"

The job's ID.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The job's service role ARN.

" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The job's sequence store ID.

" + }, + "sources":{ + "shape":"ImportReadSetSourceList", + "documentation":"

The job's sources.

" + }, + "status":{ + "shape":"ReadSetImportJobStatus", + "documentation":"

The job's status.

" + }, + "statusMessage":{ + "shape":"JobStatusMessage", + "documentation":"

The job's status message.

" + } + } + }, + "GetReadSetMetadataRequest":{ + "type":"structure", + "required":[ + "id", + "sequenceStoreId" + ], + "members":{ + "id":{ + "shape":"ReadSetId", + "documentation":"

The read set's ID.

", + "location":"uri", + "locationName":"id" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The read set's sequence store ID.

", + "location":"uri", + "locationName":"sequenceStoreId" + } + } + }, + "GetReadSetMetadataResponse":{ + "type":"structure", + "required":[ + "arn", + "creationTime", + "fileType", + "id", + "sequenceStoreId", + "status" + ], + "members":{ + "arn":{ + "shape":"ReadSetArn", + "documentation":"

The read set's ARN.

" + }, + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the read set was created.

" + }, + "description":{ + "shape":"ReadSetDescription", + "documentation":"

The read set's description.

" + }, + "fileType":{ + "shape":"FileType", + "documentation":"

The read set's file type.

" + }, + "files":{ + "shape":"ReadSetFiles", + "documentation":"

The read set's files.

" + }, + "id":{ + "shape":"ReadSetId", + "documentation":"

The read set's ID.

" + }, + "name":{ + "shape":"ReadSetName", + "documentation":"

The read set's name.

" + }, + "referenceArn":{ + "shape":"ReferenceArn", + "documentation":"

The read set's genome reference ARN.

" + }, + "sampleId":{ + "shape":"SampleId", + "documentation":"

The read set's sample ID.

" + }, + "sequenceInformation":{ + "shape":"SequenceInformation", + "documentation":"

The read set's sequence information.

" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The read set's sequence store ID.

" + }, + "status":{ + "shape":"ReadSetStatus", + "documentation":"

The read set's status.

" + }, + "subjectId":{ + "shape":"SubjectId", + "documentation":"

The read set's subject ID.

" + } + } + }, + "GetReadSetRequest":{ + "type":"structure", + "required":[ + "id", + "partNumber", + "sequenceStoreId" + ], + "members":{ + "file":{ + "shape":"ReadSetFile", + "documentation":"

The file to retrieve.

", + "location":"querystring", + "locationName":"file" + }, + "id":{ + "shape":"ReadSetId", + "documentation":"

The read set's ID.

", + "location":"uri", + "locationName":"id" + }, + "partNumber":{ + "shape":"GetReadSetRequestPartNumberInteger", + "documentation":"

The part number to retrieve.

", + "location":"querystring", + "locationName":"partNumber" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The read set's sequence store ID.

", + "location":"uri", + "locationName":"sequenceStoreId" + } + } + }, + "GetReadSetRequestPartNumberInteger":{ + "type":"integer", + "box":true, + "max":10000, + "min":1 + }, + "GetReadSetResponse":{ + "type":"structure", + "members":{ + "payload":{ + "shape":"ReadSetStreamingBlob", + "documentation":"

The read set file payload.

" + } + }, + "payload":"payload" + }, + "GetReferenceImportJobRequest":{ + "type":"structure", + "required":[ + "id", + "referenceStoreId" + ], + "members":{ + "id":{ + "shape":"ImportJobId", + "documentation":"

The job's ID.

", + "location":"uri", + "locationName":"id" + }, + "referenceStoreId":{ + "shape":"ReferenceStoreId", + "documentation":"

The job's reference store ID.

", + "location":"uri", + "locationName":"referenceStoreId" + } + } + }, + "GetReferenceImportJobResponse":{ + "type":"structure", + "required":[ + "creationTime", + "id", + "referenceStoreId", + "roleArn", + "sources", + "status" + ], + "members":{ + "completionTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the job completed.

" + }, + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the job was created.

" + }, + "id":{ + "shape":"ImportJobId", + "documentation":"

The job's ID.

" + }, + "referenceStoreId":{ + "shape":"ReferenceStoreId", + "documentation":"

The job's reference store ID.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The job's service role ARN.

" + }, + "sources":{ + "shape":"ImportReferenceSourceList", + "documentation":"

The job's sources.

" + }, + "status":{ + "shape":"ReferenceImportJobStatus", + "documentation":"

The job's status.

" + }, + "statusMessage":{ + "shape":"JobStatusMessage", + "documentation":"

The job's status message.

" + } + } + }, + "GetReferenceMetadataRequest":{ + "type":"structure", + "required":[ + "id", + "referenceStoreId" + ], + "members":{ + "id":{ + "shape":"ReferenceId", + "documentation":"

The reference's ID.

", + "location":"uri", + "locationName":"id" + }, + "referenceStoreId":{ + "shape":"ReferenceStoreId", + "documentation":"

The reference's reference store ID.

", + "location":"uri", + "locationName":"referenceStoreId" + } + } + }, + "GetReferenceMetadataResponse":{ + "type":"structure", + "required":[ + "arn", + "creationTime", + "id", + "md5", + "referenceStoreId", + "updateTime" + ], + "members":{ + "arn":{ + "shape":"ReferenceArn", + "documentation":"

The reference's ARN.

" + }, + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the reference was created.

" + }, + "description":{ + "shape":"ReferenceDescription", + "documentation":"

The reference's description.

" + }, + "files":{ + "shape":"ReferenceFiles", + "documentation":"

The reference's files.

" + }, + "id":{ + "shape":"ReferenceId", + "documentation":"

The reference's ID.

" + }, + "md5":{ + "shape":"Md5", + "documentation":"

The reference's MD5 checksum.

" + }, + "name":{ + "shape":"ReferenceName", + "documentation":"

The reference's name.

" + }, + "referenceStoreId":{ + "shape":"ReferenceStoreId", + "documentation":"

The reference's reference store ID.

" + }, + "status":{ + "shape":"ReferenceStatus", + "documentation":"

The reference's status.

" + }, + "updateTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the reference was updated.

" + } + } + }, + "GetReferenceRequest":{ + "type":"structure", + "required":[ + "id", + "partNumber", + "referenceStoreId" + ], + "members":{ + "file":{ + "shape":"ReferenceFile", + "documentation":"

The file to retrieve.

", + "location":"querystring", + "locationName":"file" + }, + "id":{ + "shape":"ReferenceId", + "documentation":"

The reference's ID.

", + "location":"uri", + "locationName":"id" + }, + "partNumber":{ + "shape":"GetReferenceRequestPartNumberInteger", + "documentation":"

The part number to retrieve.

", + "location":"querystring", + "locationName":"partNumber" + }, + "range":{ + "shape":"Range", + "documentation":"

The range to retrieve.

", + "location":"header", + "locationName":"Range" + }, + "referenceStoreId":{ + "shape":"ReferenceStoreId", + "documentation":"

The reference's store ID.

", + "location":"uri", + "locationName":"referenceStoreId" + } + } + }, + "GetReferenceRequestPartNumberInteger":{ + "type":"integer", + "box":true, + "max":10000, + "min":1 + }, + "GetReferenceResponse":{ + "type":"structure", + "members":{ + "payload":{ + "shape":"ReferenceStreamingBlob", + "documentation":"

The reference file payload.

" + } + }, + "payload":"payload" + }, + "GetReferenceStoreRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"ReferenceStoreId", + "documentation":"

The store's ID.

", + "location":"uri", + "locationName":"id" + } + } + }, + "GetReferenceStoreResponse":{ + "type":"structure", + "required":[ + "arn", + "creationTime", + "id" + ], + "members":{ + "arn":{ + "shape":"ReferenceStoreArn", + "documentation":"

The store's ARN.

" + }, + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the store was created.

" + }, + "description":{ + "shape":"ReferenceStoreDescription", + "documentation":"

The store's description.

" + }, + "id":{ + "shape":"ReferenceStoreId", + "documentation":"

The store's ID.

" + }, + "name":{ + "shape":"ReferenceStoreName", + "documentation":"

The store's name.

" + }, + "sseConfig":{ + "shape":"SseConfig", + "documentation":"

The store's server-side encryption (SSE) settings.

" + } + } + }, + "GetRunGroupRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"RunGroupId", + "documentation":"

The group's ID.

", + "location":"uri", + "locationName":"id" + } + } + }, + "GetRunGroupResponse":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"RunGroupArn", + "documentation":"

The group's ARN.

" + }, + "creationTime":{ + "shape":"RunGroupTimestamp", + "documentation":"

When the group was created.

" + }, + "id":{ + "shape":"RunGroupId", + "documentation":"

The group's ID.

" + }, + "maxCpus":{ + "shape":"GetRunGroupResponseMaxCpusInteger", + "documentation":"

The group's maximum number of CPUs to use.

" + }, + "maxDuration":{ + "shape":"GetRunGroupResponseMaxDurationInteger", + "documentation":"

The group's maximum run duration.

" + }, + "maxRuns":{ + "shape":"GetRunGroupResponseMaxRunsInteger", + "documentation":"

The maximum number of concurrent runs for the group.

" + }, + "name":{ + "shape":"RunGroupName", + "documentation":"

The group's name.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The group's tags.

" + } + } + }, + "GetRunGroupResponseMaxCpusInteger":{ + "type":"integer", + "box":true, + "max":100000, + "min":1 + }, + "GetRunGroupResponseMaxDurationInteger":{ + "type":"integer", + "box":true, + "max":100000, + "min":1 + }, + "GetRunGroupResponseMaxRunsInteger":{ + "type":"integer", + "box":true, + "max":100000, + "min":1 + }, + "GetRunRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "export":{ + "shape":"RunExportList", + "documentation":"

The run's export format.

", + "location":"querystring", + "locationName":"export" + }, + "id":{ + "shape":"RunId", + "documentation":"

The run's ID.

", + "location":"uri", + "locationName":"id" + } + } + }, + "GetRunResponse":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"RunArn", + "documentation":"

The run's ARN.

" + }, + "creationTime":{ + "shape":"RunTimestamp", + "documentation":"

When the run was created.

" + }, + "definition":{ + "shape":"WorkflowDefinition", + "documentation":"

The run's definition.

" + }, + "digest":{ + "shape":"WorkflowDigest", + "documentation":"

The run's digest.

" + }, + "id":{ + "shape":"RunId", + "documentation":"

The run's ID.

" + }, + "logLevel":{ + "shape":"RunLogLevel", + "documentation":"

The run's log level.

" + }, + "name":{ + "shape":"RunName", + "documentation":"

The run's name.

" + }, + "outputUri":{ + "shape":"RunOutputUri", + "documentation":"

The run's output URI.

" + }, + "parameters":{ + "shape":"RunParameters", + "documentation":"

The run's parameters.

" + }, + "priority":{ + "shape":"GetRunResponsePriorityInteger", + "documentation":"

The run's priority.

" + }, + "resourceDigests":{ + "shape":"RunResourceDigests", + "documentation":"

The run's resource digests.

" + }, + "roleArn":{ + "shape":"RunRoleArn", + "documentation":"

The run's service role ARN.

" + }, + "runGroupId":{ + "shape":"RunGroupId", + "documentation":"

The run's group ID.

" + }, + "runId":{ + "shape":"RunId", + "documentation":"

The run's ID.

" + }, + "startTime":{ + "shape":"RunTimestamp", + "documentation":"

When the run started.

" + }, + "startedBy":{ + "shape":"RunStartedBy", + "documentation":"

Who started the run.

" + }, + "status":{ + "shape":"RunStatus", + "documentation":"

The run's status.

" + }, + "statusMessage":{ + "shape":"RunStatusMessage", + "documentation":"

The run's status message.

" + }, + "stopTime":{ + "shape":"RunTimestamp", + "documentation":"

The run's stop time.

" + }, + "storageCapacity":{ + "shape":"GetRunResponseStorageCapacityInteger", + "documentation":"

The run's storage capacity.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The run's tags.

" + }, + "workflowId":{ + "shape":"WorkflowId", + "documentation":"

The run's workflow ID.

" + }, + "workflowType":{ + "shape":"WorkflowType", + "documentation":"

The run's workflow type.

" + } + } + }, + "GetRunResponsePriorityInteger":{ + "type":"integer", + "box":true, + "max":100000, + "min":0 + }, + "GetRunResponseStorageCapacityInteger":{ + "type":"integer", + "box":true, + "max":100000, + "min":0 + }, + "GetRunTaskRequest":{ + "type":"structure", + "required":[ + "id", + "taskId" + ], + "members":{ + "id":{ + "shape":"RunId", + "documentation":"

The task's ID.

", + "location":"uri", + "locationName":"id" + }, + "taskId":{ + "shape":"TaskId", + "documentation":"

The task's ID.

", + "location":"uri", + "locationName":"taskId" + } + } + }, + "GetRunTaskResponse":{ + "type":"structure", + "members":{ + "cpus":{ + "shape":"GetRunTaskResponseCpusInteger", + "documentation":"

The task's CPU usage.

" + }, + "creationTime":{ + "shape":"TaskTimestamp", + "documentation":"

When the task was created.

" + }, + "logStream":{ + "shape":"TaskLogStream", + "documentation":"

The task's log stream.

" + }, + "memory":{ + "shape":"GetRunTaskResponseMemoryInteger", + "documentation":"

The task's memory setting.

" + }, + "name":{ + "shape":"TaskName", + "documentation":"

The task's name.

" + }, + "startTime":{ + "shape":"TaskTimestamp", + "documentation":"

The task's start time.

" + }, + "status":{ + "shape":"TaskStatus", + "documentation":"

The task's status.

" + }, + "statusMessage":{ + "shape":"TaskStatusMessage", + "documentation":"

The task's status message.

" + }, + "stopTime":{ + "shape":"TaskTimestamp", + "documentation":"

The task's stop time.

" + }, + "taskId":{ + "shape":"TaskId", + "documentation":"

The task's ID.

" + } + } + }, + "GetRunTaskResponseCpusInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "GetRunTaskResponseMemoryInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "GetSequenceStoreRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"SequenceStoreId", + "documentation":"

The store's ID.

", + "location":"uri", + "locationName":"id" + } + } + }, + "GetSequenceStoreResponse":{ + "type":"structure", + "required":[ + "arn", + "creationTime", + "id" + ], + "members":{ + "arn":{ + "shape":"SequenceStoreArn", + "documentation":"

The store's ARN.

" + }, + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the store was created.

" + }, + "description":{ + "shape":"SequenceStoreDescription", + "documentation":"

The store's description.

" + }, + "id":{ + "shape":"SequenceStoreId", + "documentation":"

The store's ID.

" + }, + "name":{ + "shape":"SequenceStoreName", + "documentation":"

The store's name.

" + }, + "sseConfig":{ + "shape":"SseConfig", + "documentation":"

The store's server-side encryption (SSE) settings.

" + } + } + }, + "GetVariantImportRequest":{ + "type":"structure", + "required":["jobId"], + "members":{ + "jobId":{ + "shape":"ResourceId", + "documentation":"

The job's ID.

", + "location":"uri", + "locationName":"jobId" + } + } + }, + "GetVariantImportResponse":{ + "type":"structure", + "required":[ + "creationTime", + "destinationName", + "id", + "items", + "roleArn", + "runLeftNormalization", + "status", + "statusMessage", + "updateTime" + ], + "members":{ + "completionTime":{ + "shape":"CompletionTime", + "documentation":"

When the job completed.

" + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"

When the job was created.

" + }, + "destinationName":{ + "shape":"StoreName", + "documentation":"

The job's destination variant store.

" + }, + "id":{ + "shape":"ResourceId", + "documentation":"

The job's ID.

" + }, + "items":{ + "shape":"VariantImportItemDetails", + "documentation":"

The job's items.

" + }, + "roleArn":{ + "shape":"Arn", + "documentation":"

The job's service role ARN.

" + }, + "runLeftNormalization":{ + "shape":"RunLeftNormalization", + "documentation":"

The job's left normalization setting.

" + }, + "status":{ + "shape":"JobStatus", + "documentation":"

The job's status.

" + }, + "statusMessage":{ + "shape":"JobStatusMsg", + "documentation":"

The job's status message.

" + }, + "updateTime":{ + "shape":"UpdateTime", + "documentation":"

When the job was updated.

" + } + } + }, + "GetVariantStoreRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"String", + "documentation":"

The store's name.

", + "location":"uri", + "locationName":"name" + } + } + }, + "GetVariantStoreResponse":{ + "type":"structure", + "required":[ + "creationTime", + "description", + "id", + "name", + "reference", + "sseConfig", + "status", + "statusMessage", + "storeArn", + "storeSizeBytes", + "tags", + "updateTime" + ], + "members":{ + "creationTime":{ + "shape":"CreationTime", + "documentation":"

When the store was created.

" + }, + "description":{ + "shape":"StoreDescription", + "documentation":"

The store's description.

" + }, + "id":{ + "shape":"ResourceId", + "documentation":"

The store's ID.

" + }, + "name":{ + "shape":"String", + "documentation":"

The store's name.

" + }, + "reference":{ + "shape":"ReferenceItem", + "documentation":"

The store's genome reference.

" + }, + "sseConfig":{ + "shape":"SseConfig", + "documentation":"

The store's server-side encryption (SSE) settings.

" + }, + "status":{ + "shape":"StoreStatus", + "documentation":"

The store's status.

" + }, + "statusMessage":{ + "shape":"StatusMessage", + "documentation":"

The store's status message.

" + }, + "storeArn":{ + "shape":"Arn", + "documentation":"

The store's ARN.

" + }, + "storeSizeBytes":{ + "shape":"Long", + "documentation":"

The store's size in bytes.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The store's tags.

" + }, + "updateTime":{ + "shape":"UpdateTime", + "documentation":"

When the store was updated.

" + } + } + }, + "GetWorkflowRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "export":{ + "shape":"WorkflowExportList", + "documentation":"

The export format for the workflow.

", + "location":"querystring", + "locationName":"export" + }, + "id":{ + "shape":"WorkflowId", + "documentation":"

The workflow's ID.

", + "location":"uri", + "locationName":"id" + }, + "type":{ + "shape":"WorkflowType", + "documentation":"

The workflow's type.

", + "location":"querystring", + "locationName":"type" + } + } + }, + "GetWorkflowResponse":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"WorkflowArn", + "documentation":"

The workflow's ARN.

" + }, + "creationTime":{ + "shape":"WorkflowTimestamp", + "documentation":"

When the workflow was created.

" + }, + "definition":{ + "shape":"WorkflowDefinition", + "documentation":"

The workflow's definition.

" + }, + "description":{ + "shape":"WorkflowDescription", + "documentation":"

The workflow's description.

" + }, + "digest":{ + "shape":"WorkflowDigest", + "documentation":"

The workflow's digest.

" + }, + "engine":{ + "shape":"WorkflowEngine", + "documentation":"

The workflow's engine.

" + }, + "id":{ + "shape":"WorkflowId", + "documentation":"

The workflow's ID.

" + }, + "main":{ + "shape":"WorkflowMain", + "documentation":"

The path of the main definition file for the workflow.

" + }, + "name":{ + "shape":"WorkflowName", + "documentation":"

The workflow's name.

" + }, + "parameterTemplate":{ + "shape":"WorkflowParameterTemplate", + "documentation":"

The workflow's parameter template.

" + }, + "status":{ + "shape":"WorkflowStatus", + "documentation":"

The workflow's status.

" + }, + "statusMessage":{ + "shape":"WorkflowStatusMessage", + "documentation":"

The workflow's status message.

" + }, + "storageCapacity":{ + "shape":"GetWorkflowResponseStorageCapacityInteger", + "documentation":"

The workflow's storage capacity.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The workflow's tags.

" + }, + "type":{ + "shape":"WorkflowType", + "documentation":"

The workflow's type.

" + } + } + }, + "GetWorkflowResponseStorageCapacityInteger":{ + "type":"integer", + "box":true, + "max":100000, + "min":0 + }, + "Header":{"type":"boolean"}, + "ImportJobId":{ + "type":"string", + "max":36, + "min":10, + "pattern":"^[0-9]+$" + }, + "ImportReadSetFilter":{ + "type":"structure", + "members":{ + "createdAfter":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The filter's start date.

" + }, + "createdBefore":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The filter's end date.

" + }, + "status":{ + "shape":"ReadSetImportJobStatus", + "documentation":"

A status to filter on.

" + } + }, + "documentation":"

A filter for import read set jobs.

" + }, + "ImportReadSetJobItem":{ + "type":"structure", + "required":[ + "creationTime", + "id", + "roleArn", + "sequenceStoreId", + "status" + ], + "members":{ + "completionTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the job completed.

" + }, + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the job was created.

" + }, + "id":{ + "shape":"ImportJobId", + "documentation":"

The job's ID.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The job's service role ARN.

" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The job's sequence store ID.

" + }, + "status":{ + "shape":"ReadSetImportJobStatus", + "documentation":"

The job's status.

" + } + }, + "documentation":"

An import read set job.

" + }, + "ImportReadSetJobList":{ + "type":"list", + "member":{"shape":"ImportReadSetJobItem"} + }, + "ImportReadSetSourceItem":{ + "type":"structure", + "required":[ + "sampleId", + "sourceFileType", + "sourceFiles", + "status", + "subjectId" + ], + "members":{ + "description":{ + "shape":"ReadSetDescription", + "documentation":"

The source's description.

" + }, + "generatedFrom":{ + "shape":"GeneratedFrom", + "documentation":"

Where the source originated.

" + }, + "name":{ + "shape":"ReadSetName", + "documentation":"

The source's name.

" + }, + "referenceArn":{ + "shape":"ReferenceArn", + "documentation":"

The source's genome reference ARN.

" + }, + "sampleId":{ + "shape":"SampleId", + "documentation":"

The source's sample ID.

" + }, + "sourceFileType":{ + "shape":"FileType", + "documentation":"

The source's file type.

" + }, + "sourceFiles":{ + "shape":"SourceFiles", + "documentation":"

The source files' location in Amazon S3.

" + }, + "status":{ + "shape":"ReadSetImportJobItemStatus", + "documentation":"

The source's status.

" + }, + "statusMessage":{ + "shape":"JobStatusMessage", + "documentation":"

The source's status message.

" + }, + "subjectId":{ + "shape":"SubjectId", + "documentation":"

The source's subject ID.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The source's tags.

" + } + }, + "documentation":"

A source for an import read set job.

" + }, + "ImportReadSetSourceList":{ + "type":"list", + "member":{"shape":"ImportReadSetSourceItem"} + }, + "ImportReferenceFilter":{ + "type":"structure", + "members":{ + "createdAfter":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The filter's start date.

" + }, + "createdBefore":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The filter's end date.

" + }, + "status":{ + "shape":"ReferenceImportJobStatus", + "documentation":"

A status to filter on.

" + } + }, + "documentation":"

A filter for import references.

" + }, + "ImportReferenceJobItem":{ + "type":"structure", + "required":[ + "creationTime", + "id", + "referenceStoreId", + "roleArn", + "status" + ], + "members":{ + "completionTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the job completed.

" + }, + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the job was created.

" + }, + "id":{ + "shape":"ImportJobId", + "documentation":"

The job's ID.

" + }, + "referenceStoreId":{ + "shape":"ReferenceStoreId", + "documentation":"

The job's reference store ID.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The job's service role ARN.

" + }, + "status":{ + "shape":"ReferenceImportJobStatus", + "documentation":"

The job's status.

" + } + }, + "documentation":"

An import reference job.

" + }, + "ImportReferenceJobList":{ + "type":"list", + "member":{"shape":"ImportReferenceJobItem"} + }, + "ImportReferenceSourceItem":{ + "type":"structure", + "required":["status"], + "members":{ + "description":{ + "shape":"ReferenceDescription", + "documentation":"

The source's description.

" + }, + "name":{ + "shape":"ReferenceName", + "documentation":"

The source's name.

" + }, + "sourceFile":{ + "shape":"S3Uri", + "documentation":"

The source file's location in Amazon S3.

" + }, + "status":{ + "shape":"ReferenceImportJobItemStatus", + "documentation":"

The source's status.

" + }, + "statusMessage":{ + "shape":"JobStatusMessage", + "documentation":"

The source's status message.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The source's tags.

" + } + }, + "documentation":"

A genome reference source.

" + }, + "ImportReferenceSourceList":{ + "type":"list", + "member":{"shape":"ImportReferenceSourceItem"} + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

An unexpected error occurred. Try the request again.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "JobStatus":{ + "type":"string", + "enum":[ + "SUBMITTED", + "IN_PROGRESS", + "CANCELLED", + "COMPLETED", + "FAILED" + ] + }, + "JobStatusMessage":{ + "type":"string", + "max":127, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "JobStatusMsg":{"type":"string"}, + "LineSep":{ + "type":"string", + "max":20, + "min":1 + }, + "ListAnnotationImportJobsFilter":{ + "type":"structure", + "members":{ + "status":{ + "shape":"JobStatus", + "documentation":"

A status to filter on.

" + }, + "storeName":{ + "shape":"String", + "documentation":"

A store name to filter on.

" + } + }, + "documentation":"

A filter for annotation import jobs.

" + }, + "ListAnnotationImportJobsRequest":{ + "type":"structure", + "members":{ + "filter":{ + "shape":"ListAnnotationImportJobsFilter", + "documentation":"

A filter to apply to the list.

" + }, + "ids":{ + "shape":"ListAnnotationImportJobsRequestIdsList", + "documentation":"

IDs of annotation import jobs to retrieve.

" + }, + "maxResults":{ + "shape":"ListAnnotationImportJobsRequestMaxResultsInteger", + "documentation":"

The maximum number of jobs to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"ListAnnotationImportJobsRequestNextTokenString", + "documentation":"

Specify the pagination token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListAnnotationImportJobsRequestIdsList":{ + "type":"list", + "member":{"shape":"ResourceIdentifier"}, + "max":20, + "min":1 + }, + "ListAnnotationImportJobsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListAnnotationImportJobsRequestNextTokenString":{ + "type":"string", + "max":10000, + "min":1 + }, + "ListAnnotationImportJobsResponse":{ + "type":"structure", + "members":{ + "annotationImportJobs":{ + "shape":"AnnotationImportJobItems", + "documentation":"

A list of jobs.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

A pagination token that's included if more results are available.

" + } + } + }, + "ListAnnotationStoresFilter":{ + "type":"structure", + "members":{ + "status":{ + "shape":"StoreStatus", + "documentation":"

A status to filter on.

" + } + }, + "documentation":"

A filter for annotation stores.

" + }, + "ListAnnotationStoresRequest":{ + "type":"structure", + "members":{ + "filter":{ + "shape":"ListAnnotationStoresFilter", + "documentation":"

A filter to apply to the list.

" + }, + "ids":{ + "shape":"ListAnnotationStoresRequestIdsList", + "documentation":"

IDs of stores to list.

" + }, + "maxResults":{ + "shape":"ListAnnotationStoresRequestMaxResultsInteger", + "documentation":"

The maximum number of stores to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"ListAnnotationStoresRequestNextTokenString", + "documentation":"

Specify the pagination token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListAnnotationStoresRequestIdsList":{ + "type":"list", + "member":{"shape":"ResourceIdentifier"}, + "max":20, + "min":1 + }, + "ListAnnotationStoresRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListAnnotationStoresRequestNextTokenString":{ + "type":"string", + "max":10000, + "min":1 + }, + "ListAnnotationStoresResponse":{ + "type":"structure", + "members":{ + "annotationStores":{ + "shape":"AnnotationStoreItems", + "documentation":"

A list of stores.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

A pagination token that's included if more results are available.

" + } + } + }, + "ListReadSetActivationJobsRequest":{ + "type":"structure", + "required":["sequenceStoreId"], + "members":{ + "filter":{ + "shape":"ActivateReadSetFilter", + "documentation":"

A filter to apply to the list.

" + }, + "maxResults":{ + "shape":"ListReadSetActivationJobsRequestMaxResultsInteger", + "documentation":"

The maximum number of read set activation jobs to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

Specify the pagination token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The read set's sequence store ID.

", + "location":"uri", + "locationName":"sequenceStoreId" + } + } + }, + "ListReadSetActivationJobsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListReadSetActivationJobsResponse":{ + "type":"structure", + "members":{ + "activationJobs":{ + "shape":"ActivateReadSetJobList", + "documentation":"

A list of jobs.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token that's included if more results are available.

" + } + } + }, + "ListReadSetExportJobsRequest":{ + "type":"structure", + "required":["sequenceStoreId"], + "members":{ + "filter":{ + "shape":"ExportReadSetFilter", + "documentation":"

A filter to apply to the list.

" + }, + "maxResults":{ + "shape":"ListReadSetExportJobsRequestMaxResultsInteger", + "documentation":"

The maximum number of jobs to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

Specify the pagination token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The jobs' sequence store ID.

", + "location":"uri", + "locationName":"sequenceStoreId" + } + } + }, + "ListReadSetExportJobsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListReadSetExportJobsResponse":{ + "type":"structure", + "members":{ + "exportJobs":{ + "shape":"ExportReadSetJobDetailList", + "documentation":"

A list of jobs.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token that's included if more results are available.

" + } + } + }, + "ListReadSetImportJobsRequest":{ + "type":"structure", + "required":["sequenceStoreId"], + "members":{ + "filter":{ + "shape":"ImportReadSetFilter", + "documentation":"

A filter to apply to the list.

" + }, + "maxResults":{ + "shape":"ListReadSetImportJobsRequestMaxResultsInteger", + "documentation":"

The maximum number of jobs to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

Specify the pagination token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The jobs' sequence store ID.

", + "location":"uri", + "locationName":"sequenceStoreId" + } + } + }, + "ListReadSetImportJobsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListReadSetImportJobsResponse":{ + "type":"structure", + "members":{ + "importJobs":{ + "shape":"ImportReadSetJobList", + "documentation":"

A list of jobs.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token that's included if more results are available.

" + } + } + }, + "ListReadSetsRequest":{ + "type":"structure", + "required":["sequenceStoreId"], + "members":{ + "filter":{ + "shape":"ReadSetFilter", + "documentation":"

A filter to apply to the list.

" + }, + "maxResults":{ + "shape":"ListReadSetsRequestMaxResultsInteger", + "documentation":"

The maximum number of read sets to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

Specify the pagination token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The jobs' sequence store ID.

", + "location":"uri", + "locationName":"sequenceStoreId" + } + } + }, + "ListReadSetsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListReadSetsResponse":{ + "type":"structure", + "required":["readSets"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token that's included if more results are available.

" + }, + "readSets":{ + "shape":"ReadSetList", + "documentation":"

A list of read sets.

" + } + } + }, + "ListReferenceImportJobsRequest":{ + "type":"structure", + "required":["referenceStoreId"], + "members":{ + "filter":{ + "shape":"ImportReferenceFilter", + "documentation":"

A filter to apply to the list.

" + }, + "maxResults":{ + "shape":"ListReferenceImportJobsRequestMaxResultsInteger", + "documentation":"

The maximum number of jobs to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

Specify the pagination token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "referenceStoreId":{ + "shape":"ReferenceStoreId", + "documentation":"

The job's reference store ID.

", + "location":"uri", + "locationName":"referenceStoreId" + } + } + }, + "ListReferenceImportJobsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListReferenceImportJobsResponse":{ + "type":"structure", + "members":{ + "importJobs":{ + "shape":"ImportReferenceJobList", + "documentation":"

A list of jobs.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token that's included if more results are available.

" + } + } + }, + "ListReferenceStoresRequest":{ + "type":"structure", + "members":{ + "filter":{ + "shape":"ReferenceStoreFilter", + "documentation":"

A filter to apply to the list.

" + }, + "maxResults":{ + "shape":"ListReferenceStoresRequestMaxResultsInteger", + "documentation":"

The maximum number of stores to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

Specify the pagination token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListReferenceStoresRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListReferenceStoresResponse":{ + "type":"structure", + "required":["referenceStores"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token that's included if more results are available.

" + }, + "referenceStores":{ + "shape":"ReferenceStoreDetailList", + "documentation":"

A list of reference stores.

" + } + } + }, + "ListReferencesRequest":{ + "type":"structure", + "required":["referenceStoreId"], + "members":{ + "filter":{ + "shape":"ReferenceFilter", + "documentation":"

A filter to apply to the list.

" + }, + "maxResults":{ + "shape":"ListReferencesRequestMaxResultsInteger", + "documentation":"

The maximum number of references to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

Specify the pagination token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "referenceStoreId":{ + "shape":"ReferenceStoreId", + "documentation":"

The references' reference store ID.

", + "location":"uri", + "locationName":"referenceStoreId" + } + } + }, + "ListReferencesRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListReferencesResponse":{ + "type":"structure", + "required":["references"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token that's included if more results are available.

" + }, + "references":{ + "shape":"ReferenceList", + "documentation":"

A list of references.

" + } + } + }, + "ListRunGroupsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListRunGroupsRequestMaxResultsInteger", + "documentation":"

The maximum number of run groups to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "name":{ + "shape":"RunGroupName", + "documentation":"

The run groups' name.

", + "location":"querystring", + "locationName":"name" + }, + "startingToken":{ + "shape":"RunGroupListToken", + "documentation":"

Specify the pagination token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"startingToken" + } + } + }, + "ListRunGroupsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListRunGroupsResponse":{ + "type":"structure", + "members":{ + "items":{ + "shape":"RunGroupList", + "documentation":"

A list of groups.

" + }, + "nextToken":{ + "shape":"RunGroupListToken", + "documentation":"

A pagination token that's included if more results are available.

" + } + } + }, + "ListRunTasksRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"RunId", + "documentation":"

The run's ID.

", + "location":"uri", + "locationName":"id" + }, + "maxResults":{ + "shape":"ListRunTasksRequestMaxResultsInteger", + "documentation":"

The maximum number of run tasks to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "startingToken":{ + "shape":"TaskListToken", + "documentation":"

Specify the pagination token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"startingToken" + }, + "status":{ + "shape":"TaskStatus", + "documentation":"

Filter the list by status.

", + "location":"querystring", + "locationName":"status" + } + } + }, + "ListRunTasksRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListRunTasksResponse":{ + "type":"structure", + "members":{ + "items":{ + "shape":"TaskList", + "documentation":"

A list of tasks.

" + }, + "nextToken":{ + "shape":"TaskListToken", + "documentation":"

A pagination token that's included if more results are available.

" + } + } + }, + "ListRunsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListRunsRequestMaxResultsInteger", + "documentation":"

The maximum number of runs to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "name":{ + "shape":"RunName", + "documentation":"

Filter the list by run name.

", + "location":"querystring", + "locationName":"name" + }, + "runGroupId":{ + "shape":"RunGroupId", + "documentation":"

Filter the list by run group ID.

", + "location":"querystring", + "locationName":"runGroupId" + }, + "startingToken":{ + "shape":"RunListToken", + "documentation":"

Specify the pagination token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"startingToken" + } + } + }, + "ListRunsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListRunsResponse":{ + "type":"structure", + "members":{ + "items":{ + "shape":"RunList", + "documentation":"

A list of runs.

" + }, + "nextToken":{ + "shape":"RunListToken", + "documentation":"

A pagination token that's included if more results are available.

" + } + } + }, + "ListSequenceStoresRequest":{ + "type":"structure", + "members":{ + "filter":{ + "shape":"SequenceStoreFilter", + "documentation":"

A filter to apply to the list.

" + }, + "maxResults":{ + "shape":"ListSequenceStoresRequestMaxResultsInteger", + "documentation":"

The maximum number of stores to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

Specify the pagination token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListSequenceStoresRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListSequenceStoresResponse":{ + "type":"structure", + "required":["sequenceStores"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token that's included if more results are available.

" + }, + "sequenceStores":{ + "shape":"SequenceStoreDetailList", + "documentation":"

A list of sequence stores.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"TagArn", + "documentation":"

The resource's ARN.

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "required":["tags"], + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

A list of tags.

" + } + } + }, + "ListVariantImportJobsFilter":{ + "type":"structure", + "members":{ + "status":{ + "shape":"JobStatus", + "documentation":"

A status to filter on.

" + }, + "storeName":{ + "shape":"String", + "documentation":"

A store name to filter on.

" + } + }, + "documentation":"

A filter for variant import jobs.

" + }, + "ListVariantImportJobsRequest":{ + "type":"structure", + "members":{ + "filter":{ + "shape":"ListVariantImportJobsFilter", + "documentation":"

A filter to apply to the list.

" + }, + "ids":{ + "shape":"ListVariantImportJobsRequestIdsList", + "documentation":"

A list of job IDs.

" + }, + "maxResults":{ + "shape":"ListVariantImportJobsRequestMaxResultsInteger", + "documentation":"

The maximum number of import jobs to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"ListVariantImportJobsRequestNextTokenString", + "documentation":"

Specify the pagination token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListVariantImportJobsRequestIdsList":{ + "type":"list", + "member":{"shape":"ResourceIdentifier"}, + "max":20, + "min":1 + }, + "ListVariantImportJobsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListVariantImportJobsRequestNextTokenString":{ + "type":"string", + "max":10000, + "min":1 + }, + "ListVariantImportJobsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

A pagination token that's included if more results are available.

" + }, + "variantImportJobs":{ + "shape":"VariantImportJobItems", + "documentation":"

A list of jobs.

" + } + } + }, + "ListVariantStoresFilter":{ + "type":"structure", + "members":{ + "status":{ + "shape":"StoreStatus", + "documentation":"

A status to filter on.

" + } + }, + "documentation":"

A filter for variant stores.

" + }, + "ListVariantStoresRequest":{ + "type":"structure", + "members":{ + "filter":{ + "shape":"ListVariantStoresFilter", + "documentation":"

A filter to apply to the list.

" + }, + "ids":{ + "shape":"ListVariantStoresRequestIdsList", + "documentation":"

A list of store IDs.

" + }, + "maxResults":{ + "shape":"ListVariantStoresRequestMaxResultsInteger", + "documentation":"

The maximum number of stores to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"ListVariantStoresRequestNextTokenString", + "documentation":"

Specify the pagination token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListVariantStoresRequestIdsList":{ + "type":"list", + "member":{"shape":"ResourceIdentifier"}, + "max":20, + "min":1 + }, + "ListVariantStoresRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListVariantStoresRequestNextTokenString":{ + "type":"string", + "max":10000, + "min":1 + }, + "ListVariantStoresResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

A pagination token that's included if more results are available.

" + }, + "variantStores":{ + "shape":"VariantStoreItems", + "documentation":"

A list of variant stores.

" + } + } + }, + "ListWorkflowsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListWorkflowsRequestMaxResultsInteger", + "documentation":"

The maximum number of workflows to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "name":{ + "shape":"WorkflowName", + "documentation":"

The workflows' name.

", + "location":"querystring", + "locationName":"name" + }, + "startingToken":{ + "shape":"WorkflowListToken", + "documentation":"

Specify the pagination token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"startingToken" + }, + "type":{ + "shape":"WorkflowType", + "documentation":"

The workflows' type.

", + "location":"querystring", + "locationName":"type" + } + } + }, + "ListWorkflowsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListWorkflowsResponse":{ + "type":"structure", + "members":{ + "items":{ + "shape":"WorkflowList", + "documentation":"

The workflows' items.

" + }, + "nextToken":{ + "shape":"WorkflowListToken", + "documentation":"

A pagination token that's included if more results are available.

" + } + } + }, + "Long":{ + "type":"long", + "box":true + }, + "Md5":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[\\p{L}||\\p{N}]+$" + }, + "NextToken":{ + "type":"string", + "max":6144, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "PrimitiveBoolean":{"type":"boolean"}, + "Quote":{ + "type":"string", + "max":1, + "min":1 + }, + "QuoteAll":{"type":"boolean"}, + "Range":{ + "type":"string", + "max":127, + "min":1, + "pattern":"^[\\p{N}||\\p{P}]+$" + }, + "RangeNotSatisfiableException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The ranges specified in the request are not valid.

", + "error":{ + "httpStatusCode":416, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":false} + }, + "ReadOptions":{ + "type":"structure", + "members":{ + "comment":{ + "shape":"CommentChar", + "documentation":"

The file's comment character.

" + }, + "encoding":{ + "shape":"Encoding", + "documentation":"

The file's encoding.

" + }, + "escape":{ + "shape":"EscapeChar", + "documentation":"

A character for escaping quotes in the file.

" + }, + "escapeQuotes":{ + "shape":"EscapeQuotes", + "documentation":"

Whether quotes need to be escaped in the file.

" + }, + "header":{ + "shape":"Header", + "documentation":"

Whether the file has a header row.

" + }, + "lineSep":{ + "shape":"LineSep", + "documentation":"

A line separator for the file.

" + }, + "quote":{ + "shape":"Quote", + "documentation":"

The file's quote character.

" + }, + "quoteAll":{ + "shape":"QuoteAll", + "documentation":"

Whether all values need to be quoted, or just those that contain quotes.

" + }, + "sep":{ + "shape":"Separator", + "documentation":"

The file's field separator.

" + } + }, + "documentation":"

Read options for an annotation import job.

" + }, + "ReadSetActivationJobItemStatus":{ + "type":"string", + "enum":[ + "NOT_STARTED", + "IN_PROGRESS", + "FINISHED", + "FAILED" + ] + }, + "ReadSetActivationJobStatus":{ + "type":"string", + "enum":[ + "SUBMITTED", + "IN_PROGRESS", + "CANCELLING", + "CANCELLED", + "FAILED", + "COMPLETED", + "COMPLETED_WITH_FAILURES" + ] + }, + "ReadSetArn":{ + "type":"string", + "max":127, + "min":1, + "pattern":"^arn:.+$" + }, + "ReadSetBatchError":{ + "type":"structure", + "required":[ + "code", + "id", + "message" + ], + "members":{ + "code":{ + "shape":"String", + "documentation":"

The error's code.

" + }, + "id":{ + "shape":"ReadSetId", + "documentation":"

The error's ID.

" + }, + "message":{ + "shape":"String", + "documentation":"

The error's message.

" + } + }, + "documentation":"

An error from a batch read set operation.

" + }, + "ReadSetBatchErrorList":{ + "type":"list", + "member":{"shape":"ReadSetBatchError"} + }, + "ReadSetDescription":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "ReadSetExportJobItemStatus":{ + "type":"string", + "enum":[ + "NOT_STARTED", + "IN_PROGRESS", + "FINISHED", + "FAILED" + ] + }, + "ReadSetExportJobStatus":{ + "type":"string", + "enum":[ + "SUBMITTED", + "IN_PROGRESS", + "CANCELLING", + "CANCELLED", + "FAILED", + "COMPLETED", + "COMPLETED_WITH_FAILURES" + ] + }, + "ReadSetFile":{ + "type":"string", + "enum":[ + "SOURCE1", + "SOURCE2", + "INDEX" + ] + }, + "ReadSetFiles":{ + "type":"structure", + "members":{ + "index":{ + "shape":"FileInformation", + "documentation":"

The files' index.

" + }, + "source1":{ + "shape":"FileInformation", + "documentation":"

The location of the first file in Amazon S3.

" + }, + "source2":{ + "shape":"FileInformation", + "documentation":"

The location of the second file in Amazon S3.

" + } + }, + "documentation":"

Files in a read set.

" + }, + "ReadSetFilter":{ + "type":"structure", + "members":{ + "createdAfter":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The filter's start date.

" + }, + "createdBefore":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The filter's end date.

" + }, + "name":{ + "shape":"ReadSetName", + "documentation":"

A name to filter on.

" + }, + "referenceArn":{ + "shape":"ReferenceArn", + "documentation":"

A genome reference ARN to filter on.

" + }, + "status":{ + "shape":"ReadSetStatus", + "documentation":"

A status to filter on.

" + } + }, + "documentation":"

A filter for read sets.

" + }, + "ReadSetId":{ + "type":"string", + "max":36, + "min":10, + "pattern":"^[0-9]+$" + }, + "ReadSetIdList":{ + "type":"list", + "member":{"shape":"ReadSetId"}, + "max":100, + "min":1 + }, + "ReadSetImportJobItemStatus":{ + "type":"string", + "enum":[ + "NOT_STARTED", + "IN_PROGRESS", + "FINISHED", + "FAILED" + ] + }, + "ReadSetImportJobStatus":{ + "type":"string", + "enum":[ + "SUBMITTED", + "IN_PROGRESS", + "CANCELLING", + "CANCELLED", + "FAILED", + "COMPLETED", + "COMPLETED_WITH_FAILURES" + ] + }, + "ReadSetList":{ + "type":"list", + "member":{"shape":"ReadSetListItem"} + }, + "ReadSetListItem":{ + "type":"structure", + "required":[ + "arn", + "creationTime", + "fileType", + "id", + "sequenceStoreId", + "status" + ], + "members":{ + "arn":{ + "shape":"ReadSetArn", + "documentation":"

The read set's ARN.

" + }, + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the read set was created.

" + }, + "description":{ + "shape":"ReadSetDescription", + "documentation":"

The read set's description.

" + }, + "fileType":{ + "shape":"FileType", + "documentation":"

The read set's file type.

" + }, + "id":{ + "shape":"ReadSetId", + "documentation":"

The read set's ID.

" + }, + "name":{ + "shape":"ReadSetName", + "documentation":"

The read set's name.

" + }, + "referenceArn":{ + "shape":"ReferenceArn", + "documentation":"

The read set's genome reference ARN.

" + }, + "sampleId":{ + "shape":"SampleId", + "documentation":"

The read set's sample ID.

" + }, + "sequenceInformation":{"shape":"SequenceInformation"}, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The read set's sequence store ID.

" + }, + "status":{ + "shape":"ReadSetStatus", + "documentation":"

The read set's status.

" + }, + "subjectId":{ + "shape":"SubjectId", + "documentation":"

The read set's subject ID.

" + } + }, + "documentation":"

A read set.

" + }, + "ReadSetName":{ + "type":"string", + "max":127, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "ReadSetStatus":{ + "type":"string", + "enum":[ + "ARCHIVED", + "ACTIVATING", + "ACTIVE", + "DELETING", + "DELETED" + ] + }, + "ReadSetStreamingBlob":{ + "type":"blob", + "streaming":true + }, + "ReferenceArn":{ + "type":"string", + "max":127, + "min":1, + "pattern":"^arn:.+$" + }, + "ReferenceDescription":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "ReferenceFile":{ + "type":"string", + "enum":[ + "SOURCE", + "INDEX" + ] + }, + "ReferenceFiles":{ + "type":"structure", + "members":{ + "index":{ + "shape":"FileInformation", + "documentation":"

The files' index.

" + }, + "source":{ + "shape":"FileInformation", + "documentation":"

The source file's location in Amazon S3.

" + } + }, + "documentation":"

A set of genome reference files.

" + }, + "ReferenceFilter":{ + "type":"structure", + "members":{ + "createdAfter":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The filter's start date.

" + }, + "createdBefore":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The filter's end date.

" + }, + "md5":{ + "shape":"Md5", + "documentation":"

An MD5 checksum to filter on.

" + }, + "name":{ + "shape":"ReferenceName", + "documentation":"

A name to filter on.

" + } + }, + "documentation":"

A filter for references.

" + }, + "ReferenceId":{ + "type":"string", + "max":36, + "min":10, + "pattern":"^[0-9]+$" + }, + "ReferenceImportJobItemStatus":{ + "type":"string", + "enum":[ + "NOT_STARTED", + "IN_PROGRESS", + "FINISHED", + "FAILED" + ] + }, + "ReferenceImportJobStatus":{ + "type":"string", + "enum":[ + "SUBMITTED", + "IN_PROGRESS", + "CANCELLING", + "CANCELLED", + "FAILED", + "COMPLETED", + "COMPLETED_WITH_FAILURES" + ] + }, + "ReferenceItem":{ + "type":"structure", + "members":{ + "referenceArn":{ + "shape":"ReferenceArn", + "documentation":"

The reference's ARN.

" + } + }, + "documentation":"

A genome reference.

", + "union":true + }, + "ReferenceList":{ + "type":"list", + "member":{"shape":"ReferenceListItem"} + }, + "ReferenceListItem":{ + "type":"structure", + "required":[ + "arn", + "creationTime", + "id", + "md5", + "referenceStoreId", + "updateTime" + ], + "members":{ + "arn":{ + "shape":"ReferenceArn", + "documentation":"

The reference's ARN.

" + }, + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the reference was created.

" + }, + "description":{ + "shape":"ReferenceDescription", + "documentation":"

The reference's description.

" + }, + "id":{ + "shape":"ReferenceId", + "documentation":"

The reference's ID.

" + }, + "md5":{ + "shape":"Md5", + "documentation":"

The reference's MD5 checksum.

" + }, + "name":{ + "shape":"ReferenceName", + "documentation":"

The reference's name.

" + }, + "referenceStoreId":{ + "shape":"ReferenceStoreId", + "documentation":"

The reference's store ID.

" + }, + "status":{ + "shape":"ReferenceStatus", + "documentation":"

The reference's status.

" + }, + "updateTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the reference was updated.

" + } + }, + "documentation":"

A genome reference.

" + }, + "ReferenceName":{ + "type":"string", + "max":255, + "min":3, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "ReferenceStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "DELETING", + "DELETED" + ] + }, + "ReferenceStoreArn":{ + "type":"string", + "max":127, + "min":1, + "pattern":"^arn:.+$" + }, + "ReferenceStoreDescription":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "ReferenceStoreDetail":{ + "type":"structure", + "required":[ + "arn", + "creationTime", + "id" + ], + "members":{ + "arn":{ + "shape":"ReferenceStoreArn", + "documentation":"

The store's ARN.

" + }, + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the store was created.

" + }, + "description":{ + "shape":"ReferenceStoreDescription", + "documentation":"

The store's description.

" + }, + "id":{ + "shape":"ReferenceStoreId", + "documentation":"

The store's ID.

" + }, + "name":{ + "shape":"ReferenceStoreName", + "documentation":"

The store's name.

" + }, + "sseConfig":{ + "shape":"SseConfig", + "documentation":"

The store's server-side encryption (SSE) settings.

" + } + }, + "documentation":"

Details about a reference store.

" + }, + "ReferenceStoreDetailList":{ + "type":"list", + "member":{"shape":"ReferenceStoreDetail"} + }, + "ReferenceStoreFilter":{ + "type":"structure", + "members":{ + "createdAfter":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The filter's start date.

" + }, + "createdBefore":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The filter's end date.

" + }, + "name":{ + "shape":"ReferenceStoreName", + "documentation":"

The name to filter on.

" + } + }, + "documentation":"

A filter for reference stores.

" + }, + "ReferenceStoreId":{ + "type":"string", + "max":36, + "min":10, + "pattern":"^[0-9]+$" + }, + "ReferenceStoreName":{ + "type":"string", + "max":127, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "ReferenceStreamingBlob":{ + "type":"blob", + "streaming":true + }, + "RequestTimeoutException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The request timed out.

", + "error":{ + "httpStatusCode":408, + "senderFault":true + }, + "exception":true + }, + "ResourceId":{ + "type":"string", + "pattern":"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + }, + "ResourceIdentifier":{ + "type":"string", + "max":50, + "min":1 + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The target resource was not found in the current Region.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "RoleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:.*" + }, + "RunArn":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^arn:.+$" + }, + "RunExport":{ + "type":"string", + "enum":["DEFINITION"], + "max":64, + "min":1 + }, + "RunExportList":{ + "type":"list", + "member":{"shape":"RunExport"}, + "max":32, + "min":0 + }, + "RunGroupArn":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^arn:.+$" + }, + "RunGroupId":{ + "type":"string", + "max":18, + "min":1, + "pattern":"^[0-9]+$" + }, + "RunGroupList":{ + "type":"list", + "member":{"shape":"RunGroupListItem"} + }, + "RunGroupListItem":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"RunGroupArn", + "documentation":"

The group's ARN.

" + }, + "creationTime":{ + "shape":"RunGroupTimestamp", + "documentation":"

When the group was created.

" + }, + "id":{ + "shape":"RunGroupId", + "documentation":"

The group's ID.

" + }, + "maxCpus":{ + "shape":"RunGroupListItemMaxCpusInteger", + "documentation":"

The group's maximum CPU count setting.

" + }, + "maxDuration":{ + "shape":"RunGroupListItemMaxDurationInteger", + "documentation":"

The group's maximum duration setting.

" + }, + "maxRuns":{ + "shape":"RunGroupListItemMaxRunsInteger", + "documentation":"

The group's maximum concurrent run setting.

" + }, + "name":{ + "shape":"RunGroupName", + "documentation":"

The group's name.

" + } + }, + "documentation":"

A run group.

" + }, + "RunGroupListItemMaxCpusInteger":{ + "type":"integer", + "box":true, + "max":100000, + "min":1 + }, + "RunGroupListItemMaxDurationInteger":{ + "type":"integer", + "box":true, + "max":100000, + "min":1 + }, + "RunGroupListItemMaxRunsInteger":{ + "type":"integer", + "box":true, + "max":100000, + "min":1 + }, + "RunGroupListToken":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "RunGroupName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "RunGroupRequestId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "RunGroupTimestamp":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "RunId":{ + "type":"string", + "max":18, + "min":1, + "pattern":"^[0-9]+$" + }, + "RunLeftNormalization":{"type":"boolean"}, + "RunList":{ + "type":"list", + "member":{"shape":"RunListItem"} + }, + "RunListItem":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"RunArn", + "documentation":"

The run's ARN.

" + }, + "creationTime":{ + "shape":"RunTimestamp", + "documentation":"

When the run was created.

" + }, + "id":{ + "shape":"RunId", + "documentation":"

The run's ID.

" + }, + "name":{ + "shape":"RunName", + "documentation":"

The run's name.

" + }, + "priority":{ + "shape":"RunListItemPriorityInteger", + "documentation":"

The run's priority.

" + }, + "startTime":{ + "shape":"RunTimestamp", + "documentation":"

When the run started.

" + }, + "status":{ + "shape":"RunStatus", + "documentation":"

The run's status.

" + }, + "stopTime":{ + "shape":"RunTimestamp", + "documentation":"

When the run stopped.

" + }, + "storageCapacity":{ + "shape":"RunListItemStorageCapacityInteger", + "documentation":"

The run's storage capacity.

" + }, + "workflowId":{ + "shape":"WorkflowId", + "documentation":"

The run's workflow ID.

" + } + }, + "documentation":"

A workflow run.

" + }, + "RunListItemPriorityInteger":{ + "type":"integer", + "box":true, + "max":100000, + "min":0 + }, + "RunListItemStorageCapacityInteger":{ + "type":"integer", + "box":true, + "max":100000, + "min":0 + }, + "RunListToken":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "RunLogLevel":{ + "type":"string", + "enum":[ + "OFF", + "FATAL", + "ERROR", + "ALL" + ], + "max":64, + "min":1 + }, + "RunName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "RunOutputUri":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "RunParameters":{ + "type":"structure", + "members":{ + }, + "document":true + }, + "RunRequestId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "RunResourceDigest":{ + "type":"string", + "max":64, + "min":0, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "RunResourceDigestKey":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "RunResourceDigests":{ + "type":"map", + "key":{"shape":"RunResourceDigestKey"}, + "value":{"shape":"RunResourceDigest"} + }, + "RunRoleArn":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^arn:.+$" + }, + "RunStartedBy":{ + "type":"string", + "max":128, + "min":1 + }, + "RunStatus":{ + "type":"string", + "enum":[ + "PENDING", + "STARTING", + "RUNNING", + "STOPPING", + "COMPLETED", + "DELETED", + "CANCELLED", + "FAILED" + ], + "max":64, + "min":1 + }, + "RunStatusMessage":{ + "type":"string", + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "RunTimestamp":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "S3Destination":{ + "type":"string", + "pattern":"^s3://([a-z0-9][a-z0-9-.]{1,61}[a-z0-9])/?((.{1,1024})/)?$" + }, + 
"S3Uri":{ + "type":"string", + "pattern":"^s3://([a-z0-9][a-z0-9-.]{1,61}[a-z0-9])/(.{1,1024})$" + }, + "SampleId":{ + "type":"string", + "max":127, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "SchemaItem":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"SchemaValueType"}, + "max":1, + "min":1 + }, + "SchemaValueType":{ + "type":"string", + "enum":[ + "LONG", + "INT", + "STRING", + "FLOAT", + "DOUBLE", + "BOOLEAN" + ] + }, + "Separator":{ + "type":"string", + "max":20, + "min":1 + }, + "SequenceInformation":{ + "type":"structure", + "members":{ + "alignment":{ + "shape":"String", + "documentation":"

The sequence's alignment setting.

" + }, + "generatedFrom":{ + "shape":"GeneratedFrom", + "documentation":"

Where the sequence originated.

" + }, + "totalBaseCount":{ + "shape":"Long", + "documentation":"

The sequence's total base count.

" + }, + "totalReadCount":{ + "shape":"Long", + "documentation":"

The sequence's total read count.

" + } + }, + "documentation":"

Details about a sequence.

" + }, + "SequenceStoreArn":{ + "type":"string", + "max":127, + "min":1, + "pattern":"^arn:.+$" + }, + "SequenceStoreDescription":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "SequenceStoreDetail":{ + "type":"structure", + "required":[ + "arn", + "creationTime", + "id" + ], + "members":{ + "arn":{ + "shape":"SequenceStoreArn", + "documentation":"

The store's ARN.

" + }, + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the store was created.

" + }, + "description":{ + "shape":"SequenceStoreDescription", + "documentation":"

The store's description.

" + }, + "id":{ + "shape":"SequenceStoreId", + "documentation":"

The store's ID.

" + }, + "name":{ + "shape":"SequenceStoreName", + "documentation":"

The store's name.

" + }, + "sseConfig":{ + "shape":"SseConfig", + "documentation":"

The store's server-side encryption (SSE) settings.

" + } + }, + "documentation":"

Details about a sequence store.

" + }, + "SequenceStoreDetailList":{ + "type":"list", + "member":{"shape":"SequenceStoreDetail"} + }, + "SequenceStoreFilter":{ + "type":"structure", + "members":{ + "createdAfter":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The filter's start date.

" + }, + "createdBefore":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The filter's end date.

" + }, + "name":{ + "shape":"SequenceStoreName", + "documentation":"

A name to filter on.

" + } + }, + "documentation":"

A filter for a sequence store.

" + }, + "SequenceStoreId":{ + "type":"string", + "max":36, + "min":10, + "pattern":"^[0-9]+$" + }, + "SequenceStoreName":{ + "type":"string", + "max":127, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The request exceeds a service quota.

", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "SourceFiles":{ + "type":"structure", + "required":["source1"], + "members":{ + "source1":{ + "shape":"S3Uri", + "documentation":"

The location of the first file in Amazon S3.

" + }, + "source2":{ + "shape":"S3Uri", + "documentation":"

The location of the second file in Amazon S3.

" + } + }, + "documentation":"

Source files for a sequence.

" + }, + "SseConfig":{ + "type":"structure", + "required":["type"], + "members":{ + "keyArn":{ + "shape":"SseConfigKeyArnString", + "documentation":"

An encryption key ARN.

" + }, + "type":{ + "shape":"EncryptionType", + "documentation":"

The encryption type.

" + } + }, + "documentation":"

Server-side encryption (SSE) settings for a store.

" + }, + "SseConfigKeyArnString":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:([^: ]*):([^: ]*):([^: ]*):([0-9]{12}):([^: ]*)" + }, + "StartAnnotationImportRequest":{ + "type":"structure", + "required":[ + "destinationName", + "items", + "roleArn" + ], + "members":{ + "destinationName":{ + "shape":"StoreName", + "documentation":"

A destination annotation store for the job.

" + }, + "formatOptions":{ + "shape":"FormatOptions", + "documentation":"

Formatting options for the annotation file.

" + }, + "items":{ + "shape":"AnnotationImportItemSources", + "documentation":"

Items to import.

" + }, + "roleArn":{ + "shape":"Arn", + "documentation":"

A service role for the job.

" + }, + "runLeftNormalization":{ + "shape":"RunLeftNormalization", + "documentation":"

The job's left normalization setting.

" + } + } + }, + "StartAnnotationImportResponse":{ + "type":"structure", + "required":["jobId"], + "members":{ + "jobId":{ + "shape":"ResourceId", + "documentation":"

The job's ID.

" + } + } + }, + "StartReadSetActivationJobRequest":{ + "type":"structure", + "required":[ + "sequenceStoreId", + "sources" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

To ensure that jobs don't run multiple times, specify a unique token for each job.

" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The read set's sequence store ID.

", + "location":"uri", + "locationName":"sequenceStoreId" + }, + "sources":{ + "shape":"StartReadSetActivationJobRequestSourcesList", + "documentation":"

The job's sources.

" + } + } + }, + "StartReadSetActivationJobRequestSourcesList":{ + "type":"list", + "member":{"shape":"StartReadSetActivationJobSourceItem"}, + "max":20, + "min":1 + }, + "StartReadSetActivationJobResponse":{ + "type":"structure", + "required":[ + "creationTime", + "id", + "sequenceStoreId", + "status" + ], + "members":{ + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the job was created.

" + }, + "id":{ + "shape":"ActivationJobId", + "documentation":"

The job's ID.

" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The read set's sequence store ID.

" + }, + "status":{ + "shape":"ReadSetActivationJobStatus", + "documentation":"

The job's status.

" + } + } + }, + "StartReadSetActivationJobSourceItem":{ + "type":"structure", + "required":["readSetId"], + "members":{ + "readSetId":{ + "shape":"ReadSetId", + "documentation":"

The source's read set ID.

" + } + }, + "documentation":"

A source for a read set activation job.

" + }, + "StartReadSetExportJobRequest":{ + "type":"structure", + "required":[ + "destination", + "roleArn", + "sequenceStoreId", + "sources" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

To ensure that jobs don't run multiple times, specify a unique token for each job.

" + }, + "destination":{ + "shape":"S3Destination", + "documentation":"

A location for exported files in Amazon S3.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

A service role for the job.

" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The read set's sequence store ID.

", + "location":"uri", + "locationName":"sequenceStoreId" + }, + "sources":{ + "shape":"StartReadSetExportJobRequestSourcesList", + "documentation":"

Sources for the job.

" + } + } + }, + "StartReadSetExportJobRequestSourcesList":{ + "type":"list", + "member":{"shape":"ExportReadSet"}, + "max":100, + "min":1 + }, + "StartReadSetExportJobResponse":{ + "type":"structure", + "required":[ + "creationTime", + "destination", + "id", + "sequenceStoreId", + "status" + ], + "members":{ + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the job was created.

" + }, + "destination":{ + "shape":"S3Destination", + "documentation":"

The job's output location.

" + }, + "id":{ + "shape":"ExportJobId", + "documentation":"

The job's ID.

" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The read set's sequence store ID.

" + }, + "status":{ + "shape":"ReadSetExportJobStatus", + "documentation":"

The job's status.

" + } + } + }, + "StartReadSetImportJobRequest":{ + "type":"structure", + "required":[ + "roleArn", + "sequenceStoreId", + "sources" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

To ensure that jobs don't run multiple times, specify a unique token for each job.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

A service role for the job.

" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The read set's sequence store ID.

", + "location":"uri", + "locationName":"sequenceStoreId" + }, + "sources":{ + "shape":"StartReadSetImportJobRequestSourcesList", + "documentation":"

Source files to import.

" + } + } + }, + "StartReadSetImportJobRequestSourcesList":{ + "type":"list", + "member":{"shape":"StartReadSetImportJobSourceItem"}, + "max":100, + "min":1 + }, + "StartReadSetImportJobResponse":{ + "type":"structure", + "required":[ + "creationTime", + "id", + "roleArn", + "sequenceStoreId", + "status" + ], + "members":{ + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the job was created.

" + }, + "id":{ + "shape":"ImportJobId", + "documentation":"

The job's ID.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The job's service role ARN.

" + }, + "sequenceStoreId":{ + "shape":"SequenceStoreId", + "documentation":"

The read set's sequence store ID.

" + }, + "status":{ + "shape":"ReadSetImportJobStatus", + "documentation":"

The job's status.

" + } + } + }, + "StartReadSetImportJobSourceItem":{ + "type":"structure", + "required":[ + "referenceArn", + "sampleId", + "sourceFileType", + "sourceFiles", + "subjectId" + ], + "members":{ + "description":{ + "shape":"ReadSetDescription", + "documentation":"

The source's description.

" + }, + "generatedFrom":{ + "shape":"GeneratedFrom", + "documentation":"

Where the source originated.

" + }, + "name":{ + "shape":"ReadSetName", + "documentation":"

The source's name.

" + }, + "referenceArn":{ + "shape":"ReferenceArn", + "documentation":"

The source's reference ARN.

" + }, + "sampleId":{ + "shape":"SampleId", + "documentation":"

The source's sample ID.

" + }, + "sourceFileType":{ + "shape":"FileType", + "documentation":"

The source's file type.

" + }, + "sourceFiles":{ + "shape":"SourceFiles", + "documentation":"

The source files' location in Amazon S3.

" + }, + "subjectId":{ + "shape":"SubjectId", + "documentation":"

The source's subject ID.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The source's tags.

" + } + }, + "documentation":"

A source for a read set import job.

" + }, + "StartReferenceImportJobRequest":{ + "type":"structure", + "required":[ + "referenceStoreId", + "roleArn", + "sources" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

To ensure that jobs don't run multiple times, specify a unique token for each job.

" + }, + "referenceStoreId":{ + "shape":"ReferenceStoreId", + "documentation":"

The job's reference store ID.

", + "location":"uri", + "locationName":"referenceStoreId" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

A service role for the job.

" + }, + "sources":{ + "shape":"StartReferenceImportJobRequestSourcesList", + "documentation":"

Sources for the job.

" + } + } + }, + "StartReferenceImportJobRequestSourcesList":{ + "type":"list", + "member":{"shape":"StartReferenceImportJobSourceItem"}, + "max":100, + "min":1 + }, + "StartReferenceImportJobResponse":{ + "type":"structure", + "required":[ + "creationTime", + "id", + "referenceStoreId", + "roleArn", + "status" + ], + "members":{ + "creationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

When the job was created.

" + }, + "id":{ + "shape":"ImportJobId", + "documentation":"

The job's ID.

" + }, + "referenceStoreId":{ + "shape":"ReferenceStoreId", + "documentation":"

The job's reference store ID.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The job's service role ARN.

" + }, + "status":{ + "shape":"ReferenceImportJobStatus", + "documentation":"

The job's status.

" + } + } + }, + "StartReferenceImportJobSourceItem":{ + "type":"structure", + "required":[ + "name", + "sourceFile" + ], + "members":{ + "description":{ + "shape":"ReferenceDescription", + "documentation":"

The source's description.

" + }, + "name":{ + "shape":"ReferenceName", + "documentation":"

The source's name.

" + }, + "sourceFile":{ + "shape":"S3Uri", + "documentation":"

The source file's location in Amazon S3.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The source's tags.

" + } + }, + "documentation":"

A source for a reference import job.

" + }, + "StartRunRequest":{ + "type":"structure", + "required":[ + "requestId", + "roleArn" + ], + "members":{ + "logLevel":{ + "shape":"RunLogLevel", + "documentation":"

A log level for the run.

" + }, + "name":{ + "shape":"RunName", + "documentation":"

A name for the run.

" + }, + "outputUri":{ + "shape":"RunOutputUri", + "documentation":"

An output URI for the run.

" + }, + "parameters":{ + "shape":"RunParameters", + "documentation":"

Parameters for the run.

" + }, + "priority":{ + "shape":"StartRunRequestPriorityInteger", + "documentation":"

A priority for the run.

" + }, + "requestId":{ + "shape":"RunRequestId", + "documentation":"

A request ID for the run.

", + "idempotencyToken":true + }, + "roleArn":{ + "shape":"RunRoleArn", + "documentation":"

A service role for the run.

" + }, + "runGroupId":{ + "shape":"RunGroupId", + "documentation":"

The run's group ID.

" + }, + "runId":{ + "shape":"RunId", + "documentation":"

The run's ID.

" + }, + "storageCapacity":{ + "shape":"StartRunRequestStorageCapacityInteger", + "documentation":"

A storage capacity for the run.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

Tags for the run.

" + }, + "workflowId":{ + "shape":"WorkflowId", + "documentation":"

The run's workflow ID.

" + }, + "workflowType":{ + "shape":"WorkflowType", + "documentation":"

The run's workflow type.

" + } + } + }, + "StartRunRequestPriorityInteger":{ + "type":"integer", + "box":true, + "max":100000, + "min":0 + }, + "StartRunRequestStorageCapacityInteger":{ + "type":"integer", + "box":true, + "max":100000, + "min":0 + }, + "StartRunResponse":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"RunArn", + "documentation":"

The run's ARN.

" + }, + "id":{ + "shape":"RunId", + "documentation":"

The run's ID.

" + }, + "status":{ + "shape":"RunStatus", + "documentation":"

The run's status.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The run's tags.

" + } + } + }, + "StartVariantImportRequest":{ + "type":"structure", + "required":[ + "destinationName", + "items", + "roleArn" + ], + "members":{ + "destinationName":{ + "shape":"StoreName", + "documentation":"

The destination variant store for the job.

" + }, + "items":{ + "shape":"VariantImportItemSources", + "documentation":"

Items to import.

" + }, + "roleArn":{ + "shape":"Arn", + "documentation":"

A service role for the job.

" + }, + "runLeftNormalization":{ + "shape":"RunLeftNormalization", + "documentation":"

The job's left normalization setting.

" + } + } + }, + "StartVariantImportResponse":{ + "type":"structure", + "required":["jobId"], + "members":{ + "jobId":{ + "shape":"ResourceId", + "documentation":"

The job's ID.

" + } + } + }, + "StatusMessage":{ + "type":"string", + "max":1000, + "min":0 + }, + "StoreDescription":{ + "type":"string", + "max":500, + "min":0 + }, + "StoreFormat":{ + "type":"string", + "enum":[ + "GFF", + "TSV", + "VCF" + ] + }, + "StoreName":{ + "type":"string", + "max":255, + "min":3, + "pattern":"^([a-z]){1}([a-z0-9_]){2,254}$" + }, + "StoreOptions":{ + "type":"structure", + "members":{ + "tsvStoreOptions":{ + "shape":"TsvStoreOptions", + "documentation":"

File settings for a TSV store.

" + } + }, + "documentation":"

Settings for a store.

", + "union":true + }, + "StoreStatus":{ + "type":"string", + "enum":[ + "CREATING", + "UPDATING", + "DELETING", + "ACTIVE", + "FAILED" + ] + }, + "String":{"type":"string"}, + "SubjectId":{ + "type":"string", + "max":127, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "SyntheticTimestamp_date_time":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "TagArn":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^arn:.+$" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"} + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"TagArn", + "documentation":"

The resource's ARN.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagResourceRequestTagsMap", + "documentation":"

Tags for the resource.

" + } + } + }, + "TagResourceRequestTagsMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":0 + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "TaskId":{ + "type":"string", + "max":18, + "min":1, + "pattern":"^[0-9]+$" + }, + "TaskList":{ + "type":"list", + "member":{"shape":"TaskListItem"} + }, + "TaskListItem":{ + "type":"structure", + "members":{ + "cpus":{ + "shape":"TaskListItemCpusInteger", + "documentation":"

The task's CPU count.

" + }, + "creationTime":{ + "shape":"TaskTimestamp", + "documentation":"

When the task was created.

" + }, + "memory":{ + "shape":"TaskListItemMemoryInteger", + "documentation":"

The task's memory.

" + }, + "name":{ + "shape":"TaskName", + "documentation":"

The task's name.

" + }, + "startTime":{ + "shape":"TaskTimestamp", + "documentation":"

When the task started.

" + }, + "status":{ + "shape":"TaskStatus", + "documentation":"

The task's status.

" + }, + "stopTime":{ + "shape":"TaskTimestamp", + "documentation":"

When the task stopped.

" + }, + "taskId":{ + "shape":"TaskId", + "documentation":"

The task's ID.

" + } + }, + "documentation":"

A workflow run task.

" + }, + "TaskListItemCpusInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "TaskListItemMemoryInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "TaskListToken":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "TaskLogStream":{ + "type":"string", + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "TaskName":{ + "type":"string", + "max":128, + "min":1 + }, + "TaskStatus":{ + "type":"string", + "enum":[ + "PENDING", + "STARTING", + "RUNNING", + "STOPPING", + "COMPLETED", + "CANCELLED", + "FAILED" + ], + "max":64, + "min":1 + }, + "TaskStatusMessage":{ + "type":"string", + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "TaskTimestamp":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The request was denied due to request throttling.

", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "TsvOptions":{ + "type":"structure", + "members":{ + "readOptions":{ + "shape":"ReadOptions", + "documentation":"

The file's read options.

" + } + }, + "documentation":"

Formatting options for a TSV file.

" + }, + "TsvStoreOptions":{ + "type":"structure", + "members":{ + "annotationType":{ + "shape":"AnnotationType", + "documentation":"

The store's annotation type.

" + }, + "formatToHeader":{ + "shape":"FormatToHeader", + "documentation":"

The store's header key to column name mapping.

" + }, + "schema":{ + "shape":"TsvStoreOptionsSchemaList", + "documentation":"

The store's schema.

" + } + }, + "documentation":"

File settings for a TSV store.

" + }, + "TsvStoreOptionsSchemaList":{ + "type":"list", + "member":{"shape":"SchemaItem"}, + "max":5000, + "min":1 + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"TagArn", + "documentation":"

The resource's ARN.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

Keys of tags to remove.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateAnnotationStoreRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "description":{ + "shape":"StoreDescription", + "documentation":"

A description for the store.

" + }, + "name":{ + "shape":"String", + "documentation":"

A name for the store.

", + "location":"uri", + "locationName":"name" + } + } + }, + "UpdateAnnotationStoreResponse":{ + "type":"structure", + "required":[ + "creationTime", + "description", + "id", + "name", + "reference", + "status", + "updateTime" + ], + "members":{ + "creationTime":{ + "shape":"CreationTime", + "documentation":"

When the store was created.

" + }, + "description":{ + "shape":"StoreDescription", + "documentation":"

The store's description.

" + }, + "id":{ + "shape":"ResourceId", + "documentation":"

The store's ID.

" + }, + "name":{ + "shape":"String", + "documentation":"

The store's name.

" + }, + "reference":{ + "shape":"ReferenceItem", + "documentation":"

The store's genome reference.

" + }, + "status":{ + "shape":"StoreStatus", + "documentation":"

The store's status.

" + }, + "storeFormat":{ + "shape":"StoreFormat", + "documentation":"

The annotation file format of the store.

" + }, + "storeOptions":{ + "shape":"StoreOptions", + "documentation":"

Parsing options for the store.

" + }, + "updateTime":{ + "shape":"UpdateTime", + "documentation":"

When the store was updated.

" + } + } + }, + "UpdateRunGroupRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"RunGroupId", + "documentation":"

The group's ID.

", + "location":"uri", + "locationName":"id" + }, + "maxCpus":{ + "shape":"UpdateRunGroupRequestMaxCpusInteger", + "documentation":"

The maximum number of CPUs to use.

" + }, + "maxDuration":{ + "shape":"UpdateRunGroupRequestMaxDurationInteger", + "documentation":"

The maximum amount of time to run.

" + }, + "maxRuns":{ + "shape":"UpdateRunGroupRequestMaxRunsInteger", + "documentation":"

The maximum number of concurrent runs for the group.

" + }, + "name":{ + "shape":"RunGroupName", + "documentation":"

A name for the group.

" + } + } + }, + "UpdateRunGroupRequestMaxCpusInteger":{ + "type":"integer", + "box":true, + "max":100000, + "min":1 + }, + "UpdateRunGroupRequestMaxDurationInteger":{ + "type":"integer", + "box":true, + "max":100000, + "min":1 + }, + "UpdateRunGroupRequestMaxRunsInteger":{ + "type":"integer", + "box":true, + "max":100000, + "min":1 + }, + "UpdateTime":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "UpdateVariantStoreRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "description":{ + "shape":"StoreDescription", + "documentation":"

A description for the store.

" + }, + "name":{ + "shape":"String", + "documentation":"

A name for the store.

", + "location":"uri", + "locationName":"name" + } + } + }, + "UpdateVariantStoreResponse":{ + "type":"structure", + "required":[ + "creationTime", + "description", + "id", + "name", + "reference", + "status", + "updateTime" + ], + "members":{ + "creationTime":{ + "shape":"CreationTime", + "documentation":"

When the store was created.

" + }, + "description":{ + "shape":"StoreDescription", + "documentation":"

The store's description.

" + }, + "id":{ + "shape":"ResourceId", + "documentation":"

The store's ID.

" + }, + "name":{ + "shape":"String", + "documentation":"

The store's name.

" + }, + "reference":{ + "shape":"ReferenceItem", + "documentation":"

The store's genome reference.

" + }, + "status":{ + "shape":"StoreStatus", + "documentation":"

The store's status.

" + }, + "updateTime":{ + "shape":"UpdateTime", + "documentation":"

When the store was updated.

" + } + } + }, + "UpdateWorkflowRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "description":{ + "shape":"WorkflowDescription", + "documentation":"

A description for the workflow.

" + }, + "id":{ + "shape":"WorkflowId", + "documentation":"

The workflow's ID.

", + "location":"uri", + "locationName":"id" + }, + "name":{ + "shape":"WorkflowName", + "documentation":"

A name for the workflow.

" + } + } + }, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The input fails to satisfy the constraints specified by an AWS service.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "VariantImportItemDetail":{ + "type":"structure", + "required":[ + "jobStatus", + "source" + ], + "members":{ + "jobStatus":{ + "shape":"JobStatus", + "documentation":"

The item's job status.

" + }, + "source":{ + "shape":"S3Uri", + "documentation":"

The source file's location in Amazon S3.

" + } + }, + "documentation":"

Details about an imported variant item.

" + }, + "VariantImportItemDetails":{ + "type":"list", + "member":{"shape":"VariantImportItemDetail"}, + "max":1, + "min":1 + }, + "VariantImportItemSource":{ + "type":"structure", + "required":["source"], + "members":{ + "source":{ + "shape":"S3Uri", + "documentation":"

The source file's location in Amazon S3.

" + } + }, + "documentation":"

An imported variant item's source.

" + }, + "VariantImportItemSources":{ + "type":"list", + "member":{"shape":"VariantImportItemSource"}, + "max":1, + "min":1 + }, + "VariantImportJobItem":{ + "type":"structure", + "required":[ + "creationTime", + "destinationName", + "id", + "roleArn", + "status", + "updateTime" + ], + "members":{ + "completionTime":{ + "shape":"CompletionTime", + "documentation":"

When the job completed.

" + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"

When the job was created.

" + }, + "destinationName":{ + "shape":"String", + "documentation":"

The job's destination variant store.

" + }, + "id":{ + "shape":"String", + "documentation":"

The job's ID.

" + }, + "roleArn":{ + "shape":"Arn", + "documentation":"

The job's service role ARN.

" + }, + "runLeftNormalization":{ + "shape":"RunLeftNormalization", + "documentation":"

The job's left normalization setting.

" + }, + "status":{ + "shape":"JobStatus", + "documentation":"

The job's status.

" + }, + "updateTime":{ + "shape":"UpdateTime", + "documentation":"

When the job was updated.

" + } + }, + "documentation":"

A variant import job.

" + }, + "VariantImportJobItems":{ + "type":"list", + "member":{"shape":"VariantImportJobItem"} + }, + "VariantStoreItem":{ + "type":"structure", + "required":[ + "creationTime", + "description", + "id", + "name", + "reference", + "sseConfig", + "status", + "statusMessage", + "storeArn", + "storeSizeBytes", + "updateTime" + ], + "members":{ + "creationTime":{ + "shape":"CreationTime", + "documentation":"

When the store was created.

" + }, + "description":{ + "shape":"StoreDescription", + "documentation":"

The store's description.

" + }, + "id":{ + "shape":"ResourceId", + "documentation":"

The store's ID.

" + }, + "name":{ + "shape":"String", + "documentation":"

The store's name.

" + }, + "reference":{ + "shape":"ReferenceItem", + "documentation":"

The store's genome reference.

" + }, + "sseConfig":{ + "shape":"SseConfig", + "documentation":"

The store's server-side encryption (SSE) settings.

" + }, + "status":{ + "shape":"StoreStatus", + "documentation":"

The store's status.

" + }, + "statusMessage":{ + "shape":"StatusMessage", + "documentation":"

The store's status message.

" + }, + "storeArn":{ + "shape":"Arn", + "documentation":"

The store's ARN.

" + }, + "storeSizeBytes":{ + "shape":"Long", + "documentation":"

The store's size in bytes.

" + }, + "updateTime":{ + "shape":"UpdateTime", + "documentation":"

When the store was updated.

" + } + }, + "documentation":"

A variant store.

" + }, + "VariantStoreItems":{ + "type":"list", + "member":{"shape":"VariantStoreItem"} + }, + "VcfOptions":{ + "type":"structure", + "members":{ + "ignoreFilterField":{ + "shape":"Boolean", + "documentation":"

The file's ignore filter field setting.

" + }, + "ignoreQualField":{ + "shape":"Boolean", + "documentation":"

The file's ignore qual field setting.

" + } + }, + "documentation":"

Formatting options for a VCF file.

" + }, + "WorkflowArn":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^arn:.+$" + }, + "WorkflowDefinition":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "WorkflowDescription":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "WorkflowDigest":{ + "type":"string", + "max":64, + "min":1 + }, + "WorkflowEngine":{ + "type":"string", + "enum":[ + "WDL", + "NEXTFLOW" + ], + "max":64, + "min":1 + }, + "WorkflowExport":{ + "type":"string", + "enum":["DEFINITION"], + "max":64, + "min":1 + }, + "WorkflowExportList":{ + "type":"list", + "member":{"shape":"WorkflowExport"}, + "max":32, + "min":0 + }, + "WorkflowId":{ + "type":"string", + "max":18, + "min":1, + "pattern":"^[0-9]+$" + }, + "WorkflowList":{ + "type":"list", + "member":{"shape":"WorkflowListItem"} + }, + "WorkflowListItem":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"WorkflowArn", + "documentation":"

The workflow's ARN.

" + }, + "creationTime":{ + "shape":"WorkflowTimestamp", + "documentation":"

When the workflow was created.

" + }, + "digest":{ + "shape":"WorkflowDigest", + "documentation":"

The workflow's digest.

" + }, + "id":{ + "shape":"WorkflowId", + "documentation":"

The workflow's ID.

" + }, + "name":{ + "shape":"WorkflowName", + "documentation":"

The workflow's name.

" + }, + "status":{ + "shape":"WorkflowStatus", + "documentation":"

The workflow's status.

" + }, + "type":{ + "shape":"WorkflowType", + "documentation":"

The workflow's type.

" + } + }, + "documentation":"

A workflow.

" + }, + "WorkflowListToken":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "WorkflowMain":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "WorkflowName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "WorkflowParameter":{ + "type":"structure", + "members":{ + "description":{ + "shape":"WorkflowParameterDescription", + "documentation":"

The parameter's description.

" + }, + "optional":{ + "shape":"Boolean", + "documentation":"

Whether the parameter is optional.

" + } + }, + "documentation":"

A workflow parameter.

" + }, + "WorkflowParameterDescription":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "WorkflowParameterName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "WorkflowParameterTemplate":{ + "type":"map", + "key":{"shape":"WorkflowParameterName"}, + "value":{"shape":"WorkflowParameter"}, + "max":1000, + "min":1 + }, + "WorkflowRequestId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "WorkflowStatus":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "UPDATING", + "DELETED", + "FAILED" + ], + "max":64, + "min":1 + }, + "WorkflowStatusMessage":{ + "type":"string", + "pattern":"^[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+$" + }, + "WorkflowTimestamp":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "WorkflowType":{ + "type":"string", + "enum":["PRIVATE"], + "max":64, + "min":1 + } + }, + "documentation":"

This is the Amazon Omics API Reference. For an introduction to the service, see What is Amazon Omics? in the Amazon Omics Developer Guide.

" +} diff --git a/botocore/data/omics/2022-11-28/waiters-2.json b/botocore/data/omics/2022-11-28/waiters-2.json new file mode 100644 index 0000000000..db1de32eed --- /dev/null +++ b/botocore/data/omics/2022-11-28/waiters-2.json @@ -0,0 +1,498 @@ +{ + "version" : 2, + "waiters" : { + "AnnotationImportJobCreated" : { + "description" : "Wait until an annotation import is completed", + "delay" : 30, + "maxAttempts" : 20, + "operation" : "GetAnnotationImportJob", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "SUBMITTED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "IN_PROGRESS" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "FAILED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "CANCELLED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "COMPLETED" + } ] + }, + "AnnotationStoreCreated" : { + "description" : "Wait until an annotation store is created", + "delay" : 30, + "maxAttempts" : 20, + "operation" : "GetAnnotationStore", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "ACTIVE" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "CREATING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "UPDATING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "FAILED" + } ] + }, + "AnnotationStoreDeleted" : { + "description" : "Wait until an annotation store is deleted.", + "delay" : 30, + "maxAttempts" : 20, + "operation" : "GetAnnotationStore", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "DELETED" + }, { + "matcher" : "error", + "state" : "success", + "expected" : "ResourceNotFoundException" + }, { + 
"matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "DELETING" + } ] + }, + "ReadSetActivationJobCompleted" : { + "description" : "Wait until a job is completed.", + "delay" : 30, + "maxAttempts" : 20, + "operation" : "GetReadSetActivationJob", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "COMPLETED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "SUBMITTED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "IN_PROGRESS" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "CANCELLING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "CANCELLED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "FAILED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "COMPLETED_WITH_FAILURES" + } ] + }, + "ReadSetExportJobCompleted" : { + "description" : "Wait until a job is completed.", + "delay" : 30, + "maxAttempts" : 20, + "operation" : "GetReadSetExportJob", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "COMPLETED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "SUBMITTED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "IN_PROGRESS" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "CANCELLING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "CANCELLED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "FAILED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "COMPLETED_WITH_FAILURES" + } ] + }, + "ReadSetImportJobCompleted" : { + 
"description" : "Wait until a job is completed.", + "delay" : 30, + "maxAttempts" : 20, + "operation" : "GetReadSetImportJob", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "COMPLETED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "SUBMITTED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "IN_PROGRESS" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "CANCELLING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "CANCELLED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "FAILED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "COMPLETED_WITH_FAILURES" + } ] + }, + "ReferenceImportJobCompleted" : { + "description" : "Wait until a job is completed.", + "delay" : 30, + "maxAttempts" : 20, + "operation" : "GetReferenceImportJob", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "COMPLETED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "SUBMITTED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "IN_PROGRESS" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "CANCELLING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "CANCELLED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "FAILED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "COMPLETED_WITH_FAILURES" + } ] + }, + "RunCompleted" : { + "description" : "Wait until a run is completed.", + "delay" : 30, + "maxAttempts" : 20, + "operation" : "GetRun", + "acceptors" : [ { + "matcher" : "path", + 
"argument" : "status", + "state" : "success", + "expected" : "COMPLETED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "PENDING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "STARTING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "RUNNING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "STOPPING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "FAILED" + } ] + }, + "RunRunning" : { + "description" : "Wait until a run is running.", + "delay" : 30, + "maxAttempts" : 20, + "operation" : "GetRun", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "RUNNING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "PENDING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "STARTING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "FAILED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "CANCELLED" + } ] + }, + "TaskCompleted" : { + "description" : "Wait until a task is completed.", + "delay" : 30, + "maxAttempts" : 20, + "operation" : "GetRunTask", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "COMPLETED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "PENDING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "STARTING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "RUNNING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "STOPPING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + 
"expected" : "FAILED" + } ] + }, + "TaskRunning" : { + "description" : "Wait until a task is running.", + "delay" : 30, + "maxAttempts" : 20, + "operation" : "GetRunTask", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "RUNNING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "PENDING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "STARTING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "FAILED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "CANCELLED" + } ] + }, + "VariantImportJobCreated" : { + "description" : "Wait until variant import is completed", + "delay" : 30, + "maxAttempts" : 20, + "operation" : "GetVariantImportJob", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "SUBMITTED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "IN_PROGRESS" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "FAILED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "CANCELLED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "COMPLETED" + } ] + }, + "VariantStoreCreated" : { + "description" : "Wait until a variant store is created", + "delay" : 30, + "maxAttempts" : 20, + "operation" : "GetVariantStore", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "ACTIVE" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "CREATING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "UPDATING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "FAILED" + } ] + }, 
+ "VariantStoreDeleted" : { + "description" : "Wait until a variant store is deleted.", + "delay" : 30, + "maxAttempts" : 20, + "operation" : "GetVariantStore", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "DELETED" + }, { + "matcher" : "error", + "state" : "success", + "expected" : "ResourceNotFoundException" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "DELETING" + } ] + }, + "WorkflowActive" : { + "description" : "Wait until a workflow is active.", + "delay" : 3, + "maxAttempts" : 10, + "operation" : "GetWorkflow", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "ACTIVE" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "CREATING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "UPDATING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "FAILED" + } ] + } + } +} \ No newline at end of file diff --git a/botocore/data/opensearchserverless/2021-11-01/endpoint-rule-set-1.json b/botocore/data/opensearchserverless/2021-11-01/endpoint-rule-set-1.json new file mode 100644 index 0000000000..6c4f0a782a --- /dev/null +++ b/botocore/data/opensearchserverless/2021-11-01/endpoint-rule-set-1.json @@ -0,0 +1,309 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": true, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. 
If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + 
"ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://aoss-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://aoss-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://aoss.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "endpoint": { + "url": 
"https://aoss.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] +} \ No newline at end of file diff --git a/botocore/data/opensearchserverless/2021-11-01/paginators-1.json b/botocore/data/opensearchserverless/2021-11-01/paginators-1.json new file mode 100644 index 0000000000..ea142457a6 --- /dev/null +++ b/botocore/data/opensearchserverless/2021-11-01/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/opensearchserverless/2021-11-01/service-2.json b/botocore/data/opensearchserverless/2021-11-01/service-2.json new file mode 100644 index 0000000000..2bbaf35336 --- /dev/null +++ b/botocore/data/opensearchserverless/2021-11-01/service-2.json @@ -0,0 +1,2399 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2021-11-01", + "endpointPrefix":"aoss", + "jsonVersion":"1.0", + "protocol":"json", + "serviceFullName":"OpenSearch Service Serverless", + "serviceId":"OpenSearchServerless", + "signatureVersion":"v4", + "signingName":"aoss", + "targetPrefix":"OpenSearchServerless", + "uid":"opensearchserverless-2021-11-01" + }, + "operations":{ + "BatchGetCollection":{ + "name":"BatchGetCollection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetCollectionRequest"}, + "output":{"shape":"BatchGetCollectionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns attributes for one or more collections, including the collection endpoint and the OpenSearch Dashboards endpoint. For more information, see Creating and managing Amazon OpenSearch Serverless collections.

" + }, + "BatchGetVpcEndpoint":{ + "name":"BatchGetVpcEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetVpcEndpointRequest"}, + "output":{"shape":"BatchGetVpcEndpointResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns attributes for one or more VPC endpoints associated with the current account. For more information, see Access Amazon OpenSearch Serverless using an interface endpoint.

" + }, + "CreateAccessPolicy":{ + "name":"CreateAccessPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAccessPolicyRequest"}, + "output":{"shape":"CreateAccessPolicyResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Creates a data access policy for OpenSearch Serverless. Access policies limit access to collections and the resources within them, and allow a user to access that data irrespective of the access mechanism or network source. For more information, see Data access control for Amazon OpenSearch Serverless.

", + "idempotent":true + }, + "CreateCollection":{ + "name":"CreateCollection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCollectionRequest"}, + "output":{"shape":"CreateCollectionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Creates a new OpenSearch Serverless collection. For more information, see Creating and managing Amazon OpenSearch Serverless collections.

", + "idempotent":true + }, + "CreateSecurityConfig":{ + "name":"CreateSecurityConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSecurityConfigRequest"}, + "output":{"shape":"CreateSecurityConfigResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Specifies a security configuration for OpenSearch Serverless. For more information, see SAML authentication for Amazon OpenSearch Serverless.

", + "idempotent":true + }, + "CreateSecurityPolicy":{ + "name":"CreateSecurityPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSecurityPolicyRequest"}, + "output":{"shape":"CreateSecurityPolicyResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Creates a security policy to be used by one or more OpenSearch Serverless collections. Security policies provide access to a collection and its OpenSearch Dashboards endpoint from public networks or specific VPC endpoints. They also allow you to secure a collection with a KMS encryption key. For more information, see Network access for Amazon OpenSearch Serverless and Encryption at rest for Amazon OpenSearch Serverless.

", + "idempotent":true + }, + "CreateVpcEndpoint":{ + "name":"CreateVpcEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpcEndpointRequest"}, + "output":{"shape":"CreateVpcEndpointResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Creates an OpenSearch Serverless-managed interface VPC endpoint. For more information, see Access Amazon OpenSearch Serverless using an interface endpoint.

", + "idempotent":true + }, + "DeleteAccessPolicy":{ + "name":"DeleteAccessPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAccessPolicyRequest"}, + "output":{"shape":"DeleteAccessPolicyResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes an OpenSearch Serverless access policy. For more information, see Data access control for Amazon OpenSearch Serverless.

", + "idempotent":true + }, + "DeleteCollection":{ + "name":"DeleteCollection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCollectionRequest"}, + "output":{"shape":"DeleteCollectionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes an OpenSearch Serverless collection. For more information, see Creating and managing Amazon OpenSearch Serverless collections.

", + "idempotent":true + }, + "DeleteSecurityConfig":{ + "name":"DeleteSecurityConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSecurityConfigRequest"}, + "output":{"shape":"DeleteSecurityConfigResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes a security configuration for OpenSearch Serverless. For more information, see SAML authentication for Amazon OpenSearch Serverless.

", + "idempotent":true + }, + "DeleteSecurityPolicy":{ + "name":"DeleteSecurityPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSecurityPolicyRequest"}, + "output":{"shape":"DeleteSecurityPolicyResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes an OpenSearch Serverless security policy.

", + "idempotent":true + }, + "DeleteVpcEndpoint":{ + "name":"DeleteVpcEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpcEndpointRequest"}, + "output":{"shape":"DeleteVpcEndpointResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes an OpenSearch Serverless-managed interface endpoint. For more information, see Access Amazon OpenSearch Serverless using an interface endpoint.

", + "idempotent":true + }, + "GetAccessPolicy":{ + "name":"GetAccessPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAccessPolicyRequest"}, + "output":{"shape":"GetAccessPolicyResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns an OpenSearch Serverless access policy. For more information, see Data access control for Amazon OpenSearch Serverless.

" + }, + "GetAccountSettings":{ + "name":"GetAccountSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAccountSettingsRequest"}, + "output":{"shape":"GetAccountSettingsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns account-level settings related to OpenSearch Serverless.

" + }, + "GetPoliciesStats":{ + "name":"GetPoliciesStats", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPoliciesStatsRequest"}, + "output":{"shape":"GetPoliciesStatsResponse"}, + "errors":[ + {"shape":"InternalServerException"} + ], + "documentation":"

Returns statistical information about your OpenSearch Serverless access policies, security configurations, and security policies.

" + }, + "GetSecurityConfig":{ + "name":"GetSecurityConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSecurityConfigRequest"}, + "output":{"shape":"GetSecurityConfigResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns information about an OpenSearch Serverless security configuration. For more information, see SAML authentication for Amazon OpenSearch Serverless.

" + }, + "GetSecurityPolicy":{ + "name":"GetSecurityPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSecurityPolicyRequest"}, + "output":{"shape":"GetSecurityPolicyResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns information about a configured OpenSearch Serverless security policy. For more information, see Network access for Amazon OpenSearch Serverless and Encryption at rest for Amazon OpenSearch Serverless.

" + }, + "ListAccessPolicies":{ + "name":"ListAccessPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAccessPoliciesRequest"}, + "output":{"shape":"ListAccessPoliciesResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns information about a list of OpenSearch Serverless access policies.

" + }, + "ListCollections":{ + "name":"ListCollections", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListCollectionsRequest"}, + "output":{"shape":"ListCollectionsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Lists all OpenSearch Serverless collections. For more information, see Creating and managing Amazon OpenSearch Serverless collections.

Make sure to include an empty request body {} if you don't include any collection filters in the request.

" + }, + "ListSecurityConfigs":{ + "name":"ListSecurityConfigs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSecurityConfigsRequest"}, + "output":{"shape":"ListSecurityConfigsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns information about configured OpenSearch Serverless security configurations. For more information, see SAML authentication for Amazon OpenSearch Serverless.

" + }, + "ListSecurityPolicies":{ + "name":"ListSecurityPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSecurityPoliciesRequest"}, + "output":{"shape":"ListSecurityPoliciesResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns information about configured OpenSearch Serverless security policies.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the tags for an OpenSearch Serverless resource. For more information, see Tagging Amazon OpenSearch Serverless collections.

" + }, + "ListVpcEndpoints":{ + "name":"ListVpcEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListVpcEndpointsRequest"}, + "output":{"shape":"ListVpcEndpointsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the OpenSearch Serverless-managed interface VPC endpoints associated with the current account. For more information, see Access Amazon OpenSearch Serverless using an interface endpoint.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Associates tags with an OpenSearch Serverless resource. For more information, see Tagging Amazon OpenSearch Serverless collections.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Removes a tag or set of tags from an OpenSearch Serverless resource. For more information, see Tagging Amazon OpenSearch Serverless collections.

" + }, + "UpdateAccessPolicy":{ + "name":"UpdateAccessPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAccessPolicyRequest"}, + "output":{"shape":"UpdateAccessPolicyResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Updates an OpenSearch Serverless access policy. For more information, see Data access control for Amazon OpenSearch Serverless.

", + "idempotent":true + }, + "UpdateAccountSettings":{ + "name":"UpdateAccountSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAccountSettingsRequest"}, + "output":{"shape":"UpdateAccountSettingsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Update the OpenSearch Serverless settings for the current Amazon Web Services account. For more information, see Autoscaling.

" + }, + "UpdateCollection":{ + "name":"UpdateCollection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateCollectionRequest"}, + "output":{"shape":"UpdateCollectionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Updates an OpenSearch Serverless collection.

", + "idempotent":true + }, + "UpdateSecurityConfig":{ + "name":"UpdateSecurityConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateSecurityConfigRequest"}, + "output":{"shape":"UpdateSecurityConfigResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Updates a security configuration for OpenSearch Serverless. For more information, see SAML authentication for Amazon OpenSearch Serverless.

", + "idempotent":true + }, + "UpdateSecurityPolicy":{ + "name":"UpdateSecurityPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateSecurityPolicyRequest"}, + "output":{"shape":"UpdateSecurityPolicyResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Updates an OpenSearch Serverless security policy. For more information, see Network access for Amazon OpenSearch Serverless and Encryption at rest for Amazon OpenSearch Serverless.

", + "idempotent":true + }, + "UpdateVpcEndpoint":{ + "name":"UpdateVpcEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateVpcEndpointRequest"}, + "output":{"shape":"UpdateVpcEndpointResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Updates an OpenSearch Serverless-managed interface endpoint. For more information, see Access Amazon OpenSearch Serverless using an interface endpoint.

", + "idempotent":true + } + }, + "shapes":{ + "AccessPolicyDetail":{ + "type":"structure", + "members":{ + "createdDate":{ + "shape":"Long", + "documentation":"

The date the policy was created.

" + }, + "description":{ + "shape":"PolicyDescription", + "documentation":"

The description of the policy.

" + }, + "lastModifiedDate":{ + "shape":"Long", + "documentation":"

The timestamp of when the policy was last modified.

" + }, + "name":{ + "shape":"PolicyName", + "documentation":"

The name of the policy.

" + }, + "policy":{ + "shape":"Document", + "documentation":"

The JSON policy document without any whitespaces.

" + }, + "policyVersion":{ + "shape":"PolicyVersion", + "documentation":"

The version of the policy.

" + }, + "type":{ + "shape":"AccessPolicyType", + "documentation":"

The type of access policy.

" + } + }, + "documentation":"

Details about an OpenSearch Serverless access policy.

" + }, + "AccessPolicyStats":{ + "type":"structure", + "members":{ + "DataPolicyCount":{ + "shape":"Long", + "documentation":"

The number of data access policies in the current account.

" + } + }, + "documentation":"

Statistics for an OpenSearch Serverless access policy.

" + }, + "AccessPolicySummaries":{ + "type":"list", + "member":{"shape":"AccessPolicySummary"} + }, + "AccessPolicySummary":{ + "type":"structure", + "members":{ + "createdDate":{ + "shape":"Long", + "documentation":"

The Epoch time when the access policy was created.

" + }, + "description":{ + "shape":"PolicyDescription", + "documentation":"

The description of the access policy.

" + }, + "lastModifiedDate":{ + "shape":"Long", + "documentation":"

The date and time when the collection was last modified.

" + }, + "name":{ + "shape":"PolicyName", + "documentation":"

The name of the access policy.

" + }, + "policyVersion":{ + "shape":"PolicyVersion", + "documentation":"

The version of the policy.

" + }, + "type":{ + "shape":"AccessPolicyType", + "documentation":"

The type of access policy. Currently the only available type is data.

" + } + }, + "documentation":"

A summary of the data access policy.

" + }, + "AccessPolicyType":{ + "type":"string", + "enum":["data"] + }, + "AccountSettingsDetail":{ + "type":"structure", + "members":{ + "capacityLimits":{"shape":"CapacityLimits"} + }, + "documentation":"

OpenSearch Serverless-related information for the current account.

" + }, + "Arn":{ + "type":"string", + "max":1011, + "min":1 + }, + "BatchGetCollectionRequest":{ + "type":"structure", + "members":{ + "ids":{ + "shape":"CollectionIds", + "documentation":"

A list of collection IDs. You can't provide names and IDs in the same request. The ID is part of the collection endpoint. You can also retrieve it using the ListCollections API.

" + }, + "names":{ + "shape":"CollectionNames", + "documentation":"

A list of collection names. You can't provide names and IDs in the same request.

" + } + } + }, + "BatchGetCollectionResponse":{ + "type":"structure", + "members":{ + "collectionDetails":{ + "shape":"CollectionDetails", + "documentation":"

Details about each collection.

" + }, + "collectionErrorDetails":{ + "shape":"CollectionErrorDetails", + "documentation":"

Error information for the request.

" + } + } + }, + "BatchGetVpcEndpointRequest":{ + "type":"structure", + "required":["ids"], + "members":{ + "ids":{ + "shape":"VpcEndpointIds", + "documentation":"

A list of VPC endpoint identifiers.

" + } + } + }, + "BatchGetVpcEndpointResponse":{ + "type":"structure", + "members":{ + "vpcEndpointDetails":{ + "shape":"VpcEndpointDetails", + "documentation":"

Details about the specified VPC endpoint.

" + }, + "vpcEndpointErrorDetails":{ + "shape":"VpcEndpointErrorDetails", + "documentation":"

Error information for a failed request.

" + } + } + }, + "CapacityLimits":{ + "type":"structure", + "members":{ + "maxIndexingCapacityInOCU":{ + "shape":"IndexingCapacityValue", + "documentation":"

The maximum indexing capacity for collections.

" + }, + "maxSearchCapacityInOCU":{ + "shape":"SearchCapacityValue", + "documentation":"

The maximum search capacity for collections.

" + } + }, + "documentation":"

The maximum capacity limits for all OpenSearch Serverless collections, in OpenSearch Compute Units (OCUs). These limits are used to scale your collections based on the current workload. For more information, see Autoscaling.

" + }, + "ClientToken":{ + "type":"string", + "max":512, + "min":1 + }, + "CollectionDetail":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the collection.

" + }, + "collectionEndpoint":{ + "shape":"String", + "documentation":"

Collection-specific endpoint used to submit index, search, and data upload requests to an OpenSearch Serverless collection.

" + }, + "createdDate":{ + "shape":"Long", + "documentation":"

The Epoch time when the collection was created.

" + }, + "dashboardEndpoint":{ + "shape":"String", + "documentation":"

Collection-specific endpoint used to access OpenSearch Dashboards.

" + }, + "description":{ + "shape":"String", + "documentation":"

A description of the collection.

" + }, + "id":{ + "shape":"CollectionId", + "documentation":"

A unique identifier for the collection.

" + }, + "kmsKeyArn":{ + "shape":"String", + "documentation":"

The ARN of the Amazon Web Services KMS key used to encrypt the collection.

" + }, + "lastModifiedDate":{ + "shape":"Long", + "documentation":"

The date and time when the collection was last modified.

" + }, + "name":{ + "shape":"CollectionName", + "documentation":"

The name of the collection.

" + }, + "status":{ + "shape":"CollectionStatus", + "documentation":"

The current status of the collection.

" + }, + "type":{ + "shape":"CollectionType", + "documentation":"

The type of collection.

" + } + }, + "documentation":"

Details about each OpenSearch Serverless collection, including the collection endpoint and the OpenSearch Dashboards endpoint.

" + }, + "CollectionDetails":{ + "type":"list", + "member":{"shape":"CollectionDetail"} + }, + "CollectionErrorDetail":{ + "type":"structure", + "members":{ + "errorCode":{ + "shape":"String", + "documentation":"

The error code for the request. For example, NOT_FOUND.

" + }, + "errorMessage":{ + "shape":"String", + "documentation":"

A description of the error. For example, The specified Collection is not found.

" + }, + "id":{ + "shape":"CollectionId", + "documentation":"

If the request contains collection IDs, the response includes the IDs provided in the request.

" + }, + "name":{ + "shape":"CollectionName", + "documentation":"

If the request contains collection names, the response includes the names provided in the request.

" + } + }, + "documentation":"

Error information for an OpenSearch Serverless request.

" + }, + "CollectionErrorDetails":{ + "type":"list", + "member":{"shape":"CollectionErrorDetail"} + }, + "CollectionFilters":{ + "type":"structure", + "members":{ + "name":{ + "shape":"CollectionName", + "documentation":"

The name of the collection.

" + }, + "status":{ + "shape":"CollectionStatus", + "documentation":"

The current status of the collection.

" + } + }, + "documentation":"

List of filter keys that you can use for LIST, UPDATE, and DELETE requests to OpenSearch Serverless collections.

" + }, + "CollectionId":{ + "type":"string", + "max":40, + "min":3, + "pattern":"^[a-z0-9]{3,40}$" + }, + "CollectionIds":{ + "type":"list", + "member":{"shape":"CollectionId"}, + "max":100, + "min":1 + }, + "CollectionName":{ + "type":"string", + "max":32, + "min":3, + "pattern":"^[a-z][a-z0-9-]+$" + }, + "CollectionNames":{ + "type":"list", + "member":{"shape":"CollectionName"}, + "max":100, + "min":1 + }, + "CollectionStatus":{ + "type":"string", + "enum":[ + "CREATING", + "DELETING", + "ACTIVE", + "FAILED" + ] + }, + "CollectionSummaries":{ + "type":"list", + "member":{"shape":"CollectionSummary"} + }, + "CollectionSummary":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the collection.

" + }, + "id":{ + "shape":"CollectionId", + "documentation":"

The unique identifier of the collection.

" + }, + "name":{ + "shape":"CollectionName", + "documentation":"

The name of the collection.

" + }, + "status":{ + "shape":"CollectionStatus", + "documentation":"

The current status of the collection.

" + } + }, + "documentation":"

Details about each OpenSearch Serverless collection.

" + }, + "CollectionType":{ + "type":"string", + "enum":[ + "SEARCH", + "TIMESERIES" + ] + }, + "ConfigDescription":{ + "type":"string", + "max":1000, + "min":1 + }, + "ConfigName":{ + "type":"string", + "max":32, + "min":3, + "pattern":"^[a-z][a-z0-9-]+$" + }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

When creating a collection, thrown when a collection with the same name already exists or is being created. When deleting a collection, thrown when the collection is not in the ACTIVE or FAILED state.

", + "exception":true + }, + "CreateAccessPolicyRequest":{ + "type":"structure", + "required":[ + "name", + "policy", + "type" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

Unique, case-sensitive identifier to ensure idempotency of the request.

", + "idempotencyToken":true + }, + "description":{ + "shape":"PolicyDescription", + "documentation":"

A description of the policy. Typically used to store information about the permissions defined in the policy.

" + }, + "name":{ + "shape":"PolicyName", + "documentation":"

The name of the policy.

" + }, + "policy":{ + "shape":"PolicyDocument", + "documentation":"

The JSON policy document to use as the content for the policy.

" + }, + "type":{ + "shape":"AccessPolicyType", + "documentation":"

The type of policy.

" + } + } + }, + "CreateAccessPolicyResponse":{ + "type":"structure", + "members":{ + "accessPolicyDetail":{ + "shape":"AccessPolicyDetail", + "documentation":"

Details about the created access policy.

" + } + } + }, + "CreateCollectionDetail":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the collection.

" + }, + "createdDate":{ + "shape":"Long", + "documentation":"

The Epoch time when the collection was created.

" + }, + "description":{ + "shape":"String", + "documentation":"

A description of the collection.

" + }, + "id":{ + "shape":"CollectionId", + "documentation":"

The unique identifier of the collection.

" + }, + "kmsKeyArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key with which to encrypt the collection.

" + }, + "lastModifiedDate":{ + "shape":"Long", + "documentation":"

The date and time when the collection was last modified.

" + }, + "name":{ + "shape":"CollectionName", + "documentation":"

The name of the collection.

" + }, + "status":{ + "shape":"CollectionStatus", + "documentation":"

The current status of the collection.

" + }, + "type":{ + "shape":"CollectionType", + "documentation":"

The type of collection.

" + } + }, + "documentation":"

Details about the created OpenSearch Serverless collection.

" + }, + "CreateCollectionRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

Unique, case-sensitive identifier to ensure idempotency of the request.

", + "idempotencyToken":true + }, + "description":{ + "shape":"CreateCollectionRequestDescriptionString", + "documentation":"

Description of the collection.

" + }, + "name":{ + "shape":"CollectionName", + "documentation":"

Name of the collection.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

An arbitrary set of tags (key–value pairs) to associate with the OpenSearch Serverless collection.

" + }, + "type":{ + "shape":"CollectionType", + "documentation":"

The type of collection.

" + } + } + }, + "CreateCollectionRequestDescriptionString":{ + "type":"string", + "max":1000, + "min":0 + }, + "CreateCollectionResponse":{ + "type":"structure", + "members":{ + "createCollectionDetail":{ + "shape":"CreateCollectionDetail", + "documentation":"

Details about the collection.

" + } + } + }, + "CreateSecurityConfigRequest":{ + "type":"structure", + "required":[ + "name", + "type" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

Unique, case-sensitive identifier to ensure idempotency of the request.

", + "idempotencyToken":true + }, + "description":{ + "shape":"ConfigDescription", + "documentation":"

A description of the security configuration.

" + }, + "name":{ + "shape":"ConfigName", + "documentation":"

The name of the security configuration.

" + }, + "samlOptions":{ + "shape":"SamlConfigOptions", + "documentation":"

Describes SAML options in the form of a key-value map.

" + }, + "type":{ + "shape":"SecurityConfigType", + "documentation":"

The type of security configuration.

" + } + } + }, + "CreateSecurityConfigResponse":{ + "type":"structure", + "members":{ + "securityConfigDetail":{ + "shape":"SecurityConfigDetail", + "documentation":"

Details about the created security configuration.

" + } + } + }, + "CreateSecurityPolicyRequest":{ + "type":"structure", + "required":[ + "name", + "policy", + "type" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

Unique, case-sensitive identifier to ensure idempotency of the request.

", + "idempotencyToken":true + }, + "description":{ + "shape":"PolicyDescription", + "documentation":"

A description of the policy. Typically used to store information about the permissions defined in the policy.

" + }, + "name":{ + "shape":"PolicyName", + "documentation":"

The name of the policy.

" + }, + "policy":{ + "shape":"PolicyDocument", + "documentation":"

The JSON policy document to use as the content for the new policy.

" + }, + "type":{ + "shape":"SecurityPolicyType", + "documentation":"

The type of security policy.

" + } + } + }, + "CreateSecurityPolicyResponse":{ + "type":"structure", + "members":{ + "securityPolicyDetail":{ + "shape":"SecurityPolicyDetail", + "documentation":"

Details about the created security policy.

" + } + } + }, + "CreateVpcEndpointDetail":{ + "type":"structure", + "members":{ + "id":{ + "shape":"VpcEndpointId", + "documentation":"

The unique identifier of the endpoint.

" + }, + "name":{ + "shape":"VpcEndpointName", + "documentation":"

The name of the endpoint.

" + }, + "status":{ + "shape":"VpcEndpointStatus", + "documentation":"

The current status in the endpoint creation process.

" + } + }, + "documentation":"

Creation details for an OpenSearch Serverless-managed interface endpoint. For more information, see Access Amazon OpenSearch Serverless using an interface endpoint.

" + }, + "CreateVpcEndpointRequest":{ + "type":"structure", + "required":[ + "name", + "subnetIds", + "vpcId" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

Unique, case-sensitive identifier to ensure idempotency of the request.

", + "idempotencyToken":true + }, + "name":{ + "shape":"VpcEndpointName", + "documentation":"

The name of the interface endpoint.

" + }, + "securityGroupIds":{ + "shape":"SecurityGroupIds", + "documentation":"

The unique identifiers of the security groups that define the ports, protocols, and sources for inbound traffic that you are authorizing into your endpoint.

" + }, + "subnetIds":{ + "shape":"SubnetIds", + "documentation":"

The ID of one or more subnets from which you'll access OpenSearch Serverless.

" + }, + "vpcId":{ + "shape":"VpcId", + "documentation":"

The ID of the VPC from which you'll access OpenSearch Serverless.

" + } + } + }, + "CreateVpcEndpointResponse":{ + "type":"structure", + "members":{ + "createVpcEndpointDetail":{ + "shape":"CreateVpcEndpointDetail", + "documentation":"

Details about the created interface VPC endpoint.

" + } + } + }, + "DeleteAccessPolicyRequest":{ + "type":"structure", + "required":[ + "name", + "type" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

Unique, case-sensitive identifier to ensure idempotency of the request.

", + "idempotencyToken":true + }, + "name":{ + "shape":"PolicyName", + "documentation":"

The name of the policy to delete.

" + }, + "type":{ + "shape":"AccessPolicyType", + "documentation":"

The type of policy.

" + } + } + }, + "DeleteAccessPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteCollectionDetail":{ + "type":"structure", + "members":{ + "id":{ + "shape":"CollectionId", + "documentation":"

The unique identifier of the collection.

" + }, + "name":{ + "shape":"CollectionName", + "documentation":"

The name of the collection.

" + }, + "status":{ + "shape":"CollectionStatus", + "documentation":"

The current status of the collection.

" + } + }, + "documentation":"

Details about a deleted OpenSearch Serverless collection.

" + }, + "DeleteCollectionRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier to ensure idempotency of the request.

", + "idempotencyToken":true + }, + "id":{ + "shape":"CollectionId", + "documentation":"

The unique identifier of the collection. For example, 1iu5usc406kd. The ID is part of the collection endpoint. You can also retrieve it using the ListCollections API.

" + } + } + }, + "DeleteCollectionResponse":{ + "type":"structure", + "members":{ + "deleteCollectionDetail":{ + "shape":"DeleteCollectionDetail", + "documentation":"

Details of the deleted collection.

" + } + } + }, + "DeleteSecurityConfigRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

Unique, case-sensitive identifier to ensure idempotency of the request.

", + "idempotencyToken":true + }, + "id":{ + "shape":"SecurityConfigId", + "documentation":"

The security configuration identifier. For SAML the ID will be saml/<accountId>/<idpProviderName>. For example, saml/123456789123/OKTADev.

" + } + } + }, + "DeleteSecurityConfigResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteSecurityPolicyRequest":{ + "type":"structure", + "required":[ + "name", + "type" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

Unique, case-sensitive identifier to ensure idempotency of the request.

", + "idempotencyToken":true + }, + "name":{ + "shape":"PolicyName", + "documentation":"

The name of the policy to delete.

" + }, + "type":{ + "shape":"SecurityPolicyType", + "documentation":"

The type of policy.

" + } + } + }, + "DeleteSecurityPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteVpcEndpointDetail":{ + "type":"structure", + "members":{ + "id":{ + "shape":"VpcEndpointId", + "documentation":"

The unique identifier of the endpoint.

" + }, + "name":{ + "shape":"VpcEndpointName", + "documentation":"

The name of the endpoint.

" + }, + "status":{ + "shape":"VpcEndpointStatus", + "documentation":"

The current status of the endpoint deletion process.

" + } + }, + "documentation":"

Deletion details for an OpenSearch Serverless-managed interface endpoint.

" + }, + "DeleteVpcEndpointRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

Unique, case-sensitive identifier to ensure idempotency of the request.

", + "idempotencyToken":true + }, + "id":{ + "shape":"VpcEndpointId", + "documentation":"

The VPC endpoint identifier.

" + } + } + }, + "DeleteVpcEndpointResponse":{ + "type":"structure", + "members":{ + "deleteVpcEndpointDetail":{ + "shape":"DeleteVpcEndpointDetail", + "documentation":"

Details about the deleted endpoint.

" + } + } + }, + "Document":{ + "type":"structure", + "members":{ + }, + "document":true + }, + "GetAccessPolicyRequest":{ + "type":"structure", + "required":[ + "name", + "type" + ], + "members":{ + "name":{ + "shape":"PolicyName", + "documentation":"

The name of the access policy.

" + }, + "type":{ + "shape":"AccessPolicyType", + "documentation":"

The type of policy. Currently the only supported value is data.

" + } + } + }, + "GetAccessPolicyResponse":{ + "type":"structure", + "members":{ + "accessPolicyDetail":{ + "shape":"AccessPolicyDetail", + "documentation":"

Details about the requested access policy.

" + } + } + }, + "GetAccountSettingsRequest":{ + "type":"structure", + "members":{ + } + }, + "GetAccountSettingsResponse":{ + "type":"structure", + "members":{ + "accountSettingsDetail":{ + "shape":"AccountSettingsDetail", + "documentation":"

OpenSearch Serverless-related details for the current account.

" + } + } + }, + "GetPoliciesStatsRequest":{ + "type":"structure", + "members":{ + } + }, + "GetPoliciesStatsResponse":{ + "type":"structure", + "members":{ + "AccessPolicyStats":{ + "shape":"AccessPolicyStats", + "documentation":"

Information about the data access policies in your account.

" + }, + "SecurityConfigStats":{ + "shape":"SecurityConfigStats", + "documentation":"

Information about the security configurations in your account.

" + }, + "SecurityPolicyStats":{ + "shape":"SecurityPolicyStats", + "documentation":"

Information about the security policies in your account.

" + }, + "TotalPolicyCount":{ + "shape":"Long", + "documentation":"

The total number of OpenSearch Serverless security policies and configurations in your account.

" + } + } + }, + "GetSecurityConfigRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"SecurityConfigId", + "documentation":"

The unique identifier of the security configuration.

" + } + } + }, + "GetSecurityConfigResponse":{ + "type":"structure", + "members":{ + "securityConfigDetail":{ + "shape":"SecurityConfigDetail", + "documentation":"

Details of the requested security configuration.

" + } + } + }, + "GetSecurityPolicyRequest":{ + "type":"structure", + "required":[ + "name", + "type" + ], + "members":{ + "name":{ + "shape":"PolicyName", + "documentation":"

The name of the security policy.

" + }, + "type":{ + "shape":"SecurityPolicyType", + "documentation":"

The type of security policy.

" + } + } + }, + "GetSecurityPolicyResponse":{ + "type":"structure", + "members":{ + "securityPolicyDetail":{ + "shape":"SecurityPolicyDetail", + "documentation":"

Details about the requested security policy.

" + } + } + }, + "IndexingCapacityValue":{ + "type":"integer", + "box":true, + "min":2 + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

Thrown when an error internal to the service occurs while processing a request.

", + "exception":true, + "fault":true + }, + "ListAccessPoliciesRequest":{ + "type":"structure", + "required":["type"], + "members":{ + "maxResults":{ + "shape":"ListAccessPoliciesRequestMaxResultsInteger", + "documentation":"

An optional parameter that specifies the maximum number of results to return. You can use nextToken to get the next page of results. The default is 20.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

If your initial ListAccessPolicies operation returns a nextToken, you can include the returned nextToken in subsequent ListAccessPolicies operations, which returns results in the next page.

" + }, + "resource":{ + "shape":"ListAccessPoliciesRequestResourceList", + "documentation":"

Resource filters (can be collection or indexes) that policies can apply to.

" + }, + "type":{ + "shape":"AccessPolicyType", + "documentation":"

The type of access policy.

" + } + } + }, + "ListAccessPoliciesRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListAccessPoliciesRequestResourceList":{ + "type":"list", + "member":{"shape":"Resource"}, + "max":1000, + "min":1 + }, + "ListAccessPoliciesResponse":{ + "type":"structure", + "members":{ + "accessPolicySummaries":{ + "shape":"AccessPolicySummaries", + "documentation":"

Details about the requested access policies.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + } + } + }, + "ListCollectionsRequest":{ + "type":"structure", + "members":{ + "collectionFilters":{ + "shape":"CollectionFilters", + "documentation":"

List of filter names and values that you can use for requests.

" + }, + "maxResults":{ + "shape":"ListCollectionsRequestMaxResultsInteger", + "documentation":"

The maximum number of results to return. Default is 20. You can use nextToken to get the next page of results.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

If your initial ListCollections operation returns a nextToken, you can include the returned nextToken in subsequent ListCollections operations, which returns results in the next page.

" + } + } + }, + "ListCollectionsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListCollectionsResponse":{ + "type":"structure", + "members":{ + "collectionSummaries":{ + "shape":"CollectionSummaries", + "documentation":"

Details about each collection.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + } + } + }, + "ListSecurityConfigsRequest":{ + "type":"structure", + "required":["type"], + "members":{ + "maxResults":{ + "shape":"ListSecurityConfigsRequestMaxResultsInteger", + "documentation":"

An optional parameter that specifies the maximum number of results to return. You can use nextToken to get the next page of results. The default is 20.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

If your initial ListSecurityConfigs operation returns a nextToken, you can include the returned nextToken in subsequent ListSecurityConfigs operations, which returns results in the next page.

" + }, + "type":{ + "shape":"SecurityConfigType", + "documentation":"

The type of security configuration.

" + } + } + }, + "ListSecurityConfigsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListSecurityConfigsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + }, + "securityConfigSummaries":{ + "shape":"SecurityConfigSummaries", + "documentation":"

Details about the security configurations in your account.

" + } + } + }, + "ListSecurityPoliciesRequest":{ + "type":"structure", + "required":["type"], + "members":{ + "maxResults":{ + "shape":"ListSecurityPoliciesRequestMaxResultsInteger", + "documentation":"

An optional parameter that specifies the maximum number of results to return. You can use nextToken to get the next page of results. The default is 20.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

If your initial ListSecurityPolicies operation returns a nextToken, you can include the returned nextToken in subsequent ListSecurityPolicies operations, which returns results in the next page.

" + }, + "resource":{ + "shape":"ListSecurityPoliciesRequestResourceList", + "documentation":"

Resource filters (can be collections or indexes) that policies can apply to.

" + }, + "type":{ + "shape":"SecurityPolicyType", + "documentation":"

The type of policy.

" + } + } + }, + "ListSecurityPoliciesRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListSecurityPoliciesRequestResourceList":{ + "type":"list", + "member":{"shape":"Resource"}, + "max":1000, + "min":1 + }, + "ListSecurityPoliciesResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + }, + "securityPolicySummaries":{ + "shape":"SecurityPolicySummaries", + "documentation":"

Details about the security policies in your account.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource. The resource must be active (not in the DELETING state), and must be owned by the account ID included in the request.

" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"Tags", + "documentation":"

The tags associated with the resource.

" + } + } + }, + "ListVpcEndpointsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListVpcEndpointsRequestMaxResultsInteger", + "documentation":"

An optional parameter that specifies the maximum number of results to return. You can use nextToken to get the next page of results. The default is 20.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

If your initial ListVpcEndpoints operation returns a nextToken, you can include the returned nextToken in subsequent ListVpcEndpoints operations, which returns results in the next page.

" + }, + "vpcEndpointFilters":{ + "shape":"VpcEndpointFilters", + "documentation":"

Filter the results according to the current status of the VPC endpoint. Possible statuses are CREATING, DELETING, UPDATING, ACTIVE, and FAILED.

" + } + } + }, + "ListVpcEndpointsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListVpcEndpointsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + }, + "vpcEndpointSummaries":{ + "shape":"VpcEndpointSummaries", + "documentation":"

Details about each VPC endpoint, including the name and current status.

" + } + } + }, + "Long":{ + "type":"long", + "box":true + }, + "PolicyDescription":{ + "type":"string", + "max":1000, + "min":1 + }, + "PolicyDocument":{ + "type":"string", + "max":20480, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u00A1-\\u00FF]+" + }, + "PolicyName":{ + "type":"string", + "max":32, + "min":3, + "pattern":"^[a-z][a-z0-9-]+$" + }, + "PolicyVersion":{ + "type":"string", + "max":36, + "min":20, + "pattern":"^([0-9a-zA-Z+/]{4})*(([0-9a-zA-Z+/]{2}==)|([0-9a-zA-Z+/]{3}=))?$" + }, + "Resource":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

Thrown when accessing or deleting a resource that does not exist.

", + "exception":true + }, + "SamlConfigOptions":{ + "type":"structure", + "required":["metadata"], + "members":{ + "groupAttribute":{ + "shape":"samlGroupAttribute", + "documentation":"

The group attribute for this SAML integration.

" + }, + "metadata":{ + "shape":"samlMetadata", + "documentation":"

The XML IdP metadata file generated from your identity provider.

" + }, + "sessionTimeout":{ + "shape":"SamlConfigOptionsSessionTimeoutInteger", + "documentation":"

The session timeout, in minutes. Minimum is 5 minutes and maximum is 720 minutes (12 hours). Default is 60 minutes.

" + }, + "userAttribute":{ + "shape":"samlUserAttribute", + "documentation":"

A user attribute for this SAML integration.

" + } + }, + "documentation":"

Describes SAML options for an OpenSearch Serverless security configuration in the form of a key-value map.

" + }, + "SamlConfigOptionsSessionTimeoutInteger":{ + "type":"integer", + "box":true, + "max":720, + "min":5 + }, + "SearchCapacityValue":{ + "type":"integer", + "box":true, + "min":2 + }, + "SecurityConfigDetail":{ + "type":"structure", + "members":{ + "configVersion":{ + "shape":"PolicyVersion", + "documentation":"

The version of the security configuration.

" + }, + "createdDate":{ + "shape":"Long", + "documentation":"

The date the configuration was created.

" + }, + "description":{ + "shape":"ConfigDescription", + "documentation":"

The description of the security configuration.

" + }, + "id":{ + "shape":"SecurityConfigId", + "documentation":"

The unique identifier of the security configuration.

" + }, + "lastModifiedDate":{ + "shape":"Long", + "documentation":"

The timestamp of when the configuration was last modified.

" + }, + "samlOptions":{ + "shape":"SamlConfigOptions", + "documentation":"

SAML options for the security configuration in the form of a key-value map.

" + }, + "type":{ + "shape":"SecurityConfigType", + "documentation":"

The type of security configuration.

" + } + }, + "documentation":"

Details about a security configuration for OpenSearch Serverless.

" + }, + "SecurityConfigId":{ + "type":"string", + "max":100, + "min":1 + }, + "SecurityConfigStats":{ + "type":"structure", + "members":{ + "SamlConfigCount":{ + "shape":"Long", + "documentation":"

The number of security configurations in the current account.

" + } + }, + "documentation":"

Statistics for an OpenSearch Serverless security configuration.

" + }, + "SecurityConfigSummaries":{ + "type":"list", + "member":{"shape":"SecurityConfigSummary"} + }, + "SecurityConfigSummary":{ + "type":"structure", + "members":{ + "configVersion":{ + "shape":"PolicyVersion", + "documentation":"

The version of the security configuration.

" + }, + "createdDate":{ + "shape":"Long", + "documentation":"

The Epoch time when the security configuration was created.

" + }, + "description":{ + "shape":"ConfigDescription", + "documentation":"

The description of the security configuration.

" + }, + "id":{ + "shape":"SecurityConfigId", + "documentation":"

The unique identifier of the security configuration.

" + }, + "lastModifiedDate":{ + "shape":"Long", + "documentation":"

The timestamp of when the configuration was last modified.

" + }, + "type":{ + "shape":"SecurityConfigType", + "documentation":"

The type of security configuration.

" + } + }, + "documentation":"

A summary of a security configuration for OpenSearch Serverless.

" + }, + "SecurityConfigType":{ + "type":"string", + "enum":["saml"] + }, + "SecurityGroupId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[\\w+\\-]+$" + }, + "SecurityGroupIds":{ + "type":"list", + "member":{"shape":"SecurityGroupId"}, + "max":5, + "min":1 + }, + "SecurityPolicyDetail":{ + "type":"structure", + "members":{ + "createdDate":{ + "shape":"Long", + "documentation":"

The date the policy was created.

" + }, + "description":{ + "shape":"PolicyDescription", + "documentation":"

The description of the security policy.

" + }, + "lastModifiedDate":{ + "shape":"Long", + "documentation":"

The timestamp of when the policy was last modified.

" + }, + "name":{ + "shape":"PolicyName", + "documentation":"

The name of the policy.

" + }, + "policy":{ + "shape":"Document", + "documentation":"

The JSON policy document without any whitespaces.

" + }, + "policyVersion":{ + "shape":"PolicyVersion", + "documentation":"

The version of the policy.

" + }, + "type":{ + "shape":"SecurityPolicyType", + "documentation":"

The type of security policy.

" + } + }, + "documentation":"

Details about an OpenSearch Serverless security policy.

" + }, + "SecurityPolicyStats":{ + "type":"structure", + "members":{ + "EncryptionPolicyCount":{ + "shape":"Long", + "documentation":"

The number of encryption policies in the current account.

" + }, + "NetworkPolicyCount":{ + "shape":"Long", + "documentation":"

The number of network policies in the current account.

" + } + }, + "documentation":"

Statistics for an OpenSearch Serverless security policy.

" + }, + "SecurityPolicySummaries":{ + "type":"list", + "member":{"shape":"SecurityPolicySummary"} + }, + "SecurityPolicySummary":{ + "type":"structure", + "members":{ + "createdDate":{ + "shape":"Long", + "documentation":"

The date the policy was created.

" + }, + "description":{ + "shape":"PolicyDescription", + "documentation":"

The description of the security policy.

" + }, + "lastModifiedDate":{ + "shape":"Long", + "documentation":"

The timestamp of when the policy was last modified.

" + }, + "name":{ + "shape":"PolicyName", + "documentation":"

The name of the policy.

" + }, + "policyVersion":{ + "shape":"PolicyVersion", + "documentation":"

The version of the policy.

" + }, + "type":{ + "shape":"SecurityPolicyType", + "documentation":"

The type of security policy.

" + } + }, + "documentation":"

A summary of a security policy for OpenSearch Serverless.

" + }, + "SecurityPolicyType":{ + "type":"string", + "enum":[ + "encryption", + "network" + ] + }, + "String":{"type":"string"}, + "SubnetId":{ + "type":"string", + "max":32, + "min":1, + "pattern":"^subnet-([0-9a-f]{8}|[0-9a-f]{17})$" + }, + "SubnetIds":{ + "type":"list", + "member":{"shape":"SubnetId"}, + "max":6, + "min":1 + }, + "Tag":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{ + "shape":"TagKey", + "documentation":"

The key to use in the tag.

" + }, + "value":{ + "shape":"TagValue", + "documentation":"

The value of the tag.

" + } + }, + "documentation":"

A map of key-value pairs associated to an OpenSearch Serverless resource.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource. The resource must be active (not in the DELETING state), and must be owned by the account ID included in the request.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

A list of tags (key-value pairs) to add to the resource. All tag keys in the request must be unique.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "Tags":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":50, + "min":0 + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource to remove tags from. The resource must be active (not in the DELETING state), and must be owned by the account ID included in the request.

" + }, + "tagKeys":{ + "shape":"TagKeys", + "documentation":"

The tag or set of tags to remove from the resource. All tag keys in the request must be unique.

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateAccessPolicyRequest":{ + "type":"structure", + "required":[ + "name", + "policyVersion", + "type" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

Unique, case-sensitive identifier to ensure idempotency of the request.

", + "idempotencyToken":true + }, + "description":{ + "shape":"PolicyDescription", + "documentation":"

A description of the policy. Typically used to store information about the permissions defined in the policy.

" + }, + "name":{ + "shape":"PolicyName", + "documentation":"

The name of the policy.

" + }, + "policy":{ + "shape":"PolicyDocument", + "documentation":"

The JSON policy document to use as the content for the policy.

" + }, + "policyVersion":{ + "shape":"PolicyVersion", + "documentation":"

The version of the policy being updated.

" + }, + "type":{ + "shape":"AccessPolicyType", + "documentation":"

The type of policy.

" + } + } + }, + "UpdateAccessPolicyResponse":{ + "type":"structure", + "members":{ + "accessPolicyDetail":{ + "shape":"AccessPolicyDetail", + "documentation":"

Details about the updated access policy.

" + } + } + }, + "UpdateAccountSettingsRequest":{ + "type":"structure", + "members":{ + "capacityLimits":{"shape":"CapacityLimits"} + } + }, + "UpdateAccountSettingsResponse":{ + "type":"structure", + "members":{ + "accountSettingsDetail":{ + "shape":"AccountSettingsDetail", + "documentation":"

OpenSearch Serverless-related settings for the current Amazon Web Services account.

" + } + } + }, + "UpdateCollectionDetail":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the collection.

" + }, + "createdDate":{ + "shape":"Long", + "documentation":"

The date and time when the collection was created.

" + }, + "description":{ + "shape":"String", + "documentation":"

The description of the collection.

" + }, + "id":{ + "shape":"CollectionId", + "documentation":"

The unique identifier of the collection.

" + }, + "lastModifiedDate":{ + "shape":"Long", + "documentation":"

The date and time when the collection was last modified.

" + }, + "name":{ + "shape":"CollectionName", + "documentation":"

The name of the collection.

" + }, + "status":{ + "shape":"CollectionStatus", + "documentation":"

The current status of the collection.

" + }, + "type":{ + "shape":"CollectionType", + "documentation":"

The collection type.

" + } + }, + "documentation":"

Details about an updated OpenSearch Serverless collection.

" + }, + "UpdateCollectionRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

Unique, case-sensitive identifier to ensure idempotency of the request.

", + "idempotencyToken":true + }, + "description":{ + "shape":"UpdateCollectionRequestDescriptionString", + "documentation":"

A description of the collection.

" + }, + "id":{ + "shape":"CollectionId", + "documentation":"

The unique identifier of the collection.

" + } + } + }, + "UpdateCollectionRequestDescriptionString":{ + "type":"string", + "max":1000, + "min":0 + }, + "UpdateCollectionResponse":{ + "type":"structure", + "members":{ + "updateCollectionDetail":{ + "shape":"UpdateCollectionDetail", + "documentation":"

Details about the updated collection.

" + } + } + }, + "UpdateSecurityConfigRequest":{ + "type":"structure", + "required":[ + "configVersion", + "id" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

Unique, case-sensitive identifier to ensure idempotency of the request.

", + "idempotencyToken":true + }, + "configVersion":{ + "shape":"PolicyVersion", + "documentation":"

The version of the security configuration to be updated. You can find the most recent version of a security configuration using the GetSecurityConfig command.

" + }, + "description":{ + "shape":"ConfigDescription", + "documentation":"

A description of the security configuration.

" + }, + "id":{ + "shape":"SecurityConfigId", + "documentation":"

The security configuration identifier. For SAML the ID will be saml/<accountId>/<idpProviderName>. For example, saml/123456789123/OKTADev.

" + }, + "samlOptions":{ + "shape":"SamlConfigOptions", + "documentation":"

SAML options in the form of a key-value map.

" + } + } + }, + "UpdateSecurityConfigResponse":{ + "type":"structure", + "members":{ + "securityConfigDetail":{ + "shape":"SecurityConfigDetail", + "documentation":"

Details about the updated security configuration.

" + } + } + }, + "UpdateSecurityPolicyRequest":{ + "type":"structure", + "required":[ + "name", + "policyVersion", + "type" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

Unique, case-sensitive identifier to ensure idempotency of the request.

", + "idempotencyToken":true + }, + "description":{ + "shape":"PolicyDescription", + "documentation":"

A description of the policy. Typically used to store information about the permissions defined in the policy.

" + }, + "name":{ + "shape":"PolicyName", + "documentation":"

The name of the policy.

" + }, + "policy":{ + "shape":"PolicyDocument", + "documentation":"

The JSON policy document to use as the content for the new policy.

" + }, + "policyVersion":{ + "shape":"PolicyVersion", + "documentation":"

The version of the policy being updated.

" + }, + "type":{ + "shape":"SecurityPolicyType", + "documentation":"

The type of security policy.

" + } + } + }, + "UpdateSecurityPolicyResponse":{ + "type":"structure", + "members":{ + "securityPolicyDetail":{ + "shape":"SecurityPolicyDetail", + "documentation":"

Details about the updated security policy.

" + } + } + }, + "UpdateVpcEndpointDetail":{ + "type":"structure", + "members":{ + "id":{ + "shape":"VpcEndpointId", + "documentation":"

The unique identifier of the endpoint.

" + }, + "lastModifiedDate":{ + "shape":"Long", + "documentation":"

The timestamp of when the endpoint was last modified.

" + }, + "name":{ + "shape":"VpcEndpointName", + "documentation":"

The name of the endpoint.

" + }, + "securityGroupIds":{ + "shape":"SecurityGroupIds", + "documentation":"

The unique identifiers of the security groups that define the ports, protocols, and sources for inbound traffic that you are authorizing into your endpoint.

" + }, + "status":{ + "shape":"VpcEndpointStatus", + "documentation":"

The current status of the endpoint update process.

" + }, + "subnetIds":{ + "shape":"SubnetIds", + "documentation":"

The ID of the subnets from which you access OpenSearch Serverless.

" + } + }, + "documentation":"

Update details for an OpenSearch Serverless-managed interface endpoint.

" + }, + "UpdateVpcEndpointRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "addSecurityGroupIds":{ + "shape":"SecurityGroupIds", + "documentation":"

The unique identifiers of the security groups to add to the endpoint. Security groups define the ports, protocols, and sources for inbound traffic that you are authorizing into your endpoint.

" + }, + "addSubnetIds":{ + "shape":"SubnetIds", + "documentation":"

The ID of one or more subnets to add to the endpoint.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

Unique, case-sensitive identifier to ensure idempotency of the request.

", + "idempotencyToken":true + }, + "id":{ + "shape":"VpcEndpointId", + "documentation":"

The unique identifier of the interface endpoint to update.

" + }, + "removeSecurityGroupIds":{ + "shape":"SecurityGroupIds", + "documentation":"

The unique identifiers of the security groups to remove from the endpoint.

" + }, + "removeSubnetIds":{ + "shape":"SubnetIds", + "documentation":"

The unique identifiers of the subnets to remove from the endpoint.

" + } + } + }, + "UpdateVpcEndpointResponse":{ + "type":"structure", + "members":{ + "UpdateVpcEndpointDetail":{ + "shape":"UpdateVpcEndpointDetail", + "documentation":"

Details about the updated VPC endpoint.

" + } + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

Thrown when the HTTP request contains invalid input or is missing required input.

", + "exception":true + }, + "VpcEndpointDetail":{ + "type":"structure", + "members":{ + "createdDate":{ + "shape":"Long", + "documentation":"

The date the endpoint was created.

" + }, + "id":{ + "shape":"VpcEndpointId", + "documentation":"

The unique identifier of the endpoint.

" + }, + "name":{ + "shape":"VpcEndpointName", + "documentation":"

The name of the endpoint.

" + }, + "securityGroupIds":{ + "shape":"SecurityGroupIds", + "documentation":"

The unique identifiers of the security groups that define the ports, protocols, and sources for inbound traffic that you are authorizing into your endpoint.

" + }, + "status":{ + "shape":"VpcEndpointStatus", + "documentation":"

The current status of the endpoint.

" + }, + "subnetIds":{ + "shape":"SubnetIds", + "documentation":"

The ID of the subnets from which you access OpenSearch Serverless.

" + }, + "vpcId":{ + "shape":"VpcId", + "documentation":"

The ID of the VPC from which you access OpenSearch Serverless.

" + } + }, + "documentation":"

Details about an OpenSearch Serverless-managed interface endpoint.

" + }, + "VpcEndpointDetails":{ + "type":"list", + "member":{"shape":"VpcEndpointDetail"} + }, + "VpcEndpointErrorDetail":{ + "type":"structure", + "members":{ + "errorCode":{ + "shape":"String", + "documentation":"

The error code for the failed request.

" + }, + "errorMessage":{ + "shape":"String", + "documentation":"

An error message describing the reason for the failure.

" + }, + "id":{ + "shape":"VpcEndpointId", + "documentation":"

The unique identifier of the VPC endpoint.

" + } + }, + "documentation":"

Error information for a failed BatchGetVpcEndpoint request.

" + }, + "VpcEndpointErrorDetails":{ + "type":"list", + "member":{"shape":"VpcEndpointErrorDetail"} + }, + "VpcEndpointFilters":{ + "type":"structure", + "members":{ + "status":{ + "shape":"VpcEndpointStatus", + "documentation":"

The current status of the endpoint.

" + } + }, + "documentation":"

Filter the results of a ListVpcEndpoints request.

" + }, + "VpcEndpointId":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^vpce-[0-9a-z]*$" + }, + "VpcEndpointIds":{ + "type":"list", + "member":{"shape":"VpcEndpointId"}, + "min":1 + }, + "VpcEndpointName":{ + "type":"string", + "max":32, + "min":3, + "pattern":"^[a-z][a-z0-9-]+$" + }, + "VpcEndpointStatus":{ + "type":"string", + "enum":[ + "PENDING", + "DELETING", + "ACTIVE", + "FAILED" + ] + }, + "VpcEndpointSummaries":{ + "type":"list", + "member":{"shape":"VpcEndpointSummary"} + }, + "VpcEndpointSummary":{ + "type":"structure", + "members":{ + "id":{ + "shape":"VpcEndpointId", + "documentation":"

The unique identifier of the endpoint.

" + }, + "name":{ + "shape":"VpcEndpointName", + "documentation":"

The name of the endpoint.

" + }, + "status":{ + "shape":"VpcEndpointStatus", + "documentation":"

The current status of the endpoint.

" + } + }, + "documentation":"

The VPC endpoint object.

" + }, + "VpcId":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^vpc-[0-9a-z]*$" + }, + "samlGroupAttribute":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[\\w+=,.@-]+" + }, + "samlMetadata":{ + "type":"string", + "max":20480, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u00A1-\\u00FF]+" + }, + "samlUserAttribute":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[\\w+=,.@-]+" + } + }, + "documentation":"

Use the Amazon OpenSearch Serverless API to create, configure, and manage OpenSearch Serverless collections and security policies.

OpenSearch Serverless is an on-demand, pre-provisioned serverless configuration for Amazon OpenSearch Service. OpenSearch Serverless removes the operational complexities of provisioning, configuring, and tuning your OpenSearch clusters. It enables you to easily search and analyze petabytes of data without having to worry about the underlying infrastructure and data management.

To learn more about OpenSearch Serverless, see What is Amazon OpenSearch Serverless?

" +} diff --git a/botocore/data/securitylake/2018-05-10/endpoint-rule-set-1.json b/botocore/data/securitylake/2018-05-10/endpoint-rule-set-1.json new file mode 100644 index 0000000000..14d2d67cf1 --- /dev/null +++ b/botocore/data/securitylake/2018-05-10/endpoint-rule-set-1.json @@ -0,0 +1,309 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": true, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": 
"UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://securitylake-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://securitylake-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + 
"ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://securitylake.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://securitylake.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] +} \ No newline at end of file diff --git a/botocore/data/securitylake/2018-05-10/paginators-1.json b/botocore/data/securitylake/2018-05-10/paginators-1.json new file mode 100644 index 0000000000..4720717302 --- /dev/null +++ b/botocore/data/securitylake/2018-05-10/paginators-1.json @@ -0,0 +1,28 @@ +{ + "pagination": { + "GetDatalakeStatus": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxAccountResults", + "result_key": "accountSourcesList" + }, + "ListDatalakeExceptions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxFailures", + "result_key": "nonRetryableFailures" + }, + "ListLogSources": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "regionSourceTypesAccountsList" + }, + "ListSubscribers": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "subscribers" + } + } +} diff --git a/botocore/data/securitylake/2018-05-10/service-2.json b/botocore/data/securitylake/2018-05-10/service-2.json new file mode 100644 index 0000000000..85963fee4d --- /dev/null +++ 
b/botocore/data/securitylake/2018-05-10/service-2.json @@ -0,0 +1,2224 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-05-10", + "endpointPrefix":"securitylake", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon Security Lake", + "serviceId":"SecurityLake", + "signatureVersion":"v4", + "signingName":"securitylake", + "uid":"securitylake-2018-05-10" + }, + "operations":{ + "CreateAwsLogSource":{ + "name":"CreateAwsLogSource", + "http":{ + "method":"POST", + "requestUri":"/v1/logsources/aws", + "responseCode":200 + }, + "input":{"shape":"CreateAwsLogSourceRequest"}, + "output":{"shape":"CreateAwsLogSourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"S3Exception"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccountNotFoundException"} + ], + "documentation":"

Adds a natively-supported Amazon Web Services service as a Security Lake source. Enables source types for member accounts in required Regions, based on specified parameters. You can choose any source type in any Region for accounts that are either part of a trusted organization or standalone accounts. At least one of the three dimensions is a mandatory input to this API. However, any combination of the three dimensions can be supplied to this API.

By default, dimension refers to the entire set. When you don't provide a dimension, Security Lake assumes that the missing dimension refers to the entire set. This is overridden when you supply any one of the inputs. For instance, when members is not specified, the API disables all Security Lake member accounts for sources. Similarly, when Regions are not specified, Security Lake is disabled for all the Regions where Security Lake is available as a service.

You can use this API only to enable a natively supported Amazon Web Services service as a source. Use CreateCustomLogSource to enable data collection from a custom source.

" + }, + "CreateCustomLogSource":{ + "name":"CreateCustomLogSource", + "http":{ + "method":"POST", + "requestUri":"/v1/logsources/custom", + "responseCode":200 + }, + "input":{"shape":"CreateCustomLogSourceRequest"}, + "output":{"shape":"CreateCustomLogSourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictSourceNamesException"}, + {"shape":"AccessDeniedException"}, + {"shape":"BucketNotFoundException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccountNotFoundException"} + ], + "documentation":"

Adds a third-party custom source in Amazon Security Lake, from the Region where you want to create a custom source. Security Lake can collect logs and events from third-party custom sources. After creating the appropriate API roles, use this API to add a custom source name in Security Lake. This operation creates a partition in the Security Lake S3 bucket as the target location for log files from the custom source, an associated Glue table, and a Glue crawler.

" + }, + "CreateDatalake":{ + "name":"CreateDatalake", + "http":{ + "method":"POST", + "requestUri":"/v1/datalake", + "responseCode":200 + }, + "input":{"shape":"CreateDatalakeRequest"}, + "output":{"shape":"CreateDatalakeResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Initializes an Amazon Security Lake instance with the provided (or default) configuration. You can enable Security Lake in Regions with customized settings in advance before enabling log collection in Regions. You can either use the enableAll parameter to specify all Regions or you can specify the Regions you want to enable Security Lake using the Regions parameter and configure these Regions using the configurations parameter. When the CreateDataLake API is called multiple times, if that Region is already enabled, it will update the Region if configuration for that Region is provided. If that Region is a new Region, it will be set up with the customized configurations if it is specified.

When you enable Security Lake, it starts ingesting security data after the CreateAwsLogSource call. This includes ingesting security data from sources, storing data, and making data accessible to subscribers. Security Lake also enables all the existing settings and resources that it stores or maintains for your account in the current Region, including security log and event data. For more information, see the Amazon Security Lake User Guide.

" + }, + "CreateDatalakeAutoEnable":{ + "name":"CreateDatalakeAutoEnable", + "http":{ + "method":"POST", + "requestUri":"/v1/datalake/autoenable", + "responseCode":200 + }, + "input":{"shape":"CreateDatalakeAutoEnableRequest"}, + "output":{"shape":"CreateDatalakeAutoEnableResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"AccountNotFoundException"} + ], + "documentation":"

Automatically enable Security Lake in the specified Regions to begin ingesting security data. When you choose to enable organization accounts automatically, then Security Lake begins to enable new accounts as member accounts as they are added to the organization. Security Lake does not enable existing organization accounts that are not yet enabled.

" + }, + "CreateDatalakeDelegatedAdmin":{ + "name":"CreateDatalakeDelegatedAdmin", + "http":{ + "method":"POST", + "requestUri":"/v1/datalake/delegate", + "responseCode":200 + }, + "input":{"shape":"CreateDatalakeDelegatedAdminRequest"}, + "output":{"shape":"CreateDatalakeDelegatedAdminResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Designates the Security Lake administrator account for the organization. This API can only be called by the organization management account. The organization management account cannot be the delegated administrator account.

" + }, + "CreateDatalakeExceptionsSubscription":{ + "name":"CreateDatalakeExceptionsSubscription", + "http":{ + "method":"POST", + "requestUri":"/v1/datalake/exceptions/subscription", + "responseCode":200 + }, + "input":{"shape":"CreateDatalakeExceptionsSubscriptionRequest"}, + "output":{"shape":"CreateDatalakeExceptionsSubscriptionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"AccountNotFoundException"} + ], + "documentation":"

Creates the specified notification subscription in Security Lake. Creates the specified subscription notifications in the specified organization.

" + }, + "CreateSubscriber":{ + "name":"CreateSubscriber", + "http":{ + "method":"POST", + "requestUri":"/v1/subscribers", + "responseCode":200 + }, + "input":{"shape":"CreateSubscriberRequest"}, + "output":{"shape":"CreateSubscriberResponse"}, + "errors":[ + {"shape":"ConflictSubscriptionException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"BucketNotFoundException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccountNotFoundException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Creates a subscription permission for accounts that are already enabled in Security Lake.

" + }, + "CreateSubscriptionNotificationConfiguration":{ + "name":"CreateSubscriptionNotificationConfiguration", + "http":{ + "method":"POST", + "requestUri":"/subscription-notifications/{subscriptionId}", + "responseCode":200 + }, + "input":{"shape":"CreateSubscriptionNotificationConfigurationRequest"}, + "output":{"shape":"CreateSubscriptionNotificationConfigurationResponse"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccountNotFoundException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Creates the specified notification subscription in Security Lake. Creates the specified subscription notifications from the specified organization.

" + }, + "DeleteAwsLogSource":{ + "name":"DeleteAwsLogSource", + "http":{ + "method":"POST", + "requestUri":"/v1/logsources/aws/delete", + "responseCode":200 + }, + "input":{"shape":"DeleteAwsLogSourceRequest"}, + "output":{"shape":"DeleteAwsLogSourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"AccountNotFoundException"} + ], + "documentation":"

Removes a natively-supported Amazon Web Services service as an Amazon Security Lake source. When you remove the source, Security Lake stops collecting data from that source, and subscribers can no longer consume new data from the source. Subscribers can still consume data that Amazon Security Lake collected from the source before disablement.

You can choose any source type in any Region for accounts that are either part of a trusted organization or standalone accounts. At least one of the three dimensions is a mandatory input to this API. However, any combination of the three dimensions can be supplied to this API.

By default, dimension refers to the entire set. This is overridden when you supply any one of the inputs. For instance, when members is not specified, the API disables all Security Lake member accounts for sources. Similarly, when Regions are not specified, Security Lake is disabled for all the Regions where Security Lake is available as a service.

You can use this API to remove a natively-supported Amazon Web Services service as a source. Use DeregisterCustomData to remove a custom source.

When you don't provide a dimension, Security Lake assumes that the missing dimension refers to the entire set. For example, if you don't provide specific accounts, the API applies to the entire set of accounts in your organization.

" + }, + "DeleteCustomLogSource":{ + "name":"DeleteCustomLogSource", + "http":{ + "method":"DELETE", + "requestUri":"/v1/logsources/custom", + "responseCode":200 + }, + "input":{"shape":"DeleteCustomLogSourceRequest"}, + "output":{"shape":"DeleteCustomLogSourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictSourceNamesException"}, + {"shape":"AccessDeniedException"}, + {"shape":"BucketNotFoundException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccountNotFoundException"} + ], + "documentation":"

Removes a custom log source from Security Lake.

", + "idempotent":true + }, + "DeleteDatalake":{ + "name":"DeleteDatalake", + "http":{ + "method":"DELETE", + "requestUri":"/v1/datalake", + "responseCode":200 + }, + "input":{"shape":"DeleteDatalakeRequest"}, + "output":{"shape":"DeleteDatalakeResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

When you delete Amazon Security Lake from your account, Security Lake is disabled in all Regions. Also, this API automatically performs the off-boarding steps to off-board the account from Security Lake. This includes ingesting security data from sources, storing data, and making data accessible to subscribers. Security Lake also deletes all the existing settings and resources that it stores or maintains for your account in the current Region, including security log and event data. DeleteDatalake does not delete the S3 bucket which is owned by the Amazon Web Services account. For more information, see the Amazon Security Lake User Guide.

", + "idempotent":true + }, + "DeleteDatalakeAutoEnable":{ + "name":"DeleteDatalakeAutoEnable", + "http":{ + "method":"POST", + "requestUri":"/v1/datalake/autoenable/delete", + "responseCode":200 + }, + "input":{"shape":"DeleteDatalakeAutoEnableRequest"}, + "output":{"shape":"DeleteDatalakeAutoEnableResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"AccountNotFoundException"} + ], + "documentation":"

Automatically delete Security Lake in the specified Regions to stop ingesting security data. When you delete Amazon Security Lake from your account, Security Lake is disabled in all Regions. Also, this API automatically performs the off-boarding steps to off-board the account from Security Lake. This includes ingesting security data from sources, storing data, and making data accessible to subscribers. Security Lake also deletes all the existing settings and resources that it stores or maintains for your account in the current Region, including security log and event data. For more information, see the Amazon Security Lake User Guide.

" + }, + "DeleteDatalakeDelegatedAdmin":{ + "name":"DeleteDatalakeDelegatedAdmin", + "http":{ + "method":"DELETE", + "requestUri":"/v1/datalake/delegate/{account}", + "responseCode":200 + }, + "input":{"shape":"DeleteDatalakeDelegatedAdminRequest"}, + "output":{"shape":"DeleteDatalakeDelegatedAdminResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Deletes the Security Lake administrator account for the organization. This API can only be called by the organization management account. The organization management account cannot be the delegated administrator account.

" + }, + "DeleteDatalakeExceptionsSubscription":{ + "name":"DeleteDatalakeExceptionsSubscription", + "http":{ + "method":"DELETE", + "requestUri":"/v1/datalake/exceptions/subscription", + "responseCode":200 + }, + "input":{"shape":"DeleteDatalakeExceptionsSubscriptionRequest"}, + "output":{"shape":"DeleteDatalakeExceptionsSubscriptionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"AccountNotFoundException"} + ], + "documentation":"

Deletes the specified notification subscription in Security Lake. Deletes the specified subscription notifications in the specified organization.

" + }, + "DeleteSubscriber":{ + "name":"DeleteSubscriber", + "http":{ + "method":"DELETE", + "requestUri":"/v1/subscribers", + "responseCode":200 + }, + "input":{"shape":"DeleteSubscriberRequest"}, + "output":{"shape":"DeleteSubscriberResponse"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"BucketNotFoundException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccountNotFoundException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Deletes the specified subscription permissions to Security Lake. Deletes the specified subscription permissions from the specified organization.

", + "idempotent":true + }, + "DeleteSubscriptionNotificationConfiguration":{ + "name":"DeleteSubscriptionNotificationConfiguration", + "http":{ + "method":"DELETE", + "requestUri":"/subscription-notifications/{subscriptionId}", + "responseCode":200 + }, + "input":{"shape":"DeleteSubscriptionNotificationConfigurationRequest"}, + "output":{"shape":"DeleteSubscriptionNotificationConfigurationResponse"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccountNotFoundException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Deletes the specified notification subscription in Security Lake. Deletes the specified subscription notifications from the specified organization.

", + "idempotent":true + }, + "GetDatalake":{ + "name":"GetDatalake", + "http":{ + "method":"GET", + "requestUri":"/v1/datalake", + "responseCode":200 + }, + "input":{"shape":"GetDatalakeRequest"}, + "output":{"shape":"GetDatalakeResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccountNotFoundException"} + ], + "documentation":"

Retrieve the Security Lake configuration object for the specified account ID. This API does not take input parameters.

" + }, + "GetDatalakeAutoEnable":{ + "name":"GetDatalakeAutoEnable", + "http":{ + "method":"GET", + "requestUri":"/v1/datalake/autoenable", + "responseCode":200 + }, + "input":{"shape":"GetDatalakeAutoEnableRequest"}, + "output":{"shape":"GetDatalakeAutoEnableResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"AccountNotFoundException"} + ], + "documentation":"

Retrieves the configuration that will be automatically set up for accounts added to the organization after the organization has onboarded to Amazon Security Lake. This API does not take input parameters.

" + }, + "GetDatalakeExceptionsExpiry":{ + "name":"GetDatalakeExceptionsExpiry", + "http":{ + "method":"GET", + "requestUri":"/v1/datalake/exceptions/expiry", + "responseCode":200 + }, + "input":{"shape":"GetDatalakeExceptionsExpiryRequest"}, + "output":{"shape":"GetDatalakeExceptionsExpiryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"AccountNotFoundException"} + ], + "documentation":"

Retrieves the expiration period and time-to-live (TTL) for which the exception message will remain. Exceptions are stored by default, for a 2-week period of time from when a record was created in Security Lake. This API does not take input parameters.

" + }, + "GetDatalakeExceptionsSubscription":{ + "name":"GetDatalakeExceptionsSubscription", + "http":{ + "method":"GET", + "requestUri":"/v1/datalake/exceptions/subscription", + "responseCode":200 + }, + "input":{"shape":"GetDatalakeExceptionsSubscriptionRequest"}, + "output":{"shape":"GetDatalakeExceptionsSubscriptionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"AccountNotFoundException"} + ], + "documentation":"

Retrieves the details of exception notifications for the account in Amazon Security Lake.

" + }, + "GetDatalakeStatus":{ + "name":"GetDatalakeStatus", + "http":{ + "method":"POST", + "requestUri":"/v1/datalake/status", + "responseCode":200 + }, + "input":{"shape":"GetDatalakeStatusRequest"}, + "output":{"shape":"GetDatalakeStatusResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"AccountNotFoundException"} + ], + "documentation":"

Retrieve the Security Lake configuration object for the specified account ID. This API does not take input parameters.

" + }, + "GetSubscriber":{ + "name":"GetSubscriber", + "http":{ + "method":"GET", + "requestUri":"/v1/subscribers/{id}", + "responseCode":200 + }, + "input":{"shape":"GetSubscriberRequest"}, + "output":{"shape":"GetSubscriberResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccountNotFoundException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Retrieves subscription information for the specified subscription ID.

" + }, + "ListDatalakeExceptions":{ + "name":"ListDatalakeExceptions", + "http":{ + "method":"POST", + "requestUri":"/v1/datalake/exceptions", + "responseCode":200 + }, + "input":{"shape":"ListDatalakeExceptionsRequest"}, + "output":{"shape":"ListDatalakeExceptionsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"AccountNotFoundException"} + ], + "documentation":"

List the Amazon Security Lake exceptions that you can use to find the source of problems and fix them.

" + }, + "ListLogSources":{ + "name":"ListLogSources", + "http":{ + "method":"POST", + "requestUri":"/v1/logsources/list", + "responseCode":200 + }, + "input":{"shape":"ListLogSourcesRequest"}, + "output":{"shape":"ListLogSourcesResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccountNotFoundException"} + ], + "documentation":"

Lists the log sources in the current region.

" + }, + "ListSubscribers":{ + "name":"ListSubscribers", + "http":{ + "method":"GET", + "requestUri":"/v1/subscribers", + "responseCode":200 + }, + "input":{"shape":"ListSubscribersRequest"}, + "output":{"shape":"ListSubscribersResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccountNotFoundException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

List all subscribers for the specific Security Lake account ID.

" + }, + "UpdateDatalake":{ + "name":"UpdateDatalake", + "http":{ + "method":"PUT", + "requestUri":"/v1/datalake", + "responseCode":200 + }, + "input":{"shape":"UpdateDatalakeRequest"}, + "output":{"shape":"UpdateDatalakeResponse"}, + "errors":[ + {"shape":"EventBridgeException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Amazon Security Lake allows you to specify where to store your security data and for how long. You can specify a rollup Region to consolidate data from multiple Regions.

You can update the properties of a Region or source. Input can be directly specified to the API.

", + "idempotent":true + }, + "UpdateDatalakeExceptionsExpiry":{ + "name":"UpdateDatalakeExceptionsExpiry", + "http":{ + "method":"PUT", + "requestUri":"/v1/datalake/exceptions/expiry", + "responseCode":200 + }, + "input":{"shape":"UpdateDatalakeExceptionsExpiryRequest"}, + "output":{"shape":"UpdateDatalakeExceptionsExpiryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"AccountNotFoundException"} + ], + "documentation":"

Update the expiration period for the exception message to your preferred time, and control the time-to-live (TTL) for the exception message to remain. Exceptions are stored by default, for a 2 week period of time from when a record was created in Security Lake.

" + }, + "UpdateDatalakeExceptionsSubscription":{ + "name":"UpdateDatalakeExceptionsSubscription", + "http":{ + "method":"PUT", + "requestUri":"/v1/datalake/exceptions/subscription", + "responseCode":200 + }, + "input":{"shape":"UpdateDatalakeExceptionsSubscriptionRequest"}, + "output":{"shape":"UpdateDatalakeExceptionsSubscriptionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"AccountNotFoundException"} + ], + "documentation":"

Update the subscription notification for exception notification.

" + }, + "UpdateSubscriber":{ + "name":"UpdateSubscriber", + "http":{ + "method":"PUT", + "requestUri":"/v1/subscribers/{id}", + "responseCode":200 + }, + "input":{"shape":"UpdateSubscriberRequest"}, + "output":{"shape":"UpdateSubscriberResponse"}, + "errors":[ + {"shape":"ConflictSubscriptionException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"AccountNotFoundException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Update the subscription permission for the given Security Lake account ID.

", + "idempotent":true + }, + "UpdateSubscriptionNotificationConfiguration":{ + "name":"UpdateSubscriptionNotificationConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/subscription-notifications/{subscriptionId}", + "responseCode":200 + }, + "input":{"shape":"UpdateSubscriptionNotificationConfigurationRequest"}, + "output":{"shape":"UpdateSubscriptionNotificationConfigurationResponse"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccountNotFoundException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Create a new subscription notification or add the existing subscription notification setting for the specified subscription ID.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

You do not have sufficient access to perform this action. Access denied errors appear when Amazon Security Lake explicitly or implicitly denies an authorization request. An explicit denial occurs when a policy contains a Deny statement for the specific Amazon Web Services action. An implicit denial occurs when there is no applicable Deny statement and also no applicable Allow statement.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AccessType":{ + "type":"string", + "enum":[ + "LAKEFORMATION", + "S3" + ] + }, + "AccessTypeList":{ + "type":"list", + "member":{"shape":"AccessType"} + }, + "AccountList":{ + "type":"list", + "member":{"shape":"String"} + }, + "AccountNotFoundException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

Amazon Security Lake can't find an Amazon Web Services account with the accountID that you specified, or the account whose credentials you used to make this request isn't a member of an organization.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AccountSources":{ + "type":"structure", + "required":[ + "account", + "sourceType" + ], + "members":{ + "account":{ + "shape":"String", + "documentation":"

Account ID of the Security Lake account for which logs are collected.

" + }, + "eventClass":{ + "shape":"OcsfEventClass", + "documentation":"

Initializes a new instance of the Event class.

" + }, + "logsStatus":{ + "shape":"LogsStatusList", + "documentation":"

Log status for the Security Lake account.

" + }, + "sourceType":{ + "shape":"String", + "documentation":"

The supported Amazon Web Services services from which logs and events are collected. Amazon Security Lake supports logs and events collection for natively-supported Amazon Web Services services. For more information, see the Amazon Security Lake User Guide.

" + } + }, + "documentation":"

Security Lake can collect logs and events from supported Amazon Web Services services and custom sources.

" + }, + "AccountSourcesList":{ + "type":"list", + "member":{"shape":"AccountSources"} + }, + "AllDimensionsMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"TwoDimensionsMap"} + }, + "AutoEnableNewRegionConfiguration":{ + "type":"structure", + "required":[ + "region", + "sources" + ], + "members":{ + "region":{ + "shape":"Region", + "documentation":"

The Regions where Security Lake is automatically enabled.

" + }, + "sources":{ + "shape":"AwsSourceTypeList", + "documentation":"

The Amazon Web Services sources that are automatically enabled in Security Lake.

" + } + }, + "documentation":"

Automatically enable new organization accounts as member accounts from a Security Lake administrator account.

" + }, + "AutoEnableNewRegionConfigurationList":{ + "type":"list", + "member":{"shape":"AutoEnableNewRegionConfiguration"} + }, + "AwsAccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"^\\d+$" + }, + "AwsLogSourceType":{ + "type":"string", + "enum":[ + "ROUTE53", + "VPC_FLOW", + "CLOUD_TRAIL", + "SH_FINDINGS" + ] + }, + "AwsSourceTypeList":{ + "type":"list", + "member":{"shape":"AwsLogSourceType"} + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "BucketNotFoundException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

Amazon Security Lake generally returns 404 errors if the requested object is missing from the bucket.

", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "ConcurrentModificationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

More than one process tried to modify a resource at the same time.

", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":false} + }, + "ConflictException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

A conflict occurred when prompting for the Resource ID.

" + }, + "resourceType":{ + "shape":"String", + "documentation":"

The resource type.

" + } + }, + "documentation":"

Occurs when a conflict with a previous successful write is detected. This generally occurs when the previous write did not have time to propagate to the host serving the current request. A retry (with appropriate backoff logic) is the recommended response to this exception.

", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "ConflictSourceNamesException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

There was a conflict when you attempted to modify a Security Lake source name.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ConflictSubscriptionException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

A conflicting subscription exception operation is in progress.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CreateAwsLogSourceRequest":{ + "type":"structure", + "required":["inputOrder"], + "members":{ + "enableAllDimensions":{ + "shape":"AllDimensionsMap", + "documentation":"

Enables specific sources in all Regions and source types.

" + }, + "enableSingleDimension":{ + "shape":"InputSet", + "documentation":"

Enables all sources in specific accounts or Regions.

" + }, + "enableTwoDimensions":{ + "shape":"TwoDimensionsMap", + "documentation":"

Enables specific service sources in specific accounts or Regions.

" + }, + "inputOrder":{ + "shape":"DimensionSet", + "documentation":"

Specifies the input order to enable dimensions in Security Lake, namely region, source type, and member account.

" + } + } + }, + "CreateAwsLogSourceResponse":{ + "type":"structure", + "members":{ + "failed":{ + "shape":"AccountList", + "documentation":"

List of all accounts in which enabling a natively-supported Amazon Web Services service as a Security Lake source failed. The failure occurred as these accounts are not part of an organization.

" + }, + "processing":{ + "shape":"AccountList", + "documentation":"

List of all accounts which are in the process of enabling a natively-supported Amazon Web Services service as a Security Lake source.

" + } + } + }, + "CreateCustomLogSourceRequest":{ + "type":"structure", + "required":[ + "customSourceName", + "eventClass", + "glueInvocationRoleArn", + "logProviderAccountId" + ], + "members":{ + "customSourceName":{ + "shape":"CustomSourceType", + "documentation":"

The custom source name for a third-party custom source.

" + }, + "eventClass":{ + "shape":"OcsfEventClass", + "documentation":"

The Open Cybersecurity Schema Framework (OCSF) event class.

" + }, + "glueInvocationRoleArn":{ + "shape":"RoleArn", + "documentation":"

The IAM Role ARN to be used by the Glue Crawler. The recommended IAM policies are:

" + }, + "logProviderAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The Account ID that will assume the above Role to put logs into the Data Lake.

" + } + } + }, + "CreateCustomLogSourceResponse":{ + "type":"structure", + "required":[ + "customDataLocation", + "glueCrawlerName", + "glueDatabaseName", + "glueTableName", + "logProviderAccessRoleArn" + ], + "members":{ + "customDataLocation":{ + "shape":"String", + "documentation":"

The location of the partition in the Security Lake S3 bucket.

" + }, + "glueCrawlerName":{ + "shape":"String", + "documentation":"

The name of the Glue crawler.

" + }, + "glueDatabaseName":{ + "shape":"String", + "documentation":"

The Glue database where results are written, such as: arn:aws:daylight:us-east-1::database/sometable/*.

" + }, + "glueTableName":{ + "shape":"String", + "documentation":"

The table name of the Glue crawler.

" + }, + "logProviderAccessRoleArn":{ + "shape":"String", + "documentation":"

IAM Role ARN to be used by the entity putting logs into your Custom Source partition. Security Lake will apply the correct access policies to this Role, but this Role must have the trust policy created manually. This Role's name must start with the text 'Security Lake'. It must trust the logProviderAccountId to assume it.

" + } + } + }, + "CreateDatalakeAutoEnableRequest":{ + "type":"structure", + "required":["configurationForNewAccounts"], + "members":{ + "configurationForNewAccounts":{ + "shape":"AutoEnableNewRegionConfigurationList", + "documentation":"

Enable Amazon Security Lake with the specified configurations settings to begin ingesting security data for new accounts in Security Lake.

" + } + } + }, + "CreateDatalakeAutoEnableResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateDatalakeDelegatedAdminRequest":{ + "type":"structure", + "required":["account"], + "members":{ + "account":{ + "shape":"SafeString", + "documentation":"

Account ID of the Security Lake delegated administrator.

" + } + } + }, + "CreateDatalakeDelegatedAdminResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateDatalakeExceptionsSubscriptionRequest":{ + "type":"structure", + "required":[ + "notificationEndpoint", + "subscriptionProtocol" + ], + "members":{ + "notificationEndpoint":{ + "shape":"SafeString", + "documentation":"

The account in which the exception notifications subscription is created.

" + }, + "subscriptionProtocol":{ + "shape":"SubscriptionProtocolType", + "documentation":"

The subscription protocol to which exception messages are posted.

" + } + } + }, + "CreateDatalakeExceptionsSubscriptionResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateDatalakeRequest":{ + "type":"structure", + "members":{ + "configurations":{ + "shape":"LakeConfigurationRequestMap", + "documentation":"

Enable Security Lake with the specified configurations settings to begin ingesting security data.

" + }, + "enableAll":{ + "shape":"Boolean", + "documentation":"

Enable Security Lake in all Regions to begin ingesting security data.

" + }, + "metaStoreManagerRoleArn":{ + "shape":"RoleArn", + "documentation":"

The Role ARN used to create and update the Glue table with partitions generated by ingestion and normalization of Amazon Web Services log sources and custom sources.

" + }, + "regions":{ + "shape":"RegionSet", + "documentation":"

Enable Security Lake in the specified Regions to begin ingesting security data. To enable Security Lake in specific Amazon Web Services Regions, such as us-east-1 or ap-northeast-3, provide the Region codes. For a list of Region codes, see Region codes in the Amazon Web Services General Reference.

" + } + } + }, + "CreateDatalakeResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateSubscriberRequest":{ + "type":"structure", + "required":[ + "accountId", + "externalId", + "sourceTypes", + "subscriberName" + ], + "members":{ + "accessTypes":{ + "shape":"AccessTypeList", + "documentation":"

The Amazon S3 or Lake Formation access type.

" + }, + "accountId":{ + "shape":"AwsAccountId", + "documentation":"

The third party Amazon Web Services account ID used to access your data.

" + }, + "externalId":{ + "shape":"SafeString", + "documentation":"

The external ID of the subscriber. External ID allows the user that is assuming the role to assert the circumstances in which they are operating. It also provides a way for the account owner to permit the role to be assumed only under specific circumstances.

" + }, + "sourceTypes":{ + "shape":"SourceTypeList", + "documentation":"

The supported Amazon Web Services services from which logs and events are collected. Amazon Security Lake supports logs and events collection for natively-supported Amazon Web Services services.

" + }, + "subscriberDescription":{ + "shape":"SafeString", + "documentation":"

The subscriber descriptions for the subscriber account in Amazon Security Lake.

" + }, + "subscriberName":{ + "shape":"CreateSubscriberRequestSubscriberNameString", + "documentation":"

The name of your Amazon Security Lake subscriber account.

" + } + } + }, + "CreateSubscriberRequestSubscriberNameString":{ + "type":"string", + "max":64, + "min":0 + }, + "CreateSubscriberResponse":{ + "type":"structure", + "required":["subscriptionId"], + "members":{ + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) created by the user to provide to the subscriber. For more information about ARNs and how to use them in policies, see IAM identifiers in the IAM User Guide.

" + }, + "s3BucketArn":{ + "shape":"S3BucketArn", + "documentation":"

The Amazon Resource Name (ARN) for the Amazon S3 bucket.

" + }, + "snsArn":{ + "shape":"SnsTopicArn", + "documentation":"

The Amazon Resource Name (ARN) for the Amazon Simple Notification Service.

" + }, + "subscriptionId":{ + "shape":"UUID", + "documentation":"

The subscriptionId that was created by the CreateSubscriber API call.

" + } + } + }, + "CreateSubscriptionNotificationConfigurationRequest":{ + "type":"structure", + "required":["subscriptionId"], + "members":{ + "createSqs":{ + "shape":"Boolean", + "documentation":"

Create a new subscription notification for the specified subscription ID in Security Lake.

" + }, + "httpsApiKeyName":{ + "shape":"String", + "documentation":"

The key name for the subscription notification.

" + }, + "httpsApiKeyValue":{ + "shape":"String", + "documentation":"

The key value for the subscription notification.

" + }, + "httpsMethod":{ + "shape":"HttpsMethod", + "documentation":"

The HTTPS method used for the subscription notification.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) specifying the role of the subscriber.

" + }, + "subscriptionEndpoint":{ + "shape":"CreateSubscriptionNotificationConfigurationRequestSubscriptionEndpointString", + "documentation":"

The subscription endpoint in Security Lake.

" + }, + "subscriptionId":{ + "shape":"UUID", + "documentation":"

The subscription ID for which the subscription notification is specified.

", + "location":"uri", + "locationName":"subscriptionId" + } + } + }, + "CreateSubscriptionNotificationConfigurationRequestSubscriptionEndpointString":{ + "type":"string", + "pattern":"^(arn:aws:.+$|https?://.+$)" + }, + "CreateSubscriptionNotificationConfigurationResponse":{ + "type":"structure", + "members":{ + "queueArn":{ + "shape":"SafeString", + "documentation":"

Returns the Amazon resource name (ARN) of the queue.

" + } + } + }, + "CustomSourceType":{ + "type":"string", + "pattern":"^[\\\\\\w\\-_:/.]*$" + }, + "DeleteAwsLogSourceRequest":{ + "type":"structure", + "required":["inputOrder"], + "members":{ + "disableAllDimensions":{ + "shape":"AllDimensionsMap", + "documentation":"

Removes the specific Amazon Web Services sources from all Regions and source types.

" + }, + "disableSingleDimension":{ + "shape":"InputSet", + "documentation":"

Removes all Amazon Web Services sources from specific accounts or Regions.

" + }, + "disableTwoDimensions":{ + "shape":"TwoDimensionsMap", + "documentation":"

Remove a specific Amazon Web Services source from specific accounts or Regions.

" + }, + "inputOrder":{ + "shape":"DimensionSet", + "documentation":"

This is a mandatory input. Specifies the input order to disable dimensions in Security Lake, namely Region, source type, and member.

" + } + } + }, + "DeleteAwsLogSourceResponse":{ + "type":"structure", + "members":{ + "failed":{ + "shape":"AccountList", + "documentation":"

Deletion of the Amazon Web Services sources failed as the account is not a part of the organization.

" + }, + "processing":{ + "shape":"AccountList", + "documentation":"

Deletion of the Amazon Web Services sources is in-progress.

" + } + } + }, + "DeleteCustomLogSourceRequest":{ + "type":"structure", + "required":["customSourceName"], + "members":{ + "customSourceName":{ + "shape":"String", + "documentation":"

The custom source name for the custom log source.

", + "location":"querystring", + "locationName":"customSourceName" + } + } + }, + "DeleteCustomLogSourceResponse":{ + "type":"structure", + "required":["customDataLocation"], + "members":{ + "customDataLocation":{ + "shape":"String", + "documentation":"

The location of the partition in the Security Lake S3 bucket.

" + } + } + }, + "DeleteDatalakeAutoEnableRequest":{ + "type":"structure", + "required":["removeFromConfigurationForNewAccounts"], + "members":{ + "removeFromConfigurationForNewAccounts":{ + "shape":"AutoEnableNewRegionConfigurationList", + "documentation":"

Delete Amazon Security Lake with the specified configurations settings to stop ingesting security data for new accounts in Security Lake.

" + } + } + }, + "DeleteDatalakeAutoEnableResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteDatalakeDelegatedAdminRequest":{ + "type":"structure", + "required":["account"], + "members":{ + "account":{ + "shape":"SafeString", + "documentation":"

Account ID of the Security Lake delegated administrator.

", + "location":"uri", + "locationName":"account" + } + } + }, + "DeleteDatalakeDelegatedAdminResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteDatalakeExceptionsSubscriptionRequest":{ + "type":"structure", + "members":{ + } + }, + "DeleteDatalakeExceptionsSubscriptionResponse":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"SafeString", + "documentation":"

Retrieves the status of the delete Security Lake operation for an account.

" + } + } + }, + "DeleteDatalakeRequest":{ + "type":"structure", + "members":{ + } + }, + "DeleteDatalakeResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteSubscriberRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"String", + "documentation":"

A value created by Security Lake that uniquely identifies your DeleteSubscriber API request.

", + "location":"querystring", + "locationName":"id" + } + } + }, + "DeleteSubscriberResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteSubscriptionNotificationConfigurationRequest":{ + "type":"structure", + "required":["subscriptionId"], + "members":{ + "subscriptionId":{ + "shape":"UUID", + "documentation":"

The subscription ID of the Amazon Security Lake subscriber account.

", + "location":"uri", + "locationName":"subscriptionId" + } + } + }, + "DeleteSubscriptionNotificationConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, + "Dimension":{ + "type":"string", + "enum":[ + "REGION", + "SOURCE_TYPE", + "MEMBER" + ] + }, + "DimensionSet":{ + "type":"list", + "member":{"shape":"Dimension"} + }, + "EndpointProtocol":{ + "type":"string", + "enum":[ + "HTTPS", + "SQS" + ] + }, + "EventBridgeException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

Represents an error interacting with the Amazon EventBridge service.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Failures":{ + "type":"structure", + "required":[ + "exceptionMessage", + "remediation", + "timestamp" + ], + "members":{ + "exceptionMessage":{ + "shape":"SafeString", + "documentation":"

List of all exception messages.

" + }, + "remediation":{ + "shape":"SafeString", + "documentation":"

List of all remediation steps for failures.

" + }, + "timestamp":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

This error can occur if you configure the wrong timestamp format, or if the subset of entries used for validation had errors or missing values.

" + } + }, + "documentation":"

List of all failures.

" + }, + "FailuresResponse":{ + "type":"structure", + "members":{ + "failures":{ + "shape":"Failureslist", + "documentation":"

List of all failures.

" + }, + "region":{ + "shape":"SafeString", + "documentation":"

List of Regions where the failure occurred.

" + } + }, + "documentation":"

Response element for actions which make changes namely create, update, or delete actions.

" + }, + "FailuresResponseList":{ + "type":"list", + "member":{"shape":"FailuresResponse"} + }, + "Failureslist":{ + "type":"list", + "member":{"shape":"Failures"} + }, + "GetDatalakeAutoEnableRequest":{ + "type":"structure", + "members":{ + } + }, + "GetDatalakeAutoEnableResponse":{ + "type":"structure", + "required":["autoEnableNewAccounts"], + "members":{ + "autoEnableNewAccounts":{ + "shape":"AutoEnableNewRegionConfigurationList", + "documentation":"

The configuration for new accounts.

" + } + } + }, + "GetDatalakeExceptionsExpiryRequest":{ + "type":"structure", + "members":{ + } + }, + "GetDatalakeExceptionsExpiryResponse":{ + "type":"structure", + "required":["exceptionMessageExpiry"], + "members":{ + "exceptionMessageExpiry":{ + "shape":"Long", + "documentation":"

The expiration period and time-to-live (TTL).

" + } + } + }, + "GetDatalakeExceptionsSubscriptionRequest":{ + "type":"structure", + "members":{ + } + }, + "GetDatalakeExceptionsSubscriptionResponse":{ + "type":"structure", + "required":["protocolAndNotificationEndpoint"], + "members":{ + "protocolAndNotificationEndpoint":{ + "shape":"ProtocolAndNotificationEndpoint", + "documentation":"

Retrieves the exception notification subscription information.

" + } + } + }, + "GetDatalakeRequest":{ + "type":"structure", + "members":{ + } + }, + "GetDatalakeResponse":{ + "type":"structure", + "required":["configurations"], + "members":{ + "configurations":{ + "shape":"LakeConfigurationResponseMap", + "documentation":"

Retrieves the Security Lake configuration object.

" + } + } + }, + "GetDatalakeStatusRequest":{ + "type":"structure", + "members":{ + "accountSet":{ + "shape":"InputSet", + "documentation":"

The account IDs for which a static snapshot of the current Region, including enabled accounts and log sources is retrieved.

" + }, + "maxAccountResults":{ + "shape":"Integer", + "documentation":"

The maximum limit of accounts for which the static snapshot of the current Region including enabled accounts and log sources is retrieved.

" + }, + "nextToken":{ + "shape":"SafeString", + "documentation":"

If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

" + } + } + }, + "GetDatalakeStatusResponse":{ + "type":"structure", + "required":["accountSourcesList"], + "members":{ + "accountSourcesList":{ + "shape":"AccountSourcesList", + "documentation":"

The list of enabled accounts and enabled sources.

" + }, + "nextToken":{ + "shape":"SafeString", + "documentation":"

If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

" + } + } + }, + "GetSubscriberRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"String", + "documentation":"

A value created by Security Lake that uniquely identifies your GetSubscriber API request.

", + "location":"uri", + "locationName":"id" + } + } + }, + "GetSubscriberResponse":{ + "type":"structure", + "members":{ + "subscriber":{ + "shape":"SubscriberResource", + "documentation":"

Subscription information for the specified subscription ID.

" + } + } + }, + "HttpsMethod":{ + "type":"string", + "enum":[ + "POST", + "PUT" + ] + }, + "InputSet":{ + "type":"list", + "member":{"shape":"SafeString"} + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

Retry the request after the specified time.

", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

Internal service exceptions are sometimes caused by transient issues. Before you start troubleshooting, perform the operation again.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "InvalidInputException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The request was rejected because an invalid or out-of-range value was supplied for an input parameter.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LakeConfigurationRequest":{ + "type":"structure", + "members":{ + "encryptionKey":{ + "shape":"String", + "documentation":"

The type of encryption key used by Security Lake to encrypt the lake configuration object.

" + }, + "replicationDestinationRegions":{ + "shape":"RegionSet", + "documentation":"

Replication enables automatic, asynchronous copying of objects across Amazon S3 buckets. Amazon S3 buckets that are configured for object replication can be owned by the same Amazon Web Services account or by different accounts. You can replicate objects to a single destination bucket or to multiple destination buckets. The destination buckets can be in different Amazon Web Services Regions or within the same Region as the source bucket.

Set up one or more rollup Regions by providing the Region or Regions that should contribute to the central rollup Region.

" + }, + "replicationRoleArn":{ + "shape":"RoleArn", + "documentation":"

Replication settings for the Amazon S3 buckets. This parameter uses the IAM role created by you that is managed by Security Lake, to ensure the replication setting is correct.

" + }, + "retentionSettings":{ + "shape":"RetentionSettingList", + "documentation":"

Retention settings for the destination Amazon S3 buckets.

" + }, + "tagsMap":{ + "shape":"TagsMap", + "documentation":"

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value, both of which you define.

" + } + }, + "documentation":"

Provides details of lake configuration object in Amazon Security Lake.

" + }, + "LakeConfigurationRequestMap":{ + "type":"map", + "key":{"shape":"Region"}, + "value":{"shape":"LakeConfigurationRequest"} + }, + "LakeConfigurationResponse":{ + "type":"structure", + "members":{ + "encryptionKey":{ + "shape":"String", + "documentation":"

The type of encryption key used by Security Lake to encrypt the lake configuration

" + }, + "replicationDestinationRegions":{ + "shape":"RegionSet", + "documentation":"

Replication enables automatic, asynchronous copying of objects across Amazon S3 buckets. Amazon S3 buckets that are configured for object replication can be owned by the same AWS account or by different accounts. You can replicate objects to a single destination bucket or to multiple destination buckets. The destination buckets can be in different Amazon Web Services Regions or within the same Region as the source bucket.

Set up one or more rollup Regions by providing the Region or Regions that should contribute to the central rollup Region.

" + }, + "replicationRoleArn":{ + "shape":"RoleArn", + "documentation":"

Replication settings for the Amazon S3 buckets. This parameter uses the IAM role created by you that is managed by Security Lake, to ensure the replication setting is correct.

" + }, + "retentionSettings":{ + "shape":"RetentionSettingList", + "documentation":"

Retention settings for the destination Amazon S3 buckets.

" + }, + "s3BucketArn":{ + "shape":"S3BucketArn", + "documentation":"

Amazon Resource Names (ARNs) uniquely identify Amazon Web Services resources. Security Lake requires an ARN when you need to specify a resource unambiguously across all of Amazon Web Services, such as in IAM policies, Amazon Relational Database Service (Amazon RDS) tags, and API calls.

" + }, + "status":{ + "shape":"settingsStatus", + "documentation":"

Retrieves the status of the configuration operation for an account in Amazon Security Lake.

" + }, + "tagsMap":{ + "shape":"TagsMap", + "documentation":"

A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value, both of which you define.

" + } + }, + "documentation":"

Provides details of lake configuration object in Amazon Security Lake.

" + }, + "LakeConfigurationResponseMap":{ + "type":"map", + "key":{"shape":"Region"}, + "value":{"shape":"LakeConfigurationResponse"} + }, + "ListDatalakeExceptionsRequest":{ + "type":"structure", + "members":{ + "maxFailures":{ + "shape":"Integer", + "documentation":"

List the maximum number of failures in Security Lake.

" + }, + "nextToken":{ + "shape":"SafeString", + "documentation":"

Lists if there are more results available. If nextToken is returned, you can make the call again using the returned token to retrieve the next page.

" + }, + "regionSet":{ + "shape":"RegionSet", + "documentation":"

List the regions from which exceptions are retrieved.

" + } + } + }, + "ListDatalakeExceptionsResponse":{ + "type":"structure", + "required":["nonRetryableFailures"], + "members":{ + "nextToken":{ + "shape":"SafeString", + "documentation":"

Lists if there are more results available. If nextToken is returned, you can make the call again using the returned token to retrieve the next page.

" + }, + "nonRetryableFailures":{ + "shape":"FailuresResponseList", + "documentation":"

Lists the non-retryable failures in the current region.

" + } + } + }, + "ListLogSourcesRequest":{ + "type":"structure", + "members":{ + "inputOrder":{ + "shape":"DimensionSet", + "documentation":"

Lists the log sources in input order, namely Region, source type, and member account.

" + }, + "listAllDimensions":{ + "shape":"AllDimensionsMap", + "documentation":"

List the view of log sources for enabled Security Lake accounts in all Regions and source types.

" + }, + "listSingleDimension":{ + "shape":"InputSet", + "documentation":"

List the view of log sources for enabled Security Lake accounts for the entire region.

" + }, + "listTwoDimensions":{ + "shape":"TwoDimensionsMap", + "documentation":"

Lists the log sources for the specified source types in enabled Security Lake accounts for the entire Region, for selected member accounts.

" + }, + "maxResults":{ + "shape":"Integer", + "documentation":"

The maximum number of accounts for which the configuration is displayed.

" + }, + "nextToken":{ + "shape":"SafeString", + "documentation":"

If nextToken is returned, there are more results available. You can make the call again using the returned token to retrieve the next page.

" + } + } + }, + "ListLogSourcesResponse":{ + "type":"structure", + "required":["regionSourceTypesAccountsList"], + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

If nextToken is returned, there are more results available. You can make the call again using the returned token to retrieve the next page.

" + }, + "regionSourceTypesAccountsList":{ + "shape":"RegionSourceTypesAccountsList", + "documentation":"

Lists the log sources in the Regions for enabled Security Lake accounts.

" + } + } + }, + "ListSubscribersRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"Integer", + "documentation":"

The maximum number of accounts for which the configuration is displayed.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"SafeString", + "documentation":"

If nextToken is returned, there are more results available. You can make the call again using the returned token to retrieve the next page.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListSubscribersResponse":{ + "type":"structure", + "required":["subscribers"], + "members":{ + "nextToken":{ + "shape":"SafeString", + "documentation":"

If nextToken is returned, there are more results available. You can make the call again using the returned token to retrieve the next page.

" + }, + "subscribers":{ + "shape":"SubscriberList", + "documentation":"

The subscribers available in the specified Security Lake account ID.

" + } + } + }, + "LogsStatus":{ + "type":"structure", + "required":[ + "healthStatus", + "pathToLogs" + ], + "members":{ + "healthStatus":{ + "shape":"SourceStatus", + "documentation":"

Health status of services including error codes and patterns.

" + }, + "pathToLogs":{ + "shape":"String", + "documentation":"

Defines the path at which the stored logs are available. The logs contain information on your systems, applications, and services.

" + } + }, + "documentation":"

Log status for the Security Lake account.

" + }, + "LogsStatusList":{ + "type":"list", + "member":{"shape":"LogsStatus"} + }, + "Long":{ + "type":"long", + "box":true + }, + "OcsfEventClass":{ + "type":"string", + "enum":[ + "ACCESS_ACTIVITY", + "FILE_ACTIVITY", + "KERNEL_ACTIVITY", + "KERNEL_EXTENSION", + "MEMORY_ACTIVITY", + "MODULE_ACTIVITY", + "PROCESS_ACTIVITY", + "REGISTRY_KEY_ACTIVITY", + "REGISTRY_VALUE_ACTIVITY", + "RESOURCE_ACTIVITY", + "SCHEDULED_JOB_ACTIVITY", + "SECURITY_FINDING", + "ACCOUNT_CHANGE", + "AUTHENTICATION", + "AUTHORIZATION", + "ENTITY_MANAGEMENT_AUDIT", + "DHCP_ACTIVITY", + "NETWORK_ACTIVITY", + "DNS_ACTIVITY", + "FTP_ACTIVITY", + "HTTP_ACTIVITY", + "RDP_ACTIVITY", + "SMB_ACTIVITY", + "SSH_ACTIVITY", + "CLOUD_API", + "CONTAINER_LIFECYCLE", + "DATABASE_LIFECYCLE", + "CONFIG_STATE", + "CLOUD_STORAGE", + "INVENTORY_INFO", + "RFB_ACTIVITY", + "SMTP_ACTIVITY", + "VIRTUAL_MACHINE_ACTIVITY" + ] + }, + "ProtocolAndNotificationEndpoint":{ + "type":"structure", + "members":{ + "endpoint":{ + "shape":"SafeString", + "documentation":"

The account which is subscribed to receive exception notifications.

" + }, + "protocol":{ + "shape":"SafeString", + "documentation":"

The protocol to which notification messages are posted.

" + } + }, + "documentation":"

Notifications in Security Lake which dictates how notifications are posted at the endpoint.

" + }, + "Region":{ + "type":"string", + "enum":[ + "us-east-1", + "us-west-2", + "eu-central-1", + "us-east-2", + "eu-west-1", + "ap-northeast-1", + "ap-southeast-2" + ] + }, + "RegionSet":{ + "type":"list", + "member":{"shape":"Region"} + }, + "RegionSourceTypesAccountsList":{ + "type":"list", + "member":{"shape":"AllDimensionsMap"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

The ID of the resource for which the type of resource could not be found.

" + }, + "resourceType":{ + "shape":"String", + "documentation":"

The type of the resource that could not be found.

" + } + }, + "documentation":"

The resource could not be found.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "RetentionSetting":{ + "type":"structure", + "members":{ + "retentionPeriod":{ + "shape":"RetentionSettingRetentionPeriodInteger", + "documentation":"

The retention period specifies a fixed period of time during which the Security Lake object remains locked. You can specify the retention period for one or more source in days.

" + }, + "storageClass":{ + "shape":"StorageClass", + "documentation":"

The range of storage classes that you can choose from based on the data access, resiliency, and cost requirements of your workloads.

" + } + }, + "documentation":"

Retention settings for the destination Amazon S3 buckets in Security Lake.

" + }, + "RetentionSettingList":{ + "type":"list", + "member":{"shape":"RetentionSetting"} + }, + "RetentionSettingRetentionPeriodInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "RoleArn":{ + "type":"string", + "pattern":"^arn:.*" + }, + "S3BucketArn":{"type":"string"}, + "S3Exception":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

Provides an extension of the AmazonServiceException for errors reported by Amazon S3 while processing a request. In particular, this class provides access to Amazon S3's extended request ID. This ID is required debugging information in the case the user needs to contact Amazon about an issue where Amazon S3 is incorrectly handling a request.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SafeString":{ + "type":"string", + "pattern":"^[\\\\\\w\\-_:/.@=+]*$" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "message", + "quotaCode", + "resourceId", + "resourceType", + "serviceCode" + ], + "members":{ + "message":{"shape":"String"}, + "quotaCode":{ + "shape":"String", + "documentation":"

Indicates that the rate of requests to Security Lake is exceeding the request quotas for your Amazon Web Services account.

" + }, + "resourceId":{ + "shape":"String", + "documentation":"

The ID of the resource that exceeds the service quota.

" + }, + "resourceType":{ + "shape":"String", + "documentation":"

The type of the resource that exceeds the service quota.

" + }, + "serviceCode":{ + "shape":"String", + "documentation":"

The code for the service in Service Quotas.

" + } + }, + "documentation":"

You have exceeded your service quota. To perform the requested action, remove some of the relevant resources, or use Service Quotas to request a service quota increase.

", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "SnsTopicArn":{"type":"string"}, + "SourceStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "DEACTIVATED", + "PENDING" + ] + }, + "SourceType":{ + "type":"structure", + "members":{ + "awsSourceType":{ + "shape":"AwsLogSourceType", + "documentation":"

Amazon Security Lake supports logs and events collection for natively-supported Amazon Web Services services. For more information, see the Amazon Security Lake User Guide.

" + }, + "customSourceType":{ + "shape":"CustomSourceType", + "documentation":"

Amazon Security Lake supports custom source types. For the detailed list, see the Amazon Security Lake User Guide.

" + } + }, + "documentation":"

The supported source types from which logs and events are collected in Amazon Security Lake.

", + "union":true + }, + "SourceTypeList":{ + "type":"list", + "member":{"shape":"SourceType"} + }, + "StorageClass":{ + "type":"string", + "enum":[ + "STANDARD_IA", + "ONEZONE_IA", + "INTELLIGENT_TIERING", + "GLACIER_IR", + "GLACIER", + "DEEP_ARCHIVE", + "EXPIRE" + ] + }, + "String":{"type":"string"}, + "SubscriberList":{ + "type":"list", + "member":{"shape":"SubscriberResource"} + }, + "SubscriberResource":{ + "type":"structure", + "required":[ + "accountId", + "sourceTypes", + "subscriptionId" + ], + "members":{ + "accessTypes":{ + "shape":"AccessTypeList", + "documentation":"

You can choose to notify subscribers of new objects with an Amazon Simple Queue Service (Amazon SQS) queue or through messaging to an HTTPS endpoint provided by the subscriber.

Subscribers can consume data by directly querying Lake Formation tables in your S3 bucket via services like Amazon Athena. This subscription type is defined as LAKEFORMATION.

" + }, + "accountId":{ + "shape":"AwsAccountId", + "documentation":"

The Amazon Web Services account ID of the account that you are using to create your Amazon Security Lake account.

" + }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The date and time when the subscription was created.

" + }, + "externalId":{ + "shape":"SafeString", + "documentation":"

The external ID of the subscriber. External ID allows the user that is assuming the role to assert the circumstances in which they are operating. It also provides a way for the account owner to permit the role to be assumed only under specific circumstances.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) specifying the role of the subscriber.

" + }, + "s3BucketArn":{ + "shape":"S3BucketArn", + "documentation":"

The Amazon Resource Name (ARN) for the Amazon S3 bucket.

" + }, + "snsArn":{ + "shape":"SnsTopicArn", + "documentation":"

The Amazon Resource Name (ARN) for the Amazon Simple Notification Service.

" + }, + "sourceTypes":{ + "shape":"SourceTypeList", + "documentation":"

Amazon Security Lake supports logs and events collection for the natively-supported Amazon Web Services services. For more information, see the Amazon Security Lake User Guide.

" + }, + "subscriberDescription":{ + "shape":"SafeString", + "documentation":"

The subscriber descriptions for a subscriber account. The description for a subscriber includes subscriberName, accountID, externalID, and subscriptionId.

" + }, + "subscriberName":{ + "shape":"SafeString", + "documentation":"

The name of your Amazon Security Lake subscriber account.

" + }, + "subscriptionEndpoint":{ + "shape":"String", + "documentation":"

The subscription endpoint to which exception messages are posted.

" + }, + "subscriptionId":{ + "shape":"UUID", + "documentation":"

The subscription ID of the Amazon Security Lake subscriber account.

" + }, + "subscriptionProtocol":{ + "shape":"EndpointProtocol", + "documentation":"

The subscription protocol to which exception messages are posted.

" + }, + "subscriptionStatus":{ + "shape":"SubscriptionStatus", + "documentation":"

Subscription status of the Amazon Security Lake subscriber account.

" + }, + "updatedAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The date and time when the subscription was last updated.

" + } + }, + "documentation":"

Provides details of the Amazon Security Lake account subscription. Subscribers are notified of new objects for a source as the data is written to your Amazon Security Lake S3 bucket.

" + }, + "SubscriptionProtocolType":{ + "type":"string", + "enum":[ + "HTTP", + "HTTPS", + "EMAIL", + "EMAIL_JSON", + "SMS", + "SQS", + "LAMBDA", + "APP", + "FIREHOSE" + ] + }, + "SubscriptionStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "DEACTIVATED", + "PENDING", + "READY" + ] + }, + "SyntheticTimestamp_date_time":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "TagsMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "quotaCode":{ + "shape":"String", + "documentation":"

The rate of requests to Security Lake is exceeding the request quotas for your Amazon Web Services account.

" + }, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

Retry the request after the specified time.

", + "location":"header", + "locationName":"Retry-After" + }, + "serviceCode":{ + "shape":"String", + "documentation":"

The code for the service in Service Quotas.

" + } + }, + "documentation":"

The limit on the number of requests per second was exceeded.

", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "TwoDimensionsMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"ValueSet"} + }, + "UUID":{ + "type":"string", + "pattern":"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}" + }, + "UpdateDatalakeExceptionsExpiryRequest":{ + "type":"structure", + "required":["exceptionMessageExpiry"], + "members":{ + "exceptionMessageExpiry":{ + "shape":"UpdateDatalakeExceptionsExpiryRequestExceptionMessageExpiryLong", + "documentation":"

The time-to-live (TTL) for the exception message to remain.

" + } + } + }, + "UpdateDatalakeExceptionsExpiryRequestExceptionMessageExpiryLong":{ + "type":"long", + "box":true, + "min":1 + }, + "UpdateDatalakeExceptionsExpiryResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateDatalakeExceptionsSubscriptionRequest":{ + "type":"structure", + "required":[ + "notificationEndpoint", + "subscriptionProtocol" + ], + "members":{ + "notificationEndpoint":{ + "shape":"SafeString", + "documentation":"

The account which is subscribed to receive exception notifications.

" + }, + "subscriptionProtocol":{ + "shape":"SubscriptionProtocolType", + "documentation":"

The subscription protocol to which exception messages are posted.

" + } + } + }, + "UpdateDatalakeExceptionsSubscriptionResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateDatalakeRequest":{ + "type":"structure", + "required":["configurations"], + "members":{ + "configurations":{ + "shape":"LakeConfigurationRequestMap", + "documentation":"

The configuration object

" + } + } + }, + "UpdateDatalakeResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateSubscriberRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "externalId":{ + "shape":"SafeString", + "documentation":"

External ID of the Security Lake account.

" + }, + "id":{ + "shape":"String", + "documentation":"

A value created by Security Lake that uniquely identifies your UpdateSubscriber API request.

", + "location":"uri", + "locationName":"id" + }, + "sourceTypes":{ + "shape":"SourceTypeList", + "documentation":"

The supported Amazon Web Services services from which logs and events are collected. Amazon Security Lake supports logs and events collection for the following natively-supported Amazon Web Services services. For more information, see the Amazon Security Lake User Guide.

" + }, + "subscriberDescription":{ + "shape":"SafeString", + "documentation":"

Description of the Security Lake account subscriber.

" + }, + "subscriberName":{ + "shape":"UpdateSubscriberRequestSubscriberNameString", + "documentation":"

Name of the Security Lake account subscriber.

" + } + } + }, + "UpdateSubscriberRequestSubscriberNameString":{ + "type":"string", + "max":64, + "min":0, + "pattern":"^[\\\\\\w\\-_:/.@=+]*$" + }, + "UpdateSubscriberResponse":{ + "type":"structure", + "members":{ + "subscriber":{ + "shape":"SubscriberResource", + "documentation":"

The account subscriber in Amazon Security Lake.

" + } + } + }, + "UpdateSubscriptionNotificationConfigurationRequest":{ + "type":"structure", + "required":["subscriptionId"], + "members":{ + "createSqs":{ + "shape":"Boolean", + "documentation":"

Create a new subscription notification for the specified subscription ID in Security Lake.

" + }, + "httpsApiKeyName":{ + "shape":"String", + "documentation":"

The key name for the subscription notification.

" + }, + "httpsApiKeyValue":{ + "shape":"String", + "documentation":"

The key value for the subscription notification.

" + }, + "httpsMethod":{ + "shape":"HttpsMethod", + "documentation":"

The HTTPS method used for the subscription notification.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) specifying the role of the subscriber.

" + }, + "subscriptionEndpoint":{ + "shape":"UpdateSubscriptionNotificationConfigurationRequestSubscriptionEndpointString", + "documentation":"

The subscription endpoint in Security Lake.

" + }, + "subscriptionId":{ + "shape":"UUID", + "documentation":"

The subscription ID for which the subscription notification is specified.

", + "location":"uri", + "locationName":"subscriptionId" + } + } + }, + "UpdateSubscriptionNotificationConfigurationRequestSubscriptionEndpointString":{ + "type":"string", + "pattern":"^(arn:aws:.+$|https?://.+$)" + }, + "UpdateSubscriptionNotificationConfigurationResponse":{ + "type":"structure", + "members":{ + "queueArn":{ + "shape":"SafeString", + "documentation":"

Returns the Amazon resource name (ARN) of the queue.

" + } + } + }, + "ValidationException":{ + "type":"structure", + "required":[ + "message", + "reason" + ], + "members":{ + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

The list of parameters that failed to validate.

" + }, + "message":{"shape":"String"}, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

The reason for the validation exception.

" + } + }, + "documentation":"

Your signing certificate could not be validated.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "message", + "name" + ], + "members":{ + "message":{ + "shape":"String", + "documentation":"

Describes the error encountered.

" + }, + "name":{ + "shape":"String", + "documentation":"

Name of the validation exception.

" + } + }, + "documentation":"

The input fails to meet the constraints specified in Amazon Security Lake.

" + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "unknownOperation", + "cannotParse", + "fieldValidationFailed", + "other" + ] + }, + "ValueSet":{ + "type":"list", + "member":{"shape":"String"} + }, + "settingsStatus":{ + "type":"string", + "enum":[ + "INITIALIZED", + "PENDING", + "COMPLETED", + "FAILED" + ] + } + }, + "documentation":"

Amazon Security Lake is in preview release. Your use of the Amazon Security Lake preview is subject to Section 2 of the Amazon Web Services Service Terms(\"Betas and Previews\").

Amazon Security Lake is a fully-managed security data lake service. You can use Security Lake to automatically centralize security data from cloud, on-premises, and custom sources into a data lake that's stored in your account. Security Lake helps you analyze security data, so you can get a more complete understanding of your security posture across the entire organization and improve the protection of your workloads, applications, and data.

The data lake is backed by Amazon Simple Storage Service (Amazon S3) buckets, and you retain ownership over your data.

Security Lake automates the collection of security-related log and event data from integrated Amazon Web Services and third-party services and manages the lifecycle of data with customizable retention and replication settings. Security Lake also converts ingested data into Apache Parquet format and a standard open-source schema called the Open Cybersecurity Schema Framework (OCSF).

Other Amazon Web Services and third-party services can subscribe to the data that's stored in Security Lake for incident response and security data analytics.

" +} diff --git a/botocore/data/simspaceweaver/2022-10-28/endpoint-rule-set-1.json b/botocore/data/simspaceweaver/2022-10-28/endpoint-rule-set-1.json new file mode 100644 index 0000000000..b458c7afe9 --- /dev/null +++ b/botocore/data/simspaceweaver/2022-10-28/endpoint-rule-set-1.json @@ -0,0 +1,309 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + 
"ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://simspaceweaver-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://simspaceweaver-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ 
+ { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://simspaceweaver.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://simspaceweaver.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] +} \ No newline at end of file diff --git a/botocore/data/simspaceweaver/2022-10-28/paginators-1.json b/botocore/data/simspaceweaver/2022-10-28/paginators-1.json new file mode 100644 index 0000000000..ea142457a6 --- /dev/null +++ b/botocore/data/simspaceweaver/2022-10-28/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/simspaceweaver/2022-10-28/service-2.json b/botocore/data/simspaceweaver/2022-10-28/service-2.json new file mode 100644 index 0000000000..dc72971634 --- /dev/null +++ b/botocore/data/simspaceweaver/2022-10-28/service-2.json @@ -0,0 +1,1232 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2022-10-28", + "endpointPrefix":"simspaceweaver", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"AWS SimSpace Weaver", + "serviceId":"SimSpaceWeaver", + "signatureVersion":"v4", + "signingName":"simspaceweaver", + "uid":"simspaceweaver-2022-10-28" + }, + "operations":{ + "DeleteApp":{ + "name":"DeleteApp", + "http":{ + "method":"DELETE", + "requestUri":"/deleteapp", + "responseCode":200 + }, + "input":{"shape":"DeleteAppInput"}, + "output":{"shape":"DeleteAppOutput"}, + "errors":[ + 
{"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Deletes the instance of the given custom app.

", + "idempotent":true + }, + "DeleteSimulation":{ + "name":"DeleteSimulation", + "http":{ + "method":"DELETE", + "requestUri":"/deletesimulation", + "responseCode":200 + }, + "input":{"shape":"DeleteSimulationInput"}, + "output":{"shape":"DeleteSimulationOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Deletes all SimSpace Weaver resources assigned to the given simulation.

Your simulation uses resources in other Amazon Web Services services. This API operation doesn't delete resources in other Amazon Web Services services.

", + "idempotent":true + }, + "DescribeApp":{ + "name":"DescribeApp", + "http":{ + "method":"GET", + "requestUri":"/describeapp", + "responseCode":200 + }, + "input":{"shape":"DescribeAppInput"}, + "output":{"shape":"DescribeAppOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the state of the given custom app.

" + }, + "DescribeSimulation":{ + "name":"DescribeSimulation", + "http":{ + "method":"GET", + "requestUri":"/describesimulation", + "responseCode":200 + }, + "input":{"shape":"DescribeSimulationInput"}, + "output":{"shape":"DescribeSimulationOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the current state of the given simulation.

" + }, + "ListApps":{ + "name":"ListApps", + "http":{ + "method":"GET", + "requestUri":"/listapps", + "responseCode":200 + }, + "input":{"shape":"ListAppsInput"}, + "output":{"shape":"ListAppsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Lists all custom apps or service apps for the given simulation and domain.

" + }, + "ListSimulations":{ + "name":"ListSimulations", + "http":{ + "method":"GET", + "requestUri":"/listsimulations", + "responseCode":200 + }, + "input":{"shape":"ListSimulationsInput"}, + "output":{"shape":"ListSimulationsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Lists the SimSpace Weaver simulations in the Amazon Web Services account used to make the API call.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceInput"}, + "output":{"shape":"ListTagsForResourceOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Lists all tags on a SimSpace Weaver resource.

" + }, + "StartApp":{ + "name":"StartApp", + "http":{ + "method":"POST", + "requestUri":"/startapp", + "responseCode":200 + }, + "input":{"shape":"StartAppInput"}, + "output":{"shape":"StartAppOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Starts a custom app with the configuration specified in the simulation schema.

" + }, + "StartClock":{ + "name":"StartClock", + "http":{ + "method":"POST", + "requestUri":"/startclock", + "responseCode":200 + }, + "input":{"shape":"StartClockInput"}, + "output":{"shape":"StartClockOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Starts the simulation clock.

" + }, + "StartSimulation":{ + "name":"StartSimulation", + "http":{ + "method":"POST", + "requestUri":"/startsimulation", + "responseCode":200 + }, + "input":{"shape":"StartSimulationInput"}, + "output":{"shape":"StartSimulationOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Starts a simulation with the given name and schema.

" + }, + "StopApp":{ + "name":"StopApp", + "http":{ + "method":"POST", + "requestUri":"/stopapp", + "responseCode":200 + }, + "input":{"shape":"StopAppInput"}, + "output":{"shape":"StopAppOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Stops the given custom app and shuts down all of its allocated compute resources.

" + }, + "StopClock":{ + "name":"StopClock", + "http":{ + "method":"POST", + "requestUri":"/stopclock", + "responseCode":200 + }, + "input":{"shape":"StopClockInput"}, + "output":{"shape":"StopClockOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Stops the simulation clock.

" + }, + "StopSimulation":{ + "name":"StopSimulation", + "http":{ + "method":"POST", + "requestUri":"/stopsimulation", + "responseCode":200 + }, + "input":{"shape":"StopSimulationInput"}, + "output":{"shape":"StopSimulationOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Stops the given simulation.

You can't restart a simulation after you stop it. If you need to restart a simulation, you must stop it, delete it, and start a new instance of it.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceInput"}, + "output":{"shape":"TagResourceOutput"}, + "errors":[ + {"shape":"TooManyTagsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Adds tags to a SimSpace Weaver resource. For more information about tags, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceInput"}, + "output":{"shape":"UntagResourceOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Removes tags from a SimSpace Weaver resource. For more information about tags, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "documentation":"

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AppPortMappings":{ + "type":"list", + "member":{"shape":"SimulationAppPortMapping"} + }, + "BucketName":{ + "type":"string", + "max":63, + "min":3 + }, + "ClientToken":{ + "type":"string", + "max":128, + "min":32, + "pattern":"^[a-zA-Z0-9-]+$", + "sensitive":true + }, + "ClockStatus":{ + "type":"string", + "enum":[ + "UNKNOWN", + "STARTING", + "STARTED", + "STOPPING", + "STOPPED" + ] + }, + "ClockTargetStatus":{ + "type":"string", + "enum":[ + "UNKNOWN", + "STARTED", + "STOPPED" + ] + }, + "CloudWatchLogsLogGroup":{ + "type":"structure", + "members":{ + "LogGroupArn":{ + "shape":"LogGroupArn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon CloudWatch Logs log group for the simulation. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference. For more information about log groups, see Working with log groups and log streams in the Amazon CloudWatch Logs User Guide.

" + } + }, + "documentation":"

The Amazon CloudWatch Logs log group for the simulation. For more information about log groups, see Working with log groups and log streams in the Amazon CloudWatch Logs User Guide.

" + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "documentation":"

", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "DeleteAppInput":{ + "type":"structure", + "required":[ + "App", + "Domain", + "Simulation" + ], + "members":{ + "App":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the app.

", + "location":"querystring", + "locationName":"app" + }, + "Domain":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the domain of the app.

", + "location":"querystring", + "locationName":"domain" + }, + "Simulation":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the simulation of the app.

", + "location":"querystring", + "locationName":"simulation" + } + } + }, + "DeleteAppOutput":{ + "type":"structure", + "members":{ + } + }, + "DeleteSimulationInput":{ + "type":"structure", + "required":["Simulation"], + "members":{ + "Simulation":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the simulation.

", + "location":"querystring", + "locationName":"simulation" + } + } + }, + "DeleteSimulationOutput":{ + "type":"structure", + "members":{ + } + }, + "DescribeAppInput":{ + "type":"structure", + "required":[ + "App", + "Domain", + "Simulation" + ], + "members":{ + "App":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the app.

", + "location":"querystring", + "locationName":"app" + }, + "Domain":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the domain of the app.

", + "location":"querystring", + "locationName":"domain" + }, + "Simulation":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the simulation of the app.

", + "location":"querystring", + "locationName":"simulation" + } + } + }, + "DescribeAppOutput":{ + "type":"structure", + "members":{ + "Description":{ + "shape":"Description", + "documentation":"

The description of the app.

" + }, + "Domain":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the domain of the app.

" + }, + "EndpointInfo":{ + "shape":"SimulationAppEndpointInfo", + "documentation":"

Information about the network endpoint for the custom app. You can use the endpoint to connect to the custom app.

" + }, + "LaunchOverrides":{"shape":"LaunchOverrides"}, + "Name":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the app.

" + }, + "Simulation":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the simulation of the app.

" + }, + "Status":{ + "shape":"SimulationAppStatus", + "documentation":"

The current lifecycle state of the custom app.

" + }, + "TargetStatus":{ + "shape":"SimulationAppTargetStatus", + "documentation":"

The desired lifecycle state of the custom app.

" + } + } + }, + "DescribeSimulationInput":{ + "type":"structure", + "required":["Simulation"], + "members":{ + "Simulation":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the simulation.

", + "location":"querystring", + "locationName":"simulation" + } + } + }, + "DescribeSimulationOutput":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"SimSpaceWeaverArn", + "documentation":"

The Amazon Resource Name (ARN) of the simulation. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time when the simulation was created, expressed as the number of seconds and milliseconds in UTC since the Unix epoch (0:0:0.000, January 1, 1970).

" + }, + "Description":{ + "shape":"Description", + "documentation":"

The description of the simulation.

" + }, + "ExecutionId":{ + "shape":"UUID", + "documentation":"

A universally unique identifier (UUID) for this simulation.

" + }, + "LiveSimulationState":{ + "shape":"LiveSimulationState", + "documentation":"

A collection of additional state information, such as domain and clock configuration.

" + }, + "LoggingConfiguration":{ + "shape":"LoggingConfiguration", + "documentation":"

Settings that control how SimSpace Weaver handles your simulation log data.

" + }, + "MaximumDuration":{ + "shape":"TimeToLiveString", + "documentation":"

The maximum running time of the simulation, specified as a number of months (m or M), hours (h or H), or days (d or D). The simulation stops when it reaches this limit.

" + }, + "Name":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the simulation.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that the simulation assumes to perform actions. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference. For more information about IAM roles, see IAM roles in the Identity and Access Management User Guide.

" + }, + "SchemaError":{ + "shape":"OptionalString", + "documentation":"

An error message that SimSpace Weaver returns only if there is a problem with the simulation schema.

" + }, + "SchemaS3Location":{ + "shape":"S3Location", + "documentation":"

The location of the simulation schema in Amazon Simple Storage Service (Amazon S3). For more information about Amazon S3, see the Amazon Simple Storage Service User Guide .

" + }, + "Status":{ + "shape":"SimulationStatus", + "documentation":"

The current lifecycle state of the simulation.

" + }, + "TargetStatus":{ + "shape":"SimulationTargetStatus", + "documentation":"

The desired lifecycle state of the simulation.

" + } + } + }, + "Description":{ + "type":"string", + "max":500, + "min":0 + }, + "Domain":{ + "type":"structure", + "members":{ + "Lifecycle":{ + "shape":"LifecycleManagementStrategy", + "documentation":"

The type of lifecycle management for apps in the domain. This value indicates whether apps in this domain are managed (SimSpace Weaver starts and stops the apps) or unmanaged (you must start and stop the apps).

Lifecycle types

The lifecycle types will change when the service is released for general availability (GA).

" + }, + "Name":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the domain.

" + } + }, + "documentation":"

A collection of app instances that run the same executable app code and have the same launch options and commands.

For more information about domains, see Key concepts in the Amazon Web Services SimSpace Weaver User Guide.

" + }, + "DomainList":{ + "type":"list", + "member":{"shape":"Domain"} + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "documentation":"

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "LaunchCommandList":{ + "type":"list", + "member":{"shape":"NonEmptyString"} + }, + "LaunchOverrides":{ + "type":"structure", + "members":{ + "LaunchCommands":{ + "shape":"LaunchCommandList", + "documentation":"

App launch commands and command line parameters that override the launch command configured in the simulation schema.

" + } + }, + "documentation":"

Options that apply when the app starts. These options override default behavior.

" + }, + "LifecycleManagementStrategy":{ + "type":"string", + "enum":[ + "Unknown", + "PerWorker", + "BySpatialSubdivision", + "ByRequest" + ] + }, + "ListAppsInput":{ + "type":"structure", + "required":["Simulation"], + "members":{ + "Domain":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the domain that you want to list apps for.

", + "location":"querystring", + "locationName":"domain" + }, + "MaxResults":{ + "shape":"PositiveInteger", + "documentation":"

The maximum number of apps to list.

", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"OptionalString", + "documentation":"

If SimSpace Weaver returns nextToken, there are more results available. The value of nextToken is a unique pagination token for each page. To retrieve the next page, call the operation again using the returned token. Keep all other arguments unchanged. If no results remain, nextToken is set to null. Each pagination token expires after 24 hours. If you provide a token that isn't valid, you receive an HTTP 400 ValidationException error.

", + "location":"querystring", + "locationName":"nextToken" + }, + "Simulation":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the simulation that you want to list apps for.

", + "location":"querystring", + "locationName":"simulation" + } + } + }, + "ListAppsOutput":{ + "type":"structure", + "members":{ + "Apps":{ + "shape":"SimulationAppList", + "documentation":"

The list of apps for the given simulation and domain.

" + }, + "NextToken":{ + "shape":"OptionalString", + "documentation":"

If SimSpace Weaver returns nextToken, there are more results available. The value of nextToken is a unique pagination token for each page. To retrieve the next page, call the operation again using the returned token. Keep all other arguments unchanged. If no results remain, nextToken is set to null. Each pagination token expires after 24 hours. If you provide a token that isn't valid, you receive an HTTP 400 ValidationException error.

" + } + } + }, + "ListSimulationsInput":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"PositiveInteger", + "documentation":"

The maximum number of simulations to list.

", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"OptionalString", + "documentation":"

If SimSpace Weaver returns nextToken, there are more results available. The value of nextToken is a unique pagination token for each page. To retrieve the next page, call the operation again using the returned token. Keep all other arguments unchanged. If no results remain, nextToken is set to null. Each pagination token expires after 24 hours. If you provide a token that isn't valid, you receive an HTTP 400 ValidationException error.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListSimulationsOutput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"OptionalString", + "documentation":"

If SimSpace Weaver returns nextToken, there are more results available. The value of nextToken is a unique pagination token for each page. To retrieve the next page, call the operation again using the returned token. Keep all other arguments unchanged. If no results remain, nextToken is set to null. Each pagination token expires after 24 hours. If you provide a token that isn't valid, you receive an HTTP 400 ValidationException error.

" + }, + "Simulations":{ + "shape":"SimulationList", + "documentation":"

The list of simulations.

" + } + } + }, + "ListTagsForResourceInput":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"SimSpaceWeaverArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference.

", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceOutput":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagMap", + "documentation":"

The list of tags for the resource.

" + } + } + }, + "LiveSimulationState":{ + "type":"structure", + "members":{ + "Clocks":{ + "shape":"SimulationClockList", + "documentation":"

A list of simulation clocks.

At this time, a simulation has only one clock.

" + }, + "Domains":{ + "shape":"DomainList", + "documentation":"

A list of domains for the simulation. For more information about domains, see Key concepts in the Amazon Web Services SimSpace Weaver User Guide.

" + } + }, + "documentation":"

A collection of additional state information, such as domain and clock configuration.

" + }, + "LogDestination":{ + "type":"structure", + "members":{ + "CloudWatchLogsLogGroup":{ + "shape":"CloudWatchLogsLogGroup", + "documentation":"

An Amazon CloudWatch Logs log group that stores simulation log data. For more information about log groups, see Working with log groups and log streams in the Amazon CloudWatch Logs User Guide.

" + } + }, + "documentation":"

The location where SimSpace Weaver sends simulation log data.

" + }, + "LogDestinations":{ + "type":"list", + "member":{"shape":"LogDestination"} + }, + "LogGroupArn":{ + "type":"string", + "max":1600, + "min":0, + "pattern":"^arn:(?:aws|aws-cn):log-group:([a-z]{2}-[a-z]+-\\d{1}):(\\d{12})?:role\\/(.+)$" + }, + "LoggingConfiguration":{ + "type":"structure", + "members":{ + "Destinations":{ + "shape":"LogDestinations", + "documentation":"

A list of the locations where SimSpace Weaver sends simulation log data.

" + } + }, + "documentation":"

The logging configuration for a simulation.

" + }, + "NonEmptyString":{ + "type":"string", + "max":1600, + "min":1 + }, + "ObjectKey":{ + "type":"string", + "max":1024, + "min":1 + }, + "OptionalString":{"type":"string"}, + "PortNumber":{ + "type":"integer", + "box":true, + "max":65535, + "min":0 + }, + "PositiveInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "documentation":"

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "RoleArn":{ + "type":"string", + "max":1600, + "min":0, + "pattern":"^arn:(?:aws|aws-cn):iam::(\\d{12})?:role\\/(.+)$" + }, + "S3Location":{ + "type":"structure", + "members":{ + "BucketName":{ + "shape":"BucketName", + "documentation":"

The name of an Amazon S3 bucket. For more information about buckets, see Creating, configuring, and working with Amazon S3 buckets in the Amazon Simple Storage Service User Guide.

" + }, + "ObjectKey":{ + "shape":"ObjectKey", + "documentation":"

The key name of an object in Amazon S3. For more information about Amazon S3 objects and object keys, see Uploading, downloading, and working with objects in Amazon S3 in the Amazon Simple Storage Service User Guide.

" + } + }, + "documentation":"

A location in Amazon Simple Storage Service (Amazon S3) where SimSpace Weaver stores simulation data, such as your app zip files and schema file. For more information about Amazon S3, see the Amazon Simple Storage Service User Guide .

" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "documentation":"

", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "SimSpaceWeaverArn":{ + "type":"string", + "max":1600, + "min":0, + "pattern":"^arn:(?:aws|aws-cn):simspaceweaver:([a-z]{2}-[a-z]+-\\d{1}):(\\d{12})?:([a-z]+)\\/(.+)$" + }, + "SimSpaceWeaverResourceName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9_.-]+$" + }, + "SimulationAppEndpointInfo":{ + "type":"structure", + "members":{ + "Address":{ + "shape":"NonEmptyString", + "documentation":"

The IP address of the app. SimSpace Weaver dynamically assigns this IP address when the app starts.

" + }, + "IngressPortMappings":{ + "shape":"AppPortMappings", + "documentation":"

The inbound TCP/UDP port numbers of the app. The combination of an IP address and a port number form a network endpoint.

" + } + }, + "documentation":"

Information about the network endpoint that you can use to connect to your custom or service app.

" + }, + "SimulationAppList":{ + "type":"list", + "member":{"shape":"SimulationAppMetadata"} + }, + "SimulationAppMetadata":{ + "type":"structure", + "members":{ + "Domain":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The domain of the app. For more information about domains, see Key concepts in the Amazon Web Services SimSpace Weaver User Guide.

" + }, + "Name":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the app.

" + }, + "Simulation":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the simulation of the app.

" + }, + "Status":{ + "shape":"SimulationAppStatus", + "documentation":"

The current status of the app.

" + }, + "TargetStatus":{ + "shape":"SimulationAppTargetStatus", + "documentation":"

The desired status of the app.

" + } + }, + "documentation":"

A collection of metadata about an app.

" + }, + "SimulationAppPortMapping":{ + "type":"structure", + "members":{ + "Actual":{ + "shape":"PortNumber", + "documentation":"

The TCP/UDP port number of the running app. SimSpace Weaver dynamically assigns this port number when the app starts. SimSpace Weaver maps the Declared port to the Actual port. Clients connect to the app using the app's IP address and the Actual port number.

" + }, + "Declared":{ + "shape":"PortNumber", + "documentation":"

The TCP/UDP port number of the app, declared in the simulation schema. SimSpace Weaver maps the Declared port to the Actual port. The source code for the app should bind to the Declared port.

" + } + }, + "documentation":"

A collection of TCP/UDP ports for a custom or service app.

" + }, + "SimulationAppStatus":{ + "type":"string", + "enum":[ + "STARTING", + "STARTED", + "STOPPING", + "STOPPED", + "ERROR", + "UNKNOWN" + ] + }, + "SimulationAppTargetStatus":{ + "type":"string", + "enum":[ + "UNKNOWN", + "STARTED", + "STOPPED" + ] + }, + "SimulationClock":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"ClockStatus", + "documentation":"

The current status of the simulation clock.

" + }, + "TargetStatus":{ + "shape":"ClockTargetStatus", + "documentation":"

The desired status of the simulation clock.

" + } + }, + "documentation":"

Status information about the simulation clock.

" + }, + "SimulationClockList":{ + "type":"list", + "member":{"shape":"SimulationClock"} + }, + "SimulationList":{ + "type":"list", + "member":{"shape":"SimulationMetadata"} + }, + "SimulationMetadata":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"SimSpaceWeaverArn", + "documentation":"

The Amazon Resource Name (ARN) of the simulation. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time when the simulation was created, expressed as the number of seconds and milliseconds in UTC since the Unix epoch (0:0:0.000, January 1, 1970).

" + }, + "Name":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the simulation.

" + }, + "Status":{ + "shape":"SimulationStatus", + "documentation":"

The current status of the simulation.

" + }, + "TargetStatus":{ + "shape":"SimulationTargetStatus", + "documentation":"

The desired status of the simulation.

" + } + }, + "documentation":"

A collection of data about the simulation.

" + }, + "SimulationStatus":{ + "type":"string", + "enum":[ + "UNKNOWN", + "STARTING", + "STARTED", + "STOPPING", + "STOPPED", + "FAILED", + "DELETING", + "DELETED" + ] + }, + "SimulationTargetStatus":{ + "type":"string", + "enum":[ + "UNKNOWN", + "STARTED", + "STOPPED", + "DELETED" + ] + }, + "StartAppInput":{ + "type":"structure", + "required":[ + "Domain", + "Name", + "Simulation" + ], + "members":{ + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

A value that you provide to ensure that repeated calls to this API operation using the same parameters complete only once. A ClientToken is also known as an idempotency token. A ClientToken expires after 24 hours.

", + "idempotencyToken":true + }, + "Description":{ + "shape":"Description", + "documentation":"

The description of the app.

" + }, + "Domain":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the domain of the app.

" + }, + "LaunchOverrides":{"shape":"LaunchOverrides"}, + "Name":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the app.

" + }, + "Simulation":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the simulation of the app.

" + } + } + }, + "StartAppOutput":{ + "type":"structure", + "members":{ + "Domain":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the domain of the app.

" + }, + "Name":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the app.

" + }, + "Simulation":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the simulation of the app.

" + } + } + }, + "StartClockInput":{ + "type":"structure", + "required":["Simulation"], + "members":{ + "Simulation":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the simulation.

" + } + } + }, + "StartClockOutput":{ + "type":"structure", + "members":{ + } + }, + "StartSimulationInput":{ + "type":"structure", + "required":[ + "Name", + "RoleArn", + "SchemaS3Location" + ], + "members":{ + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

A value that you provide to ensure that repeated calls to this API operation using the same parameters complete only once. A ClientToken is also known as an idempotency token. A ClientToken expires after 24 hours.

", + "idempotencyToken":true + }, + "Description":{ + "shape":"Description", + "documentation":"

The description of the simulation.

" + }, + "MaximumDuration":{ + "shape":"TimeToLiveString", + "documentation":"

The maximum running time of the simulation, specified as a number of months (m or M), hours (h or H), or days (d or D). The simulation stops when it reaches this limit.

" + }, + "Name":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the simulation.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that the simulation assumes to perform actions. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference. For more information about IAM roles, see IAM roles in the Identity and Access Management User Guide.

" + }, + "SchemaS3Location":{ + "shape":"S3Location", + "documentation":"

The location of the simulation schema in Amazon Simple Storage Service (Amazon S3). For more information about Amazon S3, see the Amazon Simple Storage Service User Guide .

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

A list of tags for the simulation. For more information about tags, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

" + } + } + }, + "StartSimulationOutput":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"SimSpaceWeaverArn", + "documentation":"

The Amazon Resource Name (ARN) of the simulation. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time when the simulation was created, expressed as the number of seconds and milliseconds in UTC since the Unix epoch (0:0:0.000, January 1, 1970).

" + }, + "ExecutionId":{ + "shape":"UUID", + "documentation":"

A universally unique identifier (UUID) for this simulation.

" + } + } + }, + "StopAppInput":{ + "type":"structure", + "required":[ + "App", + "Domain", + "Simulation" + ], + "members":{ + "App":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the app.

" + }, + "Domain":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the domain of the app.

" + }, + "Simulation":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the simulation of the app.

" + } + } + }, + "StopAppOutput":{ + "type":"structure", + "members":{ + } + }, + "StopClockInput":{ + "type":"structure", + "required":["Simulation"], + "members":{ + "Simulation":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the simulation.

" + } + } + }, + "StopClockOutput":{ + "type":"structure", + "members":{ + } + }, + "StopSimulationInput":{ + "type":"structure", + "required":["Simulation"], + "members":{ + "Simulation":{ + "shape":"SimSpaceWeaverResourceName", + "documentation":"

The name of the simulation.

" + } + } + }, + "StopSimulationOutput":{ + "type":"structure", + "members":{ + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceInput":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"SimSpaceWeaverArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to add tags to. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference.

", + "location":"uri", + "locationName":"ResourceArn" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

A list of tags to apply to the resource.

" + } + } + }, + "TagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "TimeToLiveString":{ + "type":"string", + "max":6, + "min":2, + "pattern":"^\\d{1,5}[mhdMHD]$" + }, + "Timestamp":{"type":"timestamp"}, + "TooManyTagsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "documentation":"

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "UUID":{ + "type":"string", + "min":36, + "pattern":"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + }, + "UntagResourceInput":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"SimSpaceWeaverArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to remove tags from. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference.

", + "location":"uri", + "locationName":"ResourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

A list of tag keys to remove from the resource.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "documentation":"

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + }, + "documentation":"

Amazon Web Services SimSpace Weaver (SimSpace Weaver) is a managed service that you can use to build and operate large-scale spatial simulations in the Amazon Web Services Cloud. For example, you can create a digital twin of a city, crowd simulations with millions of people and objects, and massively-multiplayer games with hundreds of thousands of connected players. For more information about SimSpace Weaver, see the Amazon Web Services SimSpace Weaver User Guide .

This API reference describes the API operations and data types that you can use to communicate directly with SimSpace Weaver.

SimSpace Weaver also provides the SimSpace Weaver app SDK, which you use for app development. The SimSpace Weaver app SDK API reference is included in the SimSpace Weaver app SDK documentation, which is part of the SimSpace Weaver app SDK distributable package.

" +} diff --git a/tests/functional/endpoint-rules/firehose/endpoint-tests-1.json b/tests/functional/endpoint-rules/firehose/endpoint-tests-1.json index 349f290a31..ab71fd49ec 100644 --- a/tests/functional/endpoint-rules/firehose/endpoint-tests-1.json +++ b/tests/functional/endpoint-rules/firehose/endpoint-tests-1.json @@ -8,9 +8,9 @@ } }, "params": { - "Region": "ap-south-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-south-1" } }, { @@ -21,9 +21,9 @@ } }, "params": { - "Region": "ap-south-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-south-1" } }, { @@ -34,9 +34,9 @@ } }, "params": { - "Region": "ap-south-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-south-1" } }, { @@ -47,9 +47,9 @@ } }, "params": { - "Region": "ap-south-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-south-1" } }, { @@ -60,9 +60,9 @@ } }, "params": { - "Region": "eu-south-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-south-1" } }, { @@ -73,9 +73,9 @@ } }, "params": { - "Region": "eu-south-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-south-1" } }, { @@ -86,9 +86,9 @@ } }, "params": { - "Region": "eu-south-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-south-1" } }, { @@ -99,9 +99,9 @@ } }, "params": { - "Region": "eu-south-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-south-1" } }, { @@ -112,9 +112,9 @@ } }, "params": { - "Region": "eu-south-2", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-south-2" } }, { @@ -125,9 +125,9 @@ } }, "params": { - "Region": "eu-south-2", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-south-2" } }, { @@ -138,9 +138,9 @@ } }, "params": { - "Region": "eu-south-2", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": 
"eu-south-2" } }, { @@ -151,9 +151,9 @@ } }, "params": { - "Region": "eu-south-2", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-south-2" } }, { @@ -164,9 +164,9 @@ } }, "params": { - "Region": "us-gov-east-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-gov-east-1" } }, { @@ -177,9 +177,9 @@ } }, "params": { - "Region": "us-gov-east-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-gov-east-1" } }, { @@ -190,9 +190,9 @@ } }, "params": { - "Region": "us-gov-east-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-gov-east-1" } }, { @@ -203,9 +203,9 @@ } }, "params": { - "Region": "us-gov-east-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-gov-east-1" } }, { @@ -216,9 +216,9 @@ } }, "params": { - "Region": "me-central-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "me-central-1" } }, { @@ -229,9 +229,9 @@ } }, "params": { - "Region": "me-central-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "me-central-1" } }, { @@ -242,9 +242,9 @@ } }, "params": { - "Region": "me-central-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "me-central-1" } }, { @@ -255,9 +255,9 @@ } }, "params": { - "Region": "me-central-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "me-central-1" } }, { @@ -268,9 +268,9 @@ } }, "params": { - "Region": "ca-central-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ca-central-1" } }, { @@ -281,9 +281,9 @@ } }, "params": { - "Region": "ca-central-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ca-central-1" } }, { @@ -294,9 +294,9 @@ } }, "params": { - "Region": "ca-central-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ca-central-1" } }, { @@ -307,9 +307,9 @@ } }, "params": { - "Region": 
"ca-central-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ca-central-1" } }, { @@ -320,9 +320,9 @@ } }, "params": { - "Region": "eu-central-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-central-1" } }, { @@ -333,9 +333,9 @@ } }, "params": { - "Region": "eu-central-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-central-1" } }, { @@ -346,9 +346,9 @@ } }, "params": { - "Region": "eu-central-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-central-1" } }, { @@ -359,9 +359,9 @@ } }, "params": { - "Region": "eu-central-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-central-1" } }, { @@ -370,9 +370,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-iso-west-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-iso-west-1" } }, { @@ -383,9 +383,9 @@ } }, "params": { - "Region": "us-iso-west-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-iso-west-1" } }, { @@ -394,9 +394,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-iso-west-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-iso-west-1" } }, { @@ -407,9 +407,9 @@ } }, "params": { - "Region": "us-iso-west-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-iso-west-1" } }, { @@ -420,9 +420,9 @@ } }, "params": { - "Region": "us-west-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-west-1" } }, { @@ -433,9 +433,9 @@ } }, "params": { - "Region": "us-west-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-west-1" } }, { @@ -446,9 +446,9 @@ } }, "params": { - "Region": "us-west-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + 
"Region": "us-west-1" } }, { @@ -459,9 +459,9 @@ } }, "params": { - "Region": "us-west-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-west-1" } }, { @@ -472,9 +472,9 @@ } }, "params": { - "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-west-2" } }, { @@ -485,9 +485,9 @@ } }, "params": { - "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-west-2" } }, { @@ -498,9 +498,9 @@ } }, "params": { - "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-west-2" } }, { @@ -511,9 +511,9 @@ } }, "params": { - "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-west-2" } }, { @@ -524,9 +524,9 @@ } }, "params": { - "Region": "af-south-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "af-south-1" } }, { @@ -537,9 +537,9 @@ } }, "params": { - "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "af-south-1" } }, { @@ -550,9 +550,9 @@ } }, "params": { - "Region": "af-south-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "af-south-1" } }, { @@ -563,9 +563,9 @@ } }, "params": { - "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "af-south-1" } }, { @@ -576,9 +576,9 @@ } }, "params": { - "Region": "eu-north-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-north-1" } }, { @@ -589,9 +589,9 @@ } }, "params": { - "Region": "eu-north-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-north-1" } }, { @@ -602,9 +602,9 @@ } }, "params": { - "Region": "eu-north-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-north-1" } }, { @@ -615,9 +615,9 @@ } }, "params": { - "Region": "eu-north-1", "UseDualStack": false, - "UseFIPS": false + 
"UseFIPS": false, + "Region": "eu-north-1" } }, { @@ -628,9 +628,9 @@ } }, "params": { - "Region": "eu-west-3", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-west-3" } }, { @@ -641,9 +641,9 @@ } }, "params": { - "Region": "eu-west-3", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-west-3" } }, { @@ -654,9 +654,9 @@ } }, "params": { - "Region": "eu-west-3", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-west-3" } }, { @@ -667,9 +667,9 @@ } }, "params": { - "Region": "eu-west-3", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-west-3" } }, { @@ -680,9 +680,9 @@ } }, "params": { - "Region": "eu-west-2", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-west-2" } }, { @@ -693,9 +693,9 @@ } }, "params": { - "Region": "eu-west-2", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-west-2" } }, { @@ -706,9 +706,9 @@ } }, "params": { - "Region": "eu-west-2", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-west-2" } }, { @@ -719,9 +719,9 @@ } }, "params": { - "Region": "eu-west-2", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-west-2" } }, { @@ -732,9 +732,9 @@ } }, "params": { - "Region": "eu-west-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-west-1" } }, { @@ -745,9 +745,9 @@ } }, "params": { - "Region": "eu-west-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-west-1" } }, { @@ -758,9 +758,9 @@ } }, "params": { - "Region": "eu-west-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-west-1" } }, { @@ -771,9 +771,9 @@ } }, "params": { - "Region": "eu-west-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-west-1" } }, { @@ -784,9 +784,9 @@ } }, "params": { - "Region": "ap-northeast-3", "UseDualStack": true, - "UseFIPS": true 
+ "UseFIPS": true, + "Region": "ap-northeast-3" } }, { @@ -797,9 +797,9 @@ } }, "params": { - "Region": "ap-northeast-3", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-northeast-3" } }, { @@ -810,9 +810,9 @@ } }, "params": { - "Region": "ap-northeast-3", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-northeast-3" } }, { @@ -823,9 +823,9 @@ } }, "params": { - "Region": "ap-northeast-3", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-northeast-3" } }, { @@ -836,9 +836,9 @@ } }, "params": { - "Region": "ap-northeast-2", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-northeast-2" } }, { @@ -849,9 +849,9 @@ } }, "params": { - "Region": "ap-northeast-2", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-northeast-2" } }, { @@ -862,9 +862,9 @@ } }, "params": { - "Region": "ap-northeast-2", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-northeast-2" } }, { @@ -875,9 +875,9 @@ } }, "params": { - "Region": "ap-northeast-2", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-northeast-2" } }, { @@ -888,9 +888,9 @@ } }, "params": { - "Region": "ap-northeast-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-northeast-1" } }, { @@ -901,9 +901,9 @@ } }, "params": { - "Region": "ap-northeast-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-northeast-1" } }, { @@ -914,9 +914,9 @@ } }, "params": { - "Region": "ap-northeast-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-northeast-1" } }, { @@ -927,9 +927,9 @@ } }, "params": { - "Region": "ap-northeast-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-northeast-1" } }, { @@ -940,9 +940,9 @@ } }, "params": { - "Region": "me-south-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": 
"me-south-1" } }, { @@ -953,9 +953,9 @@ } }, "params": { - "Region": "me-south-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "me-south-1" } }, { @@ -966,9 +966,9 @@ } }, "params": { - "Region": "me-south-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "me-south-1" } }, { @@ -979,9 +979,9 @@ } }, "params": { - "Region": "me-south-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "me-south-1" } }, { @@ -992,9 +992,9 @@ } }, "params": { - "Region": "sa-east-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "sa-east-1" } }, { @@ -1005,9 +1005,9 @@ } }, "params": { - "Region": "sa-east-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "sa-east-1" } }, { @@ -1018,9 +1018,9 @@ } }, "params": { - "Region": "sa-east-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "sa-east-1" } }, { @@ -1031,9 +1031,9 @@ } }, "params": { - "Region": "sa-east-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "sa-east-1" } }, { @@ -1044,9 +1044,9 @@ } }, "params": { - "Region": "ap-east-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-east-1" } }, { @@ -1057,9 +1057,9 @@ } }, "params": { - "Region": "ap-east-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-east-1" } }, { @@ -1070,9 +1070,9 @@ } }, "params": { - "Region": "ap-east-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-east-1" } }, { @@ -1083,9 +1083,9 @@ } }, "params": { - "Region": "ap-east-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-east-1" } }, { @@ -1096,9 +1096,9 @@ } }, "params": { - "Region": "cn-north-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "cn-north-1" } }, { @@ -1109,9 +1109,9 @@ } }, "params": { - "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": true + 
"UseFIPS": true, + "Region": "cn-north-1" } }, { @@ -1122,9 +1122,9 @@ } }, "params": { - "Region": "cn-north-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "cn-north-1" } }, { @@ -1135,9 +1135,9 @@ } }, "params": { - "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "cn-north-1" } }, { @@ -1148,9 +1148,9 @@ } }, "params": { - "Region": "us-gov-west-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-gov-west-1" } }, { @@ -1161,9 +1161,9 @@ } }, "params": { - "Region": "us-gov-west-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-gov-west-1" } }, { @@ -1174,9 +1174,9 @@ } }, "params": { - "Region": "us-gov-west-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-gov-west-1" } }, { @@ -1187,9 +1187,9 @@ } }, "params": { - "Region": "us-gov-west-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-gov-west-1" } }, { @@ -1200,9 +1200,9 @@ } }, "params": { - "Region": "ap-southeast-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-southeast-1" } }, { @@ -1213,9 +1213,9 @@ } }, "params": { - "Region": "ap-southeast-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-southeast-1" } }, { @@ -1226,9 +1226,9 @@ } }, "params": { - "Region": "ap-southeast-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-southeast-1" } }, { @@ -1239,9 +1239,9 @@ } }, "params": { - "Region": "ap-southeast-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-southeast-1" } }, { @@ -1252,9 +1252,9 @@ } }, "params": { - "Region": "ap-southeast-2", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-southeast-2" } }, { @@ -1265,9 +1265,9 @@ } }, "params": { - "Region": "ap-southeast-2", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": 
"ap-southeast-2" } }, { @@ -1278,9 +1278,9 @@ } }, "params": { - "Region": "ap-southeast-2", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-southeast-2" } }, { @@ -1291,9 +1291,9 @@ } }, "params": { - "Region": "ap-southeast-2", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-southeast-2" } }, { @@ -1302,9 +1302,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-iso-east-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-iso-east-1" } }, { @@ -1315,9 +1315,9 @@ } }, "params": { - "Region": "us-iso-east-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-iso-east-1" } }, { @@ -1326,9 +1326,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-iso-east-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-iso-east-1" } }, { @@ -1339,9 +1339,9 @@ } }, "params": { - "Region": "us-iso-east-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-iso-east-1" } }, { @@ -1352,9 +1352,9 @@ } }, "params": { - "Region": "ap-southeast-3", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-southeast-3" } }, { @@ -1365,9 +1365,9 @@ } }, "params": { - "Region": "ap-southeast-3", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-southeast-3" } }, { @@ -1378,9 +1378,9 @@ } }, "params": { - "Region": "ap-southeast-3", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-southeast-3" } }, { @@ -1391,9 +1391,9 @@ } }, "params": { - "Region": "ap-southeast-3", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-southeast-3" } }, { @@ -1404,9 +1404,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-east-1" } }, { @@ 
-1417,9 +1417,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-east-1" } }, { @@ -1430,9 +1430,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-east-1" } }, { @@ -1443,9 +1443,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-east-1" } }, { @@ -1456,9 +1456,9 @@ } }, "params": { - "Region": "us-east-2", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-east-2" } }, { @@ -1469,9 +1469,9 @@ } }, "params": { - "Region": "us-east-2", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-east-2" } }, { @@ -1482,9 +1482,9 @@ } }, "params": { - "Region": "us-east-2", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-east-2" } }, { @@ -1495,9 +1495,9 @@ } }, "params": { - "Region": "us-east-2", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-east-2" } }, { @@ -1508,9 +1508,9 @@ } }, "params": { - "Region": "cn-northwest-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "cn-northwest-1" } }, { @@ -1521,9 +1521,9 @@ } }, "params": { - "Region": "cn-northwest-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "cn-northwest-1" } }, { @@ -1534,9 +1534,9 @@ } }, "params": { - "Region": "cn-northwest-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "cn-northwest-1" } }, { @@ -1547,9 +1547,9 @@ } }, "params": { - "Region": "cn-northwest-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "cn-northwest-1" } }, { @@ -1558,9 +1558,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-isob-east-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-isob-east-1" } }, 
{ @@ -1571,9 +1571,9 @@ } }, "params": { - "Region": "us-isob-east-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-isob-east-1" } }, { @@ -1582,9 +1582,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-isob-east-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-isob-east-1" } }, { @@ -1595,9 +1595,9 @@ } }, "params": { - "Region": "us-isob-east-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-isob-east-1" } }, { @@ -1608,9 +1608,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": false, "UseFIPS": false, + "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -1620,9 +1620,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "Region": "us-east-1", "UseDualStack": false, "UseFIPS": true, + "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -1632,9 +1632,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "Region": "us-east-1", "UseDualStack": true, "UseFIPS": false, + "Region": "us-east-1", "Endpoint": "https://example.com" } } diff --git a/tests/functional/endpoint-rules/kms/endpoint-tests-1.json b/tests/functional/endpoint-rules/kms/endpoint-tests-1.json index d393f78de1..26c6afdc81 100644 --- a/tests/functional/endpoint-rules/kms/endpoint-tests-1.json +++ b/tests/functional/endpoint-rules/kms/endpoint-tests-1.json @@ -8,9 +8,9 @@ } }, "params": { - "Region": "ap-south-2", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-south-2" } }, { @@ -21,9 +21,9 @@ } }, "params": { - "Region": "ap-south-2", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-south-2" } }, { @@ -34,9 +34,9 @@ } }, "params": { - "Region": "ap-south-2", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-south-2" } }, { @@ -47,9 
+47,9 @@ } }, "params": { - "Region": "ap-south-2", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-south-2" } }, { @@ -60,9 +60,9 @@ } }, "params": { - "Region": "ap-south-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-south-1" } }, { @@ -73,9 +73,9 @@ } }, "params": { - "Region": "ap-south-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-south-1" } }, { @@ -86,9 +86,9 @@ } }, "params": { - "Region": "ap-south-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-south-1" } }, { @@ -99,9 +99,9 @@ } }, "params": { - "Region": "ap-south-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-south-1" } }, { @@ -112,9 +112,9 @@ } }, "params": { - "Region": "eu-south-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-south-1" } }, { @@ -125,9 +125,9 @@ } }, "params": { - "Region": "eu-south-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-south-1" } }, { @@ -138,9 +138,9 @@ } }, "params": { - "Region": "eu-south-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-south-1" } }, { @@ -151,9 +151,9 @@ } }, "params": { - "Region": "eu-south-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-south-1" } }, { @@ -164,9 +164,9 @@ } }, "params": { - "Region": "eu-south-2", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-south-2" } }, { @@ -177,9 +177,9 @@ } }, "params": { - "Region": "eu-south-2", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-south-2" } }, { @@ -190,9 +190,9 @@ } }, "params": { - "Region": "eu-south-2", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-south-2" } }, { @@ -203,9 +203,9 @@ } }, "params": { - "Region": "eu-south-2", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-south-2" } 
}, { @@ -216,9 +216,9 @@ } }, "params": { - "Region": "us-gov-east-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-gov-east-1" } }, { @@ -229,9 +229,9 @@ } }, "params": { - "Region": "us-gov-east-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-gov-east-1" } }, { @@ -242,9 +242,9 @@ } }, "params": { - "Region": "us-gov-east-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-gov-east-1" } }, { @@ -255,9 +255,9 @@ } }, "params": { - "Region": "us-gov-east-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-gov-east-1" } }, { @@ -268,9 +268,9 @@ } }, "params": { - "Region": "me-central-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "me-central-1" } }, { @@ -281,9 +281,9 @@ } }, "params": { - "Region": "me-central-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "me-central-1" } }, { @@ -294,9 +294,9 @@ } }, "params": { - "Region": "me-central-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "me-central-1" } }, { @@ -307,9 +307,9 @@ } }, "params": { - "Region": "me-central-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "me-central-1" } }, { @@ -320,9 +320,9 @@ } }, "params": { - "Region": "ca-central-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ca-central-1" } }, { @@ -333,9 +333,9 @@ } }, "params": { - "Region": "ca-central-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ca-central-1" } }, { @@ -346,9 +346,9 @@ } }, "params": { - "Region": "ca-central-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ca-central-1" } }, { @@ -359,9 +359,9 @@ } }, "params": { - "Region": "ca-central-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ca-central-1" } }, { @@ -372,9 +372,9 @@ } }, "params": { - "Region": "eu-central-1", 
"UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-central-1" } }, { @@ -385,9 +385,9 @@ } }, "params": { - "Region": "eu-central-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-central-1" } }, { @@ -398,9 +398,9 @@ } }, "params": { - "Region": "eu-central-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-central-1" } }, { @@ -411,9 +411,9 @@ } }, "params": { - "Region": "eu-central-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-central-1" } }, { @@ -422,9 +422,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-iso-west-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-iso-west-1" } }, { @@ -435,9 +435,9 @@ } }, "params": { - "Region": "us-iso-west-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-iso-west-1" } }, { @@ -446,9 +446,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-iso-west-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-iso-west-1" } }, { @@ -459,9 +459,9 @@ } }, "params": { - "Region": "us-iso-west-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-iso-west-1" } }, { @@ -472,9 +472,9 @@ } }, "params": { - "Region": "eu-central-2", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-central-2" } }, { @@ -485,9 +485,9 @@ } }, "params": { - "Region": "eu-central-2", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-central-2" } }, { @@ -498,9 +498,9 @@ } }, "params": { - "Region": "eu-central-2", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-central-2" } }, { @@ -511,9 +511,9 @@ } }, "params": { - "Region": "eu-central-2", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": 
"eu-central-2" } }, { @@ -524,9 +524,9 @@ } }, "params": { - "Region": "us-west-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-west-1" } }, { @@ -537,9 +537,9 @@ } }, "params": { - "Region": "us-west-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-west-1" } }, { @@ -550,9 +550,9 @@ } }, "params": { - "Region": "us-west-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-west-1" } }, { @@ -563,9 +563,9 @@ } }, "params": { - "Region": "us-west-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-west-1" } }, { @@ -576,9 +576,9 @@ } }, "params": { - "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-west-2" } }, { @@ -589,9 +589,9 @@ } }, "params": { - "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-west-2" } }, { @@ -602,9 +602,9 @@ } }, "params": { - "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-west-2" } }, { @@ -615,9 +615,9 @@ } }, "params": { - "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-west-2" } }, { @@ -628,9 +628,9 @@ } }, "params": { - "Region": "af-south-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "af-south-1" } }, { @@ -641,9 +641,9 @@ } }, "params": { - "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "af-south-1" } }, { @@ -654,9 +654,9 @@ } }, "params": { - "Region": "af-south-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "af-south-1" } }, { @@ -667,9 +667,9 @@ } }, "params": { - "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "af-south-1" } }, { @@ -680,9 +680,9 @@ } }, "params": { - "Region": "eu-north-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + 
"Region": "eu-north-1" } }, { @@ -693,9 +693,9 @@ } }, "params": { - "Region": "eu-north-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-north-1" } }, { @@ -706,9 +706,9 @@ } }, "params": { - "Region": "eu-north-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-north-1" } }, { @@ -719,9 +719,9 @@ } }, "params": { - "Region": "eu-north-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-north-1" } }, { @@ -732,9 +732,9 @@ } }, "params": { - "Region": "eu-west-3", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-west-3" } }, { @@ -745,9 +745,9 @@ } }, "params": { - "Region": "eu-west-3", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-west-3" } }, { @@ -758,9 +758,9 @@ } }, "params": { - "Region": "eu-west-3", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-west-3" } }, { @@ -771,9 +771,9 @@ } }, "params": { - "Region": "eu-west-3", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-west-3" } }, { @@ -784,9 +784,9 @@ } }, "params": { - "Region": "eu-west-2", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-west-2" } }, { @@ -797,9 +797,9 @@ } }, "params": { - "Region": "eu-west-2", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-west-2" } }, { @@ -810,9 +810,9 @@ } }, "params": { - "Region": "eu-west-2", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-west-2" } }, { @@ -823,9 +823,9 @@ } }, "params": { - "Region": "eu-west-2", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-west-2" } }, { @@ -836,9 +836,9 @@ } }, "params": { - "Region": "eu-west-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-west-1" } }, { @@ -849,9 +849,9 @@ } }, "params": { - "Region": "eu-west-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, 
+ "Region": "eu-west-1" } }, { @@ -862,9 +862,9 @@ } }, "params": { - "Region": "eu-west-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-west-1" } }, { @@ -875,9 +875,9 @@ } }, "params": { - "Region": "eu-west-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-west-1" } }, { @@ -888,9 +888,9 @@ } }, "params": { - "Region": "ap-northeast-3", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-northeast-3" } }, { @@ -901,9 +901,9 @@ } }, "params": { - "Region": "ap-northeast-3", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-northeast-3" } }, { @@ -914,9 +914,9 @@ } }, "params": { - "Region": "ap-northeast-3", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-northeast-3" } }, { @@ -927,9 +927,9 @@ } }, "params": { - "Region": "ap-northeast-3", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-northeast-3" } }, { @@ -940,9 +940,9 @@ } }, "params": { - "Region": "ap-northeast-2", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-northeast-2" } }, { @@ -953,9 +953,9 @@ } }, "params": { - "Region": "ap-northeast-2", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-northeast-2" } }, { @@ -966,9 +966,9 @@ } }, "params": { - "Region": "ap-northeast-2", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-northeast-2" } }, { @@ -979,9 +979,9 @@ } }, "params": { - "Region": "ap-northeast-2", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-northeast-2" } }, { @@ -992,9 +992,9 @@ } }, "params": { - "Region": "ap-northeast-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-northeast-1" } }, { @@ -1005,9 +1005,9 @@ } }, "params": { - "Region": "ap-northeast-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-northeast-1" } }, { @@ -1018,9 +1018,9 @@ } 
}, "params": { - "Region": "ap-northeast-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-northeast-1" } }, { @@ -1031,9 +1031,9 @@ } }, "params": { - "Region": "ap-northeast-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-northeast-1" } }, { @@ -1044,9 +1044,9 @@ } }, "params": { - "Region": "me-south-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "me-south-1" } }, { @@ -1057,9 +1057,9 @@ } }, "params": { - "Region": "me-south-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "me-south-1" } }, { @@ -1070,9 +1070,9 @@ } }, "params": { - "Region": "me-south-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "me-south-1" } }, { @@ -1083,9 +1083,9 @@ } }, "params": { - "Region": "me-south-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "me-south-1" } }, { @@ -1096,9 +1096,9 @@ } }, "params": { - "Region": "sa-east-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "sa-east-1" } }, { @@ -1109,9 +1109,9 @@ } }, "params": { - "Region": "sa-east-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "sa-east-1" } }, { @@ -1122,9 +1122,9 @@ } }, "params": { - "Region": "sa-east-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "sa-east-1" } }, { @@ -1135,9 +1135,9 @@ } }, "params": { - "Region": "sa-east-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "sa-east-1" } }, { @@ -1148,9 +1148,9 @@ } }, "params": { - "Region": "ap-east-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-east-1" } }, { @@ -1161,9 +1161,9 @@ } }, "params": { - "Region": "ap-east-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-east-1" } }, { @@ -1174,9 +1174,9 @@ } }, "params": { - "Region": "ap-east-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + 
"Region": "ap-east-1" } }, { @@ -1187,9 +1187,9 @@ } }, "params": { - "Region": "ap-east-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-east-1" } }, { @@ -1200,9 +1200,9 @@ } }, "params": { - "Region": "cn-north-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "cn-north-1" } }, { @@ -1213,9 +1213,9 @@ } }, "params": { - "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "cn-north-1" } }, { @@ -1226,9 +1226,9 @@ } }, "params": { - "Region": "cn-north-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "cn-north-1" } }, { @@ -1239,9 +1239,9 @@ } }, "params": { - "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "cn-north-1" } }, { @@ -1252,9 +1252,9 @@ } }, "params": { - "Region": "us-gov-west-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-gov-west-1" } }, { @@ -1265,9 +1265,9 @@ } }, "params": { - "Region": "us-gov-west-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-gov-west-1" } }, { @@ -1278,9 +1278,9 @@ } }, "params": { - "Region": "us-gov-west-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-gov-west-1" } }, { @@ -1291,9 +1291,9 @@ } }, "params": { - "Region": "us-gov-west-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-gov-west-1" } }, { @@ -1304,9 +1304,9 @@ } }, "params": { - "Region": "ap-southeast-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-southeast-1" } }, { @@ -1317,9 +1317,9 @@ } }, "params": { - "Region": "ap-southeast-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-southeast-1" } }, { @@ -1330,9 +1330,9 @@ } }, "params": { - "Region": "ap-southeast-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-southeast-1" } }, { @@ -1343,9 +1343,9 @@ } }, 
"params": { - "Region": "ap-southeast-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-southeast-1" } }, { @@ -1356,9 +1356,9 @@ } }, "params": { - "Region": "ap-southeast-2", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-southeast-2" } }, { @@ -1369,9 +1369,9 @@ } }, "params": { - "Region": "ap-southeast-2", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-southeast-2" } }, { @@ -1382,9 +1382,9 @@ } }, "params": { - "Region": "ap-southeast-2", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-southeast-2" } }, { @@ -1395,9 +1395,9 @@ } }, "params": { - "Region": "ap-southeast-2", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-southeast-2" } }, { @@ -1406,9 +1406,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-iso-east-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-iso-east-1" } }, { @@ -1419,9 +1419,9 @@ } }, "params": { - "Region": "us-iso-east-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-iso-east-1" } }, { @@ -1430,9 +1430,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-iso-east-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-iso-east-1" } }, { @@ -1443,9 +1443,9 @@ } }, "params": { - "Region": "us-iso-east-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-iso-east-1" } }, { @@ -1456,9 +1456,9 @@ } }, "params": { - "Region": "ap-southeast-3", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-southeast-3" } }, { @@ -1469,9 +1469,9 @@ } }, "params": { - "Region": "ap-southeast-3", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-southeast-3" } }, { @@ -1482,9 +1482,9 @@ } }, "params": { - 
"Region": "ap-southeast-3", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-southeast-3" } }, { @@ -1495,9 +1495,9 @@ } }, "params": { - "Region": "ap-southeast-3", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-southeast-3" } }, { @@ -1508,9 +1508,9 @@ } }, "params": { - "Region": "ap-southeast-4", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-southeast-4" } }, { @@ -1521,9 +1521,9 @@ } }, "params": { - "Region": "ap-southeast-4", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-southeast-4" } }, { @@ -1534,9 +1534,9 @@ } }, "params": { - "Region": "ap-southeast-4", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-southeast-4" } }, { @@ -1547,9 +1547,9 @@ } }, "params": { - "Region": "ap-southeast-4", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-southeast-4" } }, { @@ -1560,9 +1560,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-east-1" } }, { @@ -1573,9 +1573,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-east-1" } }, { @@ -1586,9 +1586,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-east-1" } }, { @@ -1599,9 +1599,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-east-1" } }, { @@ -1612,9 +1612,9 @@ } }, "params": { - "Region": "us-east-2", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-east-2" } }, { @@ -1625,9 +1625,9 @@ } }, "params": { - "Region": "us-east-2", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-east-2" } }, { @@ -1638,9 +1638,9 @@ } }, "params": { - "Region": "us-east-2", "UseDualStack": true, - "UseFIPS": false + 
"UseFIPS": false, + "Region": "us-east-2" } }, { @@ -1651,9 +1651,9 @@ } }, "params": { - "Region": "us-east-2", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-east-2" } }, { @@ -1664,9 +1664,9 @@ } }, "params": { - "Region": "cn-northwest-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "cn-northwest-1" } }, { @@ -1677,9 +1677,9 @@ } }, "params": { - "Region": "cn-northwest-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "cn-northwest-1" } }, { @@ -1690,9 +1690,9 @@ } }, "params": { - "Region": "cn-northwest-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "cn-northwest-1" } }, { @@ -1703,9 +1703,9 @@ } }, "params": { - "Region": "cn-northwest-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "cn-northwest-1" } }, { @@ -1714,9 +1714,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-isob-east-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-isob-east-1" } }, { @@ -1727,9 +1727,9 @@ } }, "params": { - "Region": "us-isob-east-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-isob-east-1" } }, { @@ -1738,9 +1738,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-isob-east-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-isob-east-1" } }, { @@ -1751,9 +1751,9 @@ } }, "params": { - "Region": "us-isob-east-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-isob-east-1" } }, { @@ -1764,9 +1764,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": false, "UseFIPS": false, + "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -1776,9 +1776,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "Region": 
"us-east-1", "UseDualStack": false, "UseFIPS": true, + "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -1788,9 +1788,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "Region": "us-east-1", "UseDualStack": true, "UseFIPS": false, + "Region": "us-east-1", "Endpoint": "https://example.com" } } diff --git a/tests/functional/endpoint-rules/omics/endpoint-tests-1.json b/tests/functional/endpoint-rules/omics/endpoint-tests-1.json new file mode 100644 index 0000000000..485293e391 --- /dev/null +++ b/tests/functional/endpoint-rules/omics/endpoint-tests-1.json @@ -0,0 +1,295 @@ +{ + "testCases": [ + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://omics-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://omics-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://omics.us-gov-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://omics.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://omics-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + 
"UseDualStack": true, + "UseFIPS": true, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://omics-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://omics.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://omics.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://omics-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://omics.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseDualStack": false, + 
"UseFIPS": false, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://omics-fips.us-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://omics-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://omics.us-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://omics.us-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://omics-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-isob-east-1" + } + }, + { 
+ "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://omics.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/tests/functional/endpoint-rules/opensearchserverless/endpoint-tests-1.json b/tests/functional/endpoint-rules/opensearchserverless/endpoint-tests-1.json new file mode 100644 index 0000000000..c00d2d53e4 --- /dev/null +++ b/tests/functional/endpoint-rules/opensearchserverless/endpoint-tests-1.json @@ -0,0 +1,295 @@ +{ + "testCases": [ + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://aoss-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + 
"url": "https://aoss-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://aoss.us-gov-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aoss.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://aoss-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aoss-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://aoss.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aoss.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this 
partition does not support one or both" + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aoss-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aoss.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://aoss-fips.us-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aoss-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://aoss.us-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aoss.us-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + 
"UseFIPS": false, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aoss-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aoss.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + 
"params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/tests/functional/endpoint-rules/securitylake/endpoint-tests-1.json b/tests/functional/endpoint-rules/securitylake/endpoint-tests-1.json new file mode 100644 index 0000000000..44e4b725ce --- /dev/null +++ b/tests/functional/endpoint-rules/securitylake/endpoint-tests-1.json @@ -0,0 +1,295 @@ +{ + "testCases": [ + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://securitylake-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://securitylake-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://securitylake.us-gov-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://securitylake.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://securitylake-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS 
enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://securitylake-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://securitylake.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://securitylake.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://securitylake-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://securitylake.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region 
us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://securitylake-fips.us-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://securitylake-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://securitylake.us-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://securitylake.us-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://securitylake-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS 
disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://securitylake.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/tests/functional/endpoint-rules/simspaceweaver/endpoint-tests-1.json b/tests/functional/endpoint-rules/simspaceweaver/endpoint-tests-1.json new file mode 100644 index 0000000000..38383d6e5e --- /dev/null +++ b/tests/functional/endpoint-rules/simspaceweaver/endpoint-tests-1.json @@ -0,0 +1,295 @@ +{ + "testCases": [ + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://simspaceweaver-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": 
"https://simspaceweaver-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://simspaceweaver.us-gov-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://simspaceweaver.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://simspaceweaver-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://simspaceweaver-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://simspaceweaver.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://simspaceweaver.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": 
{ + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://simspaceweaver-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://simspaceweaver.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://simspaceweaver-fips.us-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://simspaceweaver-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://simspaceweaver.us-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { 
+ "url": "https://simspaceweaver.us-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://simspaceweaver-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://simspaceweaver.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled 
and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + } + ], + "version": "1.0" +} \ No newline at end of file From 1169a631c5d9f92cb0b1edd4bc96c0556eb49622 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Tue, 29 Nov 2022 19:07:42 +0000 Subject: [PATCH 2/3] Update to latest partitions and endpoints --- botocore/data/endpoints.json | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 87f8600e32..85cf3fc330 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -12813,6 +12813,17 @@ } } }, + "securitylake" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "serverlessrepo" : { "defaults" : { "protocols" : [ "https" ] @@ -13245,6 +13256,18 @@ "isRegionalized" : false, "partitionEndpoint" : "aws-global" }, + "simspaceweaver" : { + "endpoints" : { + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "sms" : { "endpoints" : { "af-south-1" : { }, From 5d76037b33b5ccf4f47715a6972f068c50ff1346 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Tue, 29 Nov 2022 19:07:43 +0000 Subject: [PATCH 3/3] Bumping version to 1.29.19 --- .changes/1.29.19.json | 37 +++++++++++++++++++ .../next-release/api-change-ec2-70430.json | 5 --- .../api-change-firehose-73695.json | 5 --- .../next-release/api-change-kms-94096.json | 5 --- .../next-release/api-change-omics-68242.json | 5 --- ...api-change-opensearchserverless-71986.json | 5 --- .../api-change-securitylake-57328.json | 5 --- 
.../api-change-simspaceweaver-88091.json | 5 --- CHANGELOG.rst | 12 ++++++ botocore/__init__.py | 2 +- docs/source/conf.py | 2 +- 11 files changed, 51 insertions(+), 37 deletions(-) create mode 100644 .changes/1.29.19.json delete mode 100644 .changes/next-release/api-change-ec2-70430.json delete mode 100644 .changes/next-release/api-change-firehose-73695.json delete mode 100644 .changes/next-release/api-change-kms-94096.json delete mode 100644 .changes/next-release/api-change-omics-68242.json delete mode 100644 .changes/next-release/api-change-opensearchserverless-71986.json delete mode 100644 .changes/next-release/api-change-securitylake-57328.json delete mode 100644 .changes/next-release/api-change-simspaceweaver-88091.json diff --git a/.changes/1.29.19.json b/.changes/1.29.19.json new file mode 100644 index 0000000000..7449841e96 --- /dev/null +++ b/.changes/1.29.19.json @@ -0,0 +1,37 @@ +[ + { + "category": "``ec2``", + "description": "This release adds support for AWS Verified Access and the Hpc6id Amazon EC2 compute optimized instance type, which features 3rd generation Intel Xeon Scalable processors.", + "type": "api-change" + }, + { + "category": "``firehose``", + "description": "Allow support for the Serverless offering for Amazon OpenSearch Service as a Kinesis Data Firehose delivery destination.", + "type": "api-change" + }, + { + "category": "``kms``", + "description": "AWS KMS introduces the External Key Store (XKS), a new feature for customers who want to protect their data with encryption keys stored in an external key management system under their control.", + "type": "api-change" + }, + { + "category": "``omics``", + "description": "Amazon Omics is a new, purpose-built service that can be used by healthcare and life science organizations to store, query, and analyze omics data. 
The insights from that data can be used to accelerate scientific discoveries and improve healthcare.", + "type": "api-change" + }, + { + "category": "``opensearchserverless``", + "description": "Publish SDK for Amazon OpenSearch Serverless", + "type": "api-change" + }, + { + "category": "``securitylake``", + "description": "Amazon Security Lake automatically centralizes security data from cloud, on-premises, and custom sources into a purpose-built data lake stored in your account. Security Lake makes it easier to analyze security data, so you can improve the protection of your workloads, applications, and data", + "type": "api-change" + }, + { + "category": "``simspaceweaver``", + "description": "AWS SimSpace Weaver is a new service that helps customers build spatial simulations at new levels of scale - resulting in virtual worlds with millions of dynamic entities. See the AWS SimSpace Weaver developer guide for more details on how to get started. https://docs.aws.amazon.com/simspaceweaver", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/next-release/api-change-ec2-70430.json b/.changes/next-release/api-change-ec2-70430.json deleted file mode 100644 index 9ab7befe4a..0000000000 --- a/.changes/next-release/api-change-ec2-70430.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``ec2``", - "description": "This release adds support for AWS Verified Access and the Hpc6id Amazon EC2 compute optimized instance type, which features 3rd generation Intel Xeon Scalable processors." 
-} diff --git a/.changes/next-release/api-change-firehose-73695.json b/.changes/next-release/api-change-firehose-73695.json deleted file mode 100644 index a831b3e4a7..0000000000 --- a/.changes/next-release/api-change-firehose-73695.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``firehose``", - "description": "Allow support for the Serverless offering for Amazon OpenSearch Service as a Kinesis Data Firehose delivery destination." -} diff --git a/.changes/next-release/api-change-kms-94096.json b/.changes/next-release/api-change-kms-94096.json deleted file mode 100644 index c302c8ffb0..0000000000 --- a/.changes/next-release/api-change-kms-94096.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``kms``", - "description": "AWS KMS introduces the External Key Store (XKS), a new feature for customers who want to protect their data with encryption keys stored in an external key management system under their control." -} diff --git a/.changes/next-release/api-change-omics-68242.json b/.changes/next-release/api-change-omics-68242.json deleted file mode 100644 index 23ec3df4f4..0000000000 --- a/.changes/next-release/api-change-omics-68242.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``omics``", - "description": "Amazon Omics is a new, purpose-built service that can be used by healthcare and life science organizations to store, query, and analyze omics data. The insights from that data can be used to accelerate scientific discoveries and improve healthcare." 
-} diff --git a/.changes/next-release/api-change-opensearchserverless-71986.json b/.changes/next-release/api-change-opensearchserverless-71986.json deleted file mode 100644 index 598586a8bf..0000000000 --- a/.changes/next-release/api-change-opensearchserverless-71986.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``opensearchserverless``", - "description": "Publish SDK for Amazon OpenSearch Serverless" -} diff --git a/.changes/next-release/api-change-securitylake-57328.json b/.changes/next-release/api-change-securitylake-57328.json deleted file mode 100644 index b44bcd20d7..0000000000 --- a/.changes/next-release/api-change-securitylake-57328.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``securitylake``", - "description": "Amazon Security Lake automatically centralizes security data from cloud, on-premises, and custom sources into a purpose-built data lake stored in your account. Security Lake makes it easier to analyze security data, so you can improve the protection of your workloads, applications, and data" -} diff --git a/.changes/next-release/api-change-simspaceweaver-88091.json b/.changes/next-release/api-change-simspaceweaver-88091.json deleted file mode 100644 index 71d1c2103e..0000000000 --- a/.changes/next-release/api-change-simspaceweaver-88091.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``simspaceweaver``", - "description": "AWS SimSpace Weaver is a new service that helps customers build spatial simulations at new levels of scale - resulting in virtual worlds with millions of dynamic entities. See the AWS SimSpace Weaver developer guide for more details on how to get started. 
https://docs.aws.amazon.com/simspaceweaver" -} diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 76711c12f6..71d043e671 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,18 @@ CHANGELOG ========= +1.29.19 +======= + +* api-change:``ec2``: This release adds support for AWS Verified Access and the Hpc6id Amazon EC2 compute optimized instance type, which features 3rd generation Intel Xeon Scalable processors. +* api-change:``firehose``: Allow support for the Serverless offering for Amazon OpenSearch Service as a Kinesis Data Firehose delivery destination. +* api-change:``kms``: AWS KMS introduces the External Key Store (XKS), a new feature for customers who want to protect their data with encryption keys stored in an external key management system under their control. +* api-change:``omics``: Amazon Omics is a new, purpose-built service that can be used by healthcare and life science organizations to store, query, and analyze omics data. The insights from that data can be used to accelerate scientific discoveries and improve healthcare. +* api-change:``opensearchserverless``: Publish SDK for Amazon OpenSearch Serverless +* api-change:``securitylake``: Amazon Security Lake automatically centralizes security data from cloud, on-premises, and custom sources into a purpose-built data lake stored in your account. Security Lake makes it easier to analyze security data, so you can improve the protection of your workloads, applications, and data +* api-change:``simspaceweaver``: AWS SimSpace Weaver is a new service that helps customers build spatial simulations at new levels of scale - resulting in virtual worlds with millions of dynamic entities. See the AWS SimSpace Weaver developer guide for more details on how to get started. 
https://docs.aws.amazon.com/simspaceweaver + + 1.29.18 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index 2200e5b201..3a484443f4 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.29.18' +__version__ = '1.29.19' class NullHandler(logging.Handler): diff --git a/docs/source/conf.py b/docs/source/conf.py index e37947861e..4ac052eece 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -54,7 +54,7 @@ # The short X.Y version. version = '1.29.' # The full version, including alpha/beta/rc tags. -release = '1.29.18' +release = '1.29.19' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.