diff --git a/.changes/1.31.38.json b/.changes/1.31.38.json new file mode 100644 index 0000000000..8bb4506ac9 --- /dev/null +++ b/.changes/1.31.38.json @@ -0,0 +1,47 @@ +[ + { + "category": "``appflow``", + "description": "Add SAP source connector parallel and pagination feature", + "type": "api-change" + }, + { + "category": "``apprunner``", + "description": "App Runner adds support for Bitbucket. You can now create App Runner connection that connects to your Bitbucket repositories and deploy App Runner service with the source code stored in a Bitbucket repository.", + "type": "api-change" + }, + { + "category": "``auditmanager``", + "description": "This release marks some assessment metadata as sensitive. We added a sensitive trait to the following attributes: assessmentName, emailAddress, scope, createdBy, lastUpdatedBy, and userName.", + "type": "api-change" + }, + { + "category": "``cleanrooms``", + "description": "This release decouples member abilities in a collaboration. With this change, the member who can run queries no longer needs to be the same as the member who can receive results.", + "type": "api-change" + }, + { + "category": "``datasync``", + "description": "AWS DataSync introduces Task Reports, a new feature that provides detailed reports of data transfer operations for each task execution.", + "type": "api-change" + }, + { + "category": "``neptunedata``", + "description": "Allows customers to execute data plane actions like bulk loading graphs, issuing graph queries using Gremlin and openCypher directly from the SDK.", + "type": "api-change" + }, + { + "category": "``network-firewall``", + "description": "Network Firewall increasing pagination token string length", + "type": "api-change" + }, + { + "category": "``pca-connector-ad``", + "description": "The Connector for AD allows you to use a fully-managed AWS Private CA as a drop-in replacement for your self-managed enterprise CAs without local agents or proxy servers. 
Enterprises that use AD to manage Windows environments can reduce their private certificate authority (CA) costs and complexity.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Amazon SageMaker Canvas adds IdentityProviderOAuthSettings support for CanvasAppSettings", + "type": "api-change" + } +] \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a77af80a6d..fa265dfce3 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,20 @@ CHANGELOG ========= +1.31.38 +======= + +* api-change:``appflow``: Add SAP source connector parallel and pagination feature +* api-change:``apprunner``: App Runner adds support for Bitbucket. You can now create App Runner connection that connects to your Bitbucket repositories and deploy App Runner service with the source code stored in a Bitbucket repository. +* api-change:``auditmanager``: This release marks some assessment metadata as sensitive. We added a sensitive trait to the following attributes: assessmentName, emailAddress, scope, createdBy, lastUpdatedBy, and userName. +* api-change:``cleanrooms``: This release decouples member abilities in a collaboration. With this change, the member who can run queries no longer needs to be the same as the member who can receive results. +* api-change:``datasync``: AWS DataSync introduces Task Reports, a new feature that provides detailed reports of data transfer operations for each task execution. +* api-change:``neptunedata``: Allows customers to execute data plane actions like bulk loading graphs, issuing graph queries using Gremlin and openCypher directly from the SDK. +* api-change:``network-firewall``: Network Firewall increasing pagination token string length +* api-change:``pca-connector-ad``: The Connector for AD allows you to use a fully-managed AWS Private CA as a drop-in replacement for your self-managed enterprise CAs without local agents or proxy servers. 
Enterprises that use AD to manage Windows environments can reduce their private certificate authority (CA) costs and complexity. +* api-change:``sagemaker``: Amazon SageMaker Canvas adds IdentityProviderOAuthSettings support for CanvasAppSettings + + 1.31.37 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index 5e869a1fa5..84ec4dd2d2 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.31.37' +__version__ = '1.31.38' class NullHandler(logging.Handler): diff --git a/botocore/data/appflow/2020-08-23/endpoint-rule-set-1.json b/botocore/data/appflow/2020-08-23/endpoint-rule-set-1.json index 02056ce3eb..85ec7f0d71 100644 --- a/botocore/data/appflow/2020-08-23/endpoint-rule-set-1.json +++ b/botocore/data/appflow/2020-08-23/endpoint-rule-set-1.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + 
"fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://appflow-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://appflow-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": 
"tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://appflow-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://appflow-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://appflow.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], 
"endpoint": { - "url": "https://appflow.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://appflow.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://appflow.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/botocore/data/appflow/2020-08-23/service-2.json b/botocore/data/appflow/2020-08-23/service-2.json index e34290b519..d3130cb324 100644 --- a/botocore/data/appflow/2020-08-23/service-2.json +++ b/botocore/data/appflow/2020-08-23/service-2.json @@ -61,7 +61,8 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ConflictException"}, {"shape":"ConnectorAuthenticationException"}, - {"shape":"ConnectorServerException"} + {"shape":"ConnectorServerException"}, + {"shape":"AccessDeniedException"} ], "documentation":"
Enables your application to create a new flow using Amazon AppFlow. You must create a connector profile before calling this API. Please note that the Request Syntax below shows syntax for multiple destinations, however, you can only transfer data to one item in this list at a time. Amazon AppFlow does not currently support flows to multiple destinations at once.
" }, @@ -411,7 +412,8 @@ {"shape":"ConflictException"}, {"shape":"ConnectorAuthenticationException"}, {"shape":"ConnectorServerException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} ], "documentation":"Updates an existing flow.
" } @@ -2675,6 +2677,14 @@ "recordsProcessed":{ "shape":"Long", "documentation":"The number of records processed in the flow run.
" + }, + "numParallelProcesses":{ + "shape":"Long", + "documentation":"The number of processes that Amazon AppFlow ran at the same time when it retrieved your data.
" + }, + "maxPageSize":{ + "shape":"Long", + "documentation":"The maximum number of records that Amazon AppFlow receives in each page of the response from your SAP application.
" } }, "documentation":"Specifies the end result of the flow run.
" @@ -4329,18 +4339,60 @@ }, "documentation":"The properties that are applied when using SAPOData as a flow destination
" }, + "SAPODataMaxPageSize":{ + "type":"integer", + "max":10000, + "min":1 + }, + "SAPODataMaxParallelism":{ + "type":"integer", + "max":10, + "min":1 + }, "SAPODataMetadata":{ "type":"structure", "members":{ }, "documentation":"The connector metadata specific to SAPOData.
" }, + "SAPODataPaginationConfig":{ + "type":"structure", + "required":["maxPageSize"], + "members":{ + "maxPageSize":{ + "shape":"SAPODataMaxPageSize", + "documentation":"The maximum number of records that Amazon AppFlow receives in each page of the response from your SAP application. For transfers of OData records, the maximum page size is 3,000. For transfers of data that comes from an ODP provider, the maximum page size is 10,000.
", + "box":true + } + }, + "documentation":"Sets the page size for each concurrent process that transfers OData records from your SAP instance. A concurrent process is query that retrieves a batch of records as part of a flow run. Amazon AppFlow can run multiple concurrent processes in parallel to transfer data faster.
" + }, + "SAPODataParallelismConfig":{ + "type":"structure", + "required":["maxParallelism"], + "members":{ + "maxParallelism":{ + "shape":"SAPODataMaxParallelism", + "documentation":"The maximum number of processes that Amazon AppFlow runs at the same time when it retrieves your data from your SAP application.
", + "box":true + } + }, + "documentation":"Sets the number of concurrent processes that transfer OData records from your SAP instance. A concurrent process is query that retrieves a batch of records as part of a flow run. Amazon AppFlow can run multiple concurrent processes in parallel to transfer data faster.
" + }, "SAPODataSourceProperties":{ "type":"structure", "members":{ "objectPath":{ "shape":"Object", "documentation":"The object path specified in the SAPOData flow source.
" + }, + "parallelismConfig":{ + "shape":"SAPODataParallelismConfig", + "documentation":"Sets the number of concurrent processes that transfers OData records from your SAP instance.
" + }, + "paginationConfig":{ + "shape":"SAPODataPaginationConfig", + "documentation":"Sets the page size for each concurrent process that transfers OData records from your SAP instance.
" } }, "documentation":"The properties that are applied when using SAPOData as a flow source.
" diff --git a/botocore/data/apprunner/2020-05-15/endpoint-rule-set-1.json b/botocore/data/apprunner/2020-05-15/endpoint-rule-set-1.json index 9d35edd948..705f0bb679 100644 --- a/botocore/data/apprunner/2020-05-15/endpoint-rule-set-1.json +++ b/botocore/data/apprunner/2020-05-15/endpoint-rule-set-1.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - 
"type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://apprunner-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://apprunner-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://apprunner-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } 
] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://apprunner-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://apprunner.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://apprunner.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://apprunner.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://apprunner.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - 
}, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/botocore/data/apprunner/2020-05-15/service-2.json b/botocore/data/apprunner/2020-05-15/service-2.json index b33c1b645a..f1e333f14a 100644 --- a/botocore/data/apprunner/2020-05-15/service-2.json +++ b/botocore/data/apprunner/2020-05-15/service-2.json @@ -56,7 +56,7 @@ {"shape":"InternalServiceErrorException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"Create an App Runner connection resource. App Runner requires a connection resource when you create App Runner services that access private repositories from certain third-party providers. You can share a connection across multiple services.
A connection resource is needed to access GitHub repositories. GitHub requires a user interface approval process through the App Runner console before you can use the connection.
" + "documentation":"Create an App Runner connection resource. App Runner requires a connection resource when you create App Runner services that access private repositories from certain third-party providers. You can share a connection across multiple services.
A connection resource is needed to access GitHub and Bitbucket repositories. Both require a user interface approval process through the App Runner console before you can use the connection.
" }, "CreateObservabilityConfiguration":{ "name":"CreateObservabilityConfiguration", @@ -2182,7 +2182,10 @@ }, "ProviderType":{ "type":"string", - "enum":["GITHUB"] + "enum":[ + "GITHUB", + "BITBUCKET" + ] }, "ResourceNotFoundException":{ "type":"structure", @@ -2318,7 +2321,7 @@ }, "Status":{ "shape":"ServiceStatus", - "documentation":"The current state of the App Runner service. These particular values mean the following.
CREATE_FAILED
– The service failed to create. To troubleshoot this failure, read the failure events and logs, change any parameters that need to be fixed, and retry the call to create the service.
The failed service isn't usable, and still counts towards your service quota. When you're done analyzing the failure, delete the service.
DELETE_FAILED
– The service failed to delete and can't be successfully recovered. Retry the service deletion call to ensure that all related resources are removed.
The current state of the App Runner service. These particular values mean the following.
CREATE_FAILED
– The service failed to create. The failed service isn't usable, and still counts towards your service quota. To troubleshoot this failure, read the failure events and logs, change any parameters that need to be fixed, and rebuild your service using UpdateService
.
DELETE_FAILED
– The service failed to delete and can't be successfully recovered. Retry the service deletion call to ensure that all related resources are removed.
The current state of the App Runner service. These particular values mean the following.
CREATE_FAILED
– The service failed to create. Read the failure events and logs, change any parameters that need to be fixed, and retry the call to create the service.
The failed service isn't usable, and still counts towards your service quota. When you're done analyzing the failure, delete the service.
DELETE_FAILED
– The service failed to delete and can't be successfully recovered. Retry the service deletion call to ensure that all related resources are removed.
The current state of the App Runner service. These particular values mean the following.
CREATE_FAILED
– The service failed to create. The failed service isn't usable, and still counts towards your service quota. To troubleshoot this failure, read the failure events and logs, change any parameters that need to be fixed, and rebuild your service using UpdateService
.
DELETE_FAILED
– The service failed to delete and can't be successfully recovered. Retry the service deletion call to ensure that all related resources are removed.
Provides summary information for an App Runner service.
This type contains limited information about a service. It doesn't include configuration details. It's returned by the ListServices action. Complete service information is returned by the CreateService, DescribeService, and DeleteService actions using the Service type.
" diff --git a/botocore/data/auditmanager/2017-07-25/endpoint-rule-set-1.json b/botocore/data/auditmanager/2017-07-25/endpoint-rule-set-1.json index 29247a7d2d..b38eb1c9a6 100644 --- a/botocore/data/auditmanager/2017-07-25/endpoint-rule-set-1.json +++ b/botocore/data/auditmanager/2017-07-25/endpoint-rule-set-1.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] 
- } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://auditmanager-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://auditmanager-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://auditmanager-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, 
+ "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://auditmanager-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://auditmanager.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://auditmanager.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://auditmanager.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://auditmanager.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": 
{} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/botocore/data/auditmanager/2017-07-25/service-2.json b/botocore/data/auditmanager/2017-07-25/service-2.json index 8420122feb..af2cdd2115 100644 --- a/botocore/data/auditmanager/2017-07-25/service-2.json +++ b/botocore/data/auditmanager/2017-07-25/service-2.json @@ -1013,7 +1013,8 @@ }, "AWSAccounts":{ "type":"list", - "member":{"shape":"AWSAccount"} + "member":{"shape":"AWSAccount"}, + "sensitive":true }, "AWSService":{ "type":"structure", @@ -1081,12 +1082,14 @@ "ActionPlanInstructions":{ "type":"string", "max":1000, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true }, "ActionPlanTitle":{ "type":"string", "max":300, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true }, "Assessment":{ "type":"structure", @@ -1205,7 +1208,8 @@ "AssessmentDescription":{ "type":"string", "max":1000, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true }, "AssessmentEvidenceFolder":{ "type":"structure", @@ -1312,7 +1316,8 @@ "documentation":"The control sets that are associated with the framework.
" } }, - "documentation":"The file used to structure and automate Audit Manager assessments for a given compliance standard.
" + "documentation":"The file used to structure and automate Audit Manager assessments for a given compliance standard.
", + "sensitive":true }, "AssessmentFrameworkDescription":{ "type":"string", @@ -1532,7 +1537,8 @@ "type":"string", "max":300, "min":1, - "pattern":"^[^\\\\]*$" + "pattern":"^[^\\\\]*$", + "sensitive":true }, "AssessmentReport":{ "type":"structure", @@ -1579,7 +1585,8 @@ "AssessmentReportDescription":{ "type":"string", "max":1000, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true }, "AssessmentReportDestinationType":{ "type":"string", @@ -1671,7 +1678,8 @@ "documentation":"The destination bucket where Audit Manager stores assessment reports.
" } }, - "documentation":"The location where Audit Manager saves assessment reports for the given assessment.
" + "documentation":"The location where Audit Manager saves assessment reports for the given assessment.
", + "sensitive":true }, "AssessmentReportsMetadata":{ "type":"list", @@ -1771,7 +1779,8 @@ }, "BatchCreateDelegationByAssessmentErrors":{ "type":"list", - "member":{"shape":"BatchCreateDelegationByAssessmentError"} + "member":{"shape":"BatchCreateDelegationByAssessmentError"}, + "sensitive":true }, "BatchCreateDelegationByAssessmentRequest":{ "type":"structure", @@ -1825,7 +1834,8 @@ }, "BatchDeleteDelegationByAssessmentErrors":{ "type":"list", - "member":{"shape":"BatchDeleteDelegationByAssessmentError"} + "member":{"shape":"BatchDeleteDelegationByAssessmentError"}, + "sensitive":true }, "BatchDeleteDelegationByAssessmentRequest":{ "type":"structure", @@ -1996,7 +2006,8 @@ "ComplianceType":{ "type":"string", "max":100, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true }, "Control":{ "type":"structure", @@ -2085,7 +2096,8 @@ "ControlCommentBody":{ "type":"string", "max":500, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true }, "ControlComments":{ "type":"list", @@ -2316,7 +2328,8 @@ "ControlSets":{ "type":"list", "member":{"shape":"ControlSet"}, - "min":1 + "min":1, + "sensitive":true }, "ControlSetsCount":{"type":"integer"}, "ControlSources":{ @@ -2607,13 +2620,15 @@ "type":"list", "member":{"shape":"CreateDelegationRequest"}, "max":50, - "min":1 + "min":1, + "sensitive":true }, "CreatedBy":{ "type":"string", "max":100, "min":1, - "pattern":"^[a-zA-Z0-9\\s-_()\\[\\]]+$" + "pattern":"^[a-zA-Z0-9\\s-_()\\[\\]]+$", + "sensitive":true }, "DefaultExportDestination":{ "type":"structure", @@ -2677,12 +2692,14 @@ "documentation":"The user or role that created the delegation.
" } }, - "documentation":"The assignment of a control set to a delegate for review.
" + "documentation":"The assignment of a control set to a delegate for review.
", + "sensitive":true }, "DelegationComment":{ "type":"string", "max":350, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true }, "DelegationIds":{ "type":"list", @@ -2916,7 +2933,8 @@ "type":"string", "max":320, "min":1, - "pattern":"^.*@.*$" + "pattern":"^.*@.*$", + "sensitive":true }, "ErrorCode":{ "type":"string", @@ -3466,7 +3484,8 @@ "shape":"NonEmptyString", "documentation":"The presigned URL that was generated.
" } - } + }, + "sensitive":true }, "GetEvidenceFolderRequest":{ "type":"structure", @@ -3843,7 +3862,8 @@ "type":"string", "max":100, "min":1, - "pattern":"^[a-zA-Z0-9\\s-_()\\[\\]]+$" + "pattern":"^[a-zA-Z0-9\\s-_()\\[\\]]+$", + "sensitive":true }, "ListAssessmentControlInsightsByControlDomainRequest":{ "type":"structure", @@ -4292,13 +4312,15 @@ "type":"string", "max":300, "min":1, - "pattern":"[^\\/]*" + "pattern":"[^\\/]*", + "sensitive":true }, "ManualEvidenceTextResponse":{ "type":"string", "max":1000, "min":1, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true }, "MaxResults":{ "type":"integer", @@ -4491,7 +4513,8 @@ }, "Roles":{ "type":"list", - "member":{"shape":"Role"} + "member":{"shape":"Role"}, + "sensitive":true }, "S3Url":{ "type":"string", @@ -4503,7 +4526,8 @@ "type":"string", "max":255, "min":1, - "pattern":"^[a-zA-Z0-9-_\\(\\)\\[\\]]+$" + "pattern":"^[a-zA-Z0-9-_\\(\\)\\[\\]]+$", + "sensitive":true }, "Scope":{ "type":"structure", @@ -4517,7 +4541,8 @@ "documentation":"The Amazon Web Services services that are included in the scope of the assessment.
" } }, - "documentation":"The wrapper that contains the Amazon Web Services accounts and services that are in scope for the assessment.
" + "documentation":"The wrapper that contains the Amazon Web Services accounts and services that are in scope for the assessment.
", + "sensitive":true }, "ServiceMetadata":{ "type":"structure", @@ -4789,7 +4814,8 @@ "TestingInformation":{ "type":"string", "max":1000, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true }, "ThrottlingException":{ "type":"structure", @@ -4817,7 +4843,8 @@ "TroubleshootingText":{ "type":"string", "max":1000, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true }, "URL":{ "type":"structure", @@ -5224,7 +5251,8 @@ "type":"string", "max":128, "min":1, - "pattern":"^[a-zA-Z0-9-_()\\s\\+=,.@]+$" + "pattern":"^[a-zA-Z0-9-_()\\s\\+=,.@]+$", + "sensitive":true }, "ValidateAssessmentReportIntegrityRequest":{ "type":"structure", diff --git a/botocore/data/cleanrooms/2022-02-17/service-2.json b/botocore/data/cleanrooms/2022-02-17/service-2.json index 5560200444..9c97b12fc3 100644 --- a/botocore/data/cleanrooms/2022-02-17/service-2.json +++ b/botocore/data/cleanrooms/2022-02-17/service-2.json @@ -674,7 +674,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"Creates a protected query that is started by Clean Rooms .
" + "documentation":"Creates a protected query that is started by Clean Rooms.
" }, "TagResource":{ "name":"TagResource", @@ -939,7 +939,7 @@ "AllowedColumnList":{ "type":"list", "member":{"shape":"ColumnName"}, - "max":100, + "max":225, "min":1 }, "AnalysisFormat":{ @@ -2426,6 +2426,10 @@ "tags":{ "shape":"TagMap", "documentation":"An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource.
" + }, + "defaultResultConfiguration":{ + "shape":"MembershipProtectedQueryResultConfiguration", + "documentation":"The default protected query result configuration as specified by the member who can receive results.
" } } }, @@ -2990,7 +2994,9 @@ }, "KeyPrefix":{ "type":"string", - "pattern":"[\\w!.*/-]*" + "max":512, + "min":0, + "pattern":"[\\w!.=*/-]*" }, "ListAnalysisTemplatesInput":{ "type":"structure", @@ -3532,6 +3538,10 @@ "queryLogStatus":{ "shape":"MembershipQueryLogStatus", "documentation":"An indicator as to whether query logging has been enabled or disabled for the collaboration.
" + }, + "defaultResultConfiguration":{ + "shape":"MembershipProtectedQueryResultConfiguration", + "documentation":"The default protected query result configuration as specified by the member who can receive results.
" } }, "documentation":"The membership object.
" @@ -3548,6 +3558,29 @@ "min":36, "pattern":".*[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}.*" }, + "MembershipProtectedQueryOutputConfiguration":{ + "type":"structure", + "members":{ + "s3":{"shape":"ProtectedQueryS3OutputConfiguration"} + }, + "documentation":"Contains configurations for protected query results.
", + "union":true + }, + "MembershipProtectedQueryResultConfiguration":{ + "type":"structure", + "required":["outputConfiguration"], + "members":{ + "outputConfiguration":{ + "shape":"MembershipProtectedQueryOutputConfiguration", + "documentation":"Configuration for protected query results.
" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"The unique ARN for an IAM role that is used by Clean Rooms to write protected query results to the result location, given by the member who can receive results.
" + } + }, + "documentation":"Contains configurations for protected query results.
" + }, "MembershipQueryLogStatus":{ "type":"string", "enum":[ @@ -3678,9 +3711,7 @@ "membershipId", "membershipArn", "createTime", - "sqlParameters", - "status", - "resultConfiguration" + "status" ], "members":{ "id":{ @@ -3750,12 +3781,20 @@ "min":1, "pattern":".*[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}.*" }, + "ProtectedQueryMemberOutputList":{ + "type":"list", + "member":{"shape":"ProtectedQuerySingleMemberOutput"} + }, "ProtectedQueryOutput":{ "type":"structure", "members":{ "s3":{ "shape":"ProtectedQueryS3Output", "documentation":"If present, the output for a protected query with an `S3` output type.
" + }, + "memberList":{ + "shape":"ProtectedQueryMemberOutputList", + "documentation":"The list of member Amazon Web Services account(s) that received the results of the query.
" } }, "documentation":"Contains details about the protected query output.
", @@ -3857,6 +3896,17 @@ "max":15000, "min":0 }, + "ProtectedQuerySingleMemberOutput":{ + "type":"structure", + "required":["accountId"], + "members":{ + "accountId":{ + "shape":"AccountId", + "documentation":"The Amazon Web Services account ID of the member in the collaboration who can receive results for the query.
" + } + }, + "documentation":"Details about the member who received the query result.
" + }, "ProtectedQueryStatistics":{ "type":"structure", "members":{ @@ -4168,8 +4218,7 @@ "required":[ "type", "membershipIdentifier", - "sqlParameters", - "resultConfiguration" + "sqlParameters" ], "members":{ "type":{ @@ -4510,6 +4559,10 @@ "queryLogStatus":{ "shape":"MembershipQueryLogStatus", "documentation":"An indicator as to whether query logging has been enabled or disabled for the collaboration.
" + }, + "defaultResultConfiguration":{ + "shape":"MembershipProtectedQueryResultConfiguration", + "documentation":"The default protected query result configuration as specified by the member who can receive results.
" } } }, @@ -4608,5 +4661,5 @@ ] } }, - "documentation":"Welcome to the Clean Rooms API Reference.
Clean Rooms is an Amazon Web Services service that helps multiple parties to join their data together in a secure collaboration workspace. In the collaboration, members who can query and receive results can get insights into the collective datasets without either party getting access to the other party's raw data.
To learn more about Clean Rooms concepts, procedures, and best practices, see the Clean Rooms User Guide.
" + "documentation":"Welcome to the Clean Rooms API Reference.
Clean Rooms is an Amazon Web Services service that helps multiple parties to join their data together in a secure collaboration workspace. In the collaboration, members who can query and receive results can get insights into the collective datasets without either party getting access to the other party's raw data.
To learn more about Clean Rooms concepts, procedures, and best practices, see the Clean Rooms User Guide.
To learn more about SQL commands, functions, and conditions supported in Clean Rooms, see the Clean Rooms SQL Reference.
" } diff --git a/botocore/data/datasync/2018-11-09/service-2.json b/botocore/data/datasync/2018-11-09/service-2.json index 1260e47922..0fe7d4a12b 100644 --- a/botocore/data/datasync/2018-11-09/service-2.json +++ b/botocore/data/datasync/2018-11-09/service-2.json @@ -223,7 +223,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"Configures a task, which defines where and how DataSync transfers your data.
A task includes a source location, a destination location, and the preferences for how and when you want to transfer your data (such as bandwidth limits, scheduling, among other options).
If you're planning to transfer data to or from an Amazon S3 location, review how DataSync can affect your S3 request charges and the DataSync pricing page before you begin.
Configures a transfer task, which defines where and how DataSync moves your data.
A task includes a source location, destination location, and the options for how and when you want to transfer your data (such as bandwidth limits, scheduling, among other options).
If you're planning to transfer data to or from an Amazon S3 location, review how DataSync can affect your S3 request charges and the DataSync pricing page before you begin.
Deletes an DataSync task.
" + "documentation":"Deletes an DataSync transfer task.
" }, "DescribeAgent":{ "name":"DescribeAgent", @@ -521,7 +521,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"Provides information about an DataSync transfer task that's running.
" + "documentation":"Provides information about an execution of your DataSync task. You can use this operation to help monitor the progress of an ongoing transfer or check the results of the transfer.
" }, "GenerateRecommendations":{ "name":"GenerateRecommendations", @@ -680,7 +680,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"Starts an DataSync task. For each task, you can only run one task execution at a time.
There are several phases to a task execution. For more information, see Task execution statuses.
If you're planning to transfer data to or from an Amazon S3 location, review how DataSync can affect your S3 request charges and the DataSync pricing page before you begin.
Starts an DataSync transfer task. For each task, you can only run one task execution at a time.
There are several phases to a task execution. For more information, see Task execution statuses.
If you're planning to transfer data to or from an Amazon S3 location, review how DataSync can affect your S3 request charges and the DataSync pricing page before you begin.
Updates the metadata associated with a task.
" + "documentation":"Updates the configuration of a DataSync transfer task.
" }, "UpdateTaskExecution":{ "name":"UpdateTaskExecution", @@ -865,7 +865,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"Modifies a running DataSync task.
Currently, the only Option
that you can modify with UpdateTaskExecution
is BytesPerSecond
, which throttles bandwidth for a running or queued task.
Updates the configuration of a running DataSync task execution.
Currently, the only Option
that you can modify with UpdateTaskExecution
is BytesPerSecond
, which throttles bandwidth for a running or queued task execution.
Specifies a SAS token that provides permissions at the Azure storage account, container, or folder level.
The token is part of the SAS URI string that comes after the storage resource URI and a question mark. A token looks something like this:
sp=r&st=2023-12-20T14:54:52Z&se=2023-12-20T22:54:52Z&spr=https&sv=2021-06-08&sr=c&sig=aBBKDWQvyuVcTPH9EBp%2FXTI9E%2F%2Fmq171%2BZU178wcwqU%3D
Specifies a SAS token that provides permissions to access your Azure Blob Storage.
The token is part of the SAS URI string that comes after the storage resource URI and a question mark. A token looks something like this:
sp=r&st=2023-12-20T14:54:52Z&se=2023-12-20T22:54:52Z&spr=https&sv=2021-06-08&sr=c&sig=aBBKDWQvyuVcTPH9EBp%2FXTI9E%2F%2Fmq171%2BZU178wcwqU%3D
The shared access signature (SAS) configuration that allows DataSync to access your Microsoft Azure Blob Storage.
For more information, see SAS tokens for accessing your Azure Blob Storage.
" @@ -1675,6 +1675,10 @@ "Includes":{ "shape":"FilterList", "documentation":"Specifies a list of filter rules that include specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.
" + }, + "TaskReportConfig":{ + "shape":"TaskReportConfig", + "documentation":"Specifies how you want to configure a task report, which provides detailed information about for your DataSync transfer.
" } }, "documentation":"CreateTaskRequest
" @@ -2456,7 +2460,7 @@ "members":{ "TaskExecutionArn":{ "shape":"TaskExecutionArn", - "documentation":"Specifies the Amazon Resource Name (ARN) of the transfer task that's running.
" + "documentation":"Specifies the Amazon Resource Name (ARN) of the task execution that you want information about.
" } }, "documentation":"DescribeTaskExecutionRequest
" @@ -2466,11 +2470,11 @@ "members":{ "TaskExecutionArn":{ "shape":"TaskExecutionArn", - "documentation":"The Amazon Resource Name (ARN) of the task execution that was described. TaskExecutionArn
is hierarchical and includes TaskArn
for the task that was executed.
For example, a TaskExecution
value with the ARN arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2/execution/exec-08ef1e88ec491019b
executed the task with the ARN arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2
.
The ARN of the task execution that you wanted information about. TaskExecutionArn
is hierarchical and includes TaskArn
for the task that was executed.
For example, a TaskExecution
value with the ARN arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2/execution/exec-08ef1e88ec491019b
executed the task with the ARN arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2
.
The status of the task execution.
For detailed information about task execution statuses, see Understanding Task Statuses in the DataSync User Guide.
" + "documentation":"The status of the task execution.
" }, "Options":{"shape":"Options"}, "Excludes":{ @@ -2483,23 +2487,23 @@ }, "StartTime":{ "shape":"Time", - "documentation":"The time that the task execution was started.
" + "documentation":"The time when the task execution started.
" }, "EstimatedFilesToTransfer":{ "shape":"long", - "documentation":"The expected number of files that is to be transferred over the network. This value is calculated during the PREPARING
phase before the TRANSFERRING
phase of the task execution. This value is the expected number of files to be transferred. It's calculated based on comparing the content of the source and destination locations and finding the delta that needs to be transferred.
The expected number of files, objects, and directories that DataSync will transfer over the network. This value is calculated during the task execution's PREPARING
phase before the TRANSFERRING
phase. The calculation is based on comparing the content of the source and destination locations and finding the difference that needs to be transferred.
The estimated physical number of bytes that is to be transferred over the network.
" + "documentation":"The estimated physical number of bytes that will transfer over the network.
" }, "FilesTransferred":{ "shape":"long", - "documentation":"The actual number of files that was transferred over the network. This value is calculated and updated on an ongoing basis during the TRANSFERRING
phase of the task execution. It's updated periodically when each file is read from the source and sent over the network.
If failures occur during a transfer, this value can be less than EstimatedFilesToTransfer
. In some cases, this value can also be greater than EstimatedFilesToTransfer
. This element is implementation-specific for some location types, so don't use it as an indicator for a correct file number or to monitor your task execution.
The actual number of files, objects, and directories that DataSync transferred over the network. This value is updated periodically during the task execution's TRANSFERRING
phase when something is read from the source and sent over the network.
If DataSync fails to transfer something, this value can be less than EstimatedFilesToTransfer
. In some cases, this value can also be greater than EstimatedFilesToTransfer
. This element is implementation-specific for some location types, so don't use it as an exact indication of what transferred or to monitor your task execution.
The number of logical bytes written to the destination Amazon Web Services storage resource.
" + "documentation":"The number of logical bytes written to the destination location.
" }, "BytesTransferred":{ "shape":"long", @@ -2512,6 +2516,30 @@ "BytesCompressed":{ "shape":"long", "documentation":"The physical number of bytes transferred over the network after compression was applied. In most cases, this number is less than BytesTransferred
unless the data isn't compressible.
The configuration of your task report, which provides detailed information about for your DataSync transfer.
" + }, + "FilesDeleted":{ + "shape":"long", + "documentation":"The number of files, objects, and directories that DataSync deleted in your destination location. If you don't configure your task to delete data in the destination that isn't in the source, the value is always 0
.
The number of files, objects, and directories that DataSync skipped during your transfer.
" + }, + "FilesVerified":{ + "shape":"long", + "documentation":"The number of files, objects, and directories that DataSync verified during your transfer.
" + }, + "ReportResult":{ + "shape":"ReportResult", + "documentation":"Indicates whether DataSync generated a complete task report for your transfer.
" + }, + "EstimatedFilesToDelete":{ + "shape":"long", + "documentation":"The expected number of files, objects, and directories that DataSync will delete in your destination location. If you don't configure your task to delete data in the destination that isn't in the source, the value is always 0
.
DescribeTaskExecutionResponse
" @@ -2593,6 +2621,10 @@ "Includes":{ "shape":"FilterList", "documentation":"A list of filter rules that include specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.
" + }, + "TaskReportConfig":{ + "shape":"TaskReportConfig", + "documentation":"The configuration of your task report. For more information, see Creating a task report.
" } }, "documentation":"DescribeTaskResponse
" @@ -3744,6 +3776,13 @@ "NONE" ] }, + "ObjectVersionIds":{ + "type":"string", + "enum":[ + "INCLUDE", + "NONE" + ] + }, "OnPremConfig":{ "type":"structure", "required":["AgentArns"], @@ -3834,7 +3873,7 @@ "documentation":"Specifies whether object tags are preserved when transferring between object storage systems. If you want your DataSync task to ignore object tags, specify the NONE
value.
Default Value: PRESERVE
Configures your DataSync task settings. These options include how DataSync handles files, objects, and their associated metadata. You also can specify how DataSync verifies data integrity, set bandwidth limits for your task, among other options.
Each task setting has a default value. Unless you need to, you don't have to configure any of these Options
before starting your task.
Indicates how your transfer task is configured. These options include how DataSync handles files, objects, and their associated metadata during your transfer. You also can specify how to verify data integrity, set bandwidth limits for your task, among other options.
Each option has a default value. Unless you need to, you don't have to configure any of these options before starting your task.
" }, "OutputTagList":{ "type":"list", @@ -4017,6 +4056,102 @@ "members":{ } }, + "ReportDestination":{ + "type":"structure", + "members":{ + "S3":{ + "shape":"ReportDestinationS3", + "documentation":"Specifies the Amazon S3 bucket where DataSync uploads your task report.
" + } + }, + "documentation":"Specifies where DataSync uploads your task report.
" + }, + "ReportDestinationS3":{ + "type":"structure", + "required":[ + "S3BucketArn", + "BucketAccessRoleArn" + ], + "members":{ + "Subdirectory":{ + "shape":"S3Subdirectory", + "documentation":"Specifies a bucket prefix for your report.
" + }, + "S3BucketArn":{ + "shape":"S3BucketArn", + "documentation":"Specifies the ARN of the S3 bucket where DataSync uploads your report.
" + }, + "BucketAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"Specifies the Amazon Resource Name (ARN) of the IAM policy that allows DataSync to upload a task report to your S3 bucket. For more information, see Allowing DataSync to upload a task report to an Amazon S3 bucket.
" + } + }, + "documentation":"Specifies the Amazon S3 bucket where DataSync uploads your task report.
" + }, + "ReportLevel":{ + "type":"string", + "enum":[ + "ERRORS_ONLY", + "SUCCESSES_AND_ERRORS" + ] + }, + "ReportOutputType":{ + "type":"string", + "enum":[ + "SUMMARY_ONLY", + "STANDARD" + ] + }, + "ReportOverride":{ + "type":"structure", + "members":{ + "ReportLevel":{ + "shape":"ReportLevel", + "documentation":"Specifies whether your task report includes errors only or successes and errors.
For example, your report might mostly include only what didn't go well in your transfer (ERRORS_ONLY
). At the same time, you want to verify that your task filter is working correctly. In this situation, you can get a list of what files DataSync successfully skipped and if something transferred that you didn't to transfer (SUCCESSES_AND_ERRORS
).
Specifies the level of detail for a particular aspect of your DataSync task report.
" + }, + "ReportOverrides":{ + "type":"structure", + "members":{ + "Transferred":{ + "shape":"ReportOverride", + "documentation":"Specifies the level of reporting for the files, objects, and directories that DataSync attempted to transfer.
" + }, + "Verified":{ + "shape":"ReportOverride", + "documentation":"Specifies the level of reporting for the files, objects, and directories that DataSync attempted to verify at the end of your transfer. This only applies if you configure your task to verify data during and after the transfer (which DataSync does by default).
" + }, + "Deleted":{ + "shape":"ReportOverride", + "documentation":"Specifies the level of reporting for the files, objects, and directories that DataSync attempted to delete in your destination location. This only applies if you configure your task to delete data in the destination that isn't in the source.
" + }, + "Skipped":{ + "shape":"ReportOverride", + "documentation":"Specifies the level of reporting for the files, objects, and directories that DataSync attempted to skip during your transfer.
" + } + }, + "documentation":"The level of detail included in each aspect of your DataSync task report.
" + }, + "ReportResult":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"PhaseStatus", + "documentation":"Indicates whether DataSync is still working on your report, created a report, or can't create a complete report.
" + }, + "ErrorCode":{ + "shape":"string", + "documentation":"Indicates the code associated with the error if DataSync can't create a complete report.
" + }, + "ErrorDetail":{ + "shape":"string", + "documentation":"Provides details about issues creating a report.
" + } + }, + "documentation":"Indicates whether DataSync created a complete task report for your transfer.
" + }, "ResourceDetails":{ "type":"structure", "members":{ @@ -4234,6 +4369,10 @@ "Tags":{ "shape":"InputTagList", "documentation":"Specifies the tags that you want to apply to the Amazon Resource Name (ARN) representing the task execution.
Tags are key-value pairs that help you manage, filter, and search for your DataSync resources.
" + }, + "TaskReportConfig":{ + "shape":"TaskReportConfig", + "documentation":"Specifies how you want to configure a task report, which provides detailed information about for your DataSync transfer.
" } }, "documentation":"StartTaskExecutionRequest
" @@ -4505,6 +4644,32 @@ "DISABLED" ] }, + "TaskReportConfig":{ + "type":"structure", + "members":{ + "Destination":{ + "shape":"ReportDestination", + "documentation":"Specifies the Amazon S3 bucket where DataSync uploads your task report. For more information, see Task reports.
" + }, + "OutputType":{ + "shape":"ReportOutputType", + "documentation":"Specifies the type of task report that you want:
SUMMARY_ONLY
: Provides necessary details about your task, including the number of files, objects, and directories transferred and transfer duration.
STANDARD
: Provides complete details about your task, including a full list of files, objects, and directories that were transferred, skipped, verified, and more.
Specifies whether you want your task report to include only what went wrong with your transfer or a list of what succeeded and didn't.
ERRORS_ONLY
: A report shows what DataSync was unable to transfer, skip, verify, and delete.
SUCCESSES_AND_ERRORS
: A report shows what DataSync was able and unable to transfer, skip, verify, and delete.
Specifies whether your task report includes the new version of each object transferred into an S3 bucket. This only applies if you enable versioning on your bucket. Keep in mind that setting this to INCLUDE
can increase the duration of your task execution.
Customizes the reporting level for aspects of your task report. For example, your report might generally only include errors, but you could specify that you want a list of successes and errors just for the files that DataSync attempted to delete in your destination location.
" + } + }, + "documentation":"Specifies how you want to configure a task report, which provides detailed information about for your DataSync transfer.
For more information, see Task reports.
" + }, "TaskSchedule":{ "type":"structure", "required":["ScheduleExpression"], @@ -4915,6 +5080,10 @@ "Includes":{ "shape":"FilterList", "documentation":"Specifies a list of filter rules that include specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.
" + }, + "TaskReportConfig":{ + "shape":"TaskReportConfig", + "documentation":"Specifies how you want to configure a task report, which provides detailed information about for your DataSync transfer.
" } }, "documentation":"UpdateTaskResponse
" diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index c29cd48dfc..a28a251971 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -3927,6 +3927,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -10410,6 +10411,7 @@ "deprecated" : true, "hostname" : "macie2-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { diff --git a/botocore/data/neptunedata/2023-08-01/endpoint-rule-set-1.json b/botocore/data/neptunedata/2023-08-01/endpoint-rule-set-1.json new file mode 100644 index 0000000000..8775e2ea8e --- /dev/null +++ b/botocore/data/neptunedata/2023-08-01/endpoint-rule-set-1.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } 
+ ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://neptune-db-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://neptune-db-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://neptune-db.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + 
"conditions": [], + "endpoint": { + "url": "https://neptune-db.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + } + ] +} \ No newline at end of file diff --git a/botocore/data/neptunedata/2023-08-01/paginators-1.json b/botocore/data/neptunedata/2023-08-01/paginators-1.json new file mode 100644 index 0000000000..ea142457a6 --- /dev/null +++ b/botocore/data/neptunedata/2023-08-01/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/neptunedata/2023-08-01/service-2.json b/botocore/data/neptunedata/2023-08-01/service-2.json new file mode 100644 index 0000000000..3c84553911 --- /dev/null +++ b/botocore/data/neptunedata/2023-08-01/service-2.json @@ -0,0 +1,4555 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2023-08-01", + "endpointPrefix":"neptune-db", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon NeptuneData", + "serviceId":"neptunedata", + "signatureVersion":"v4", + "signingName":"neptune-db", + "uid":"neptunedata-2023-08-01" + }, + "operations":{ + "CancelGremlinQuery":{ + "name":"CancelGremlinQuery", + "http":{ + "method":"DELETE", + "requestUri":"/gremlin/status/{queryId}", + "responseCode":200 + }, + "input":{"shape":"CancelGremlinQueryInput"}, + "output":{"shape":"CancelGremlinQueryOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"FailureByQueryException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ParsingException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"TimeLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + 
{"shape":"ConcurrentModificationException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"Cancels a Gremlin query. See Gremlin query cancellation for more information.
", + "authtype":"v4", + "idempotent":true + }, + "CancelLoaderJob":{ + "name":"CancelLoaderJob", + "http":{ + "method":"DELETE", + "requestUri":"/loader/{loadId}", + "responseCode":200 + }, + "input":{"shape":"CancelLoaderJobInput"}, + "output":{"shape":"CancelLoaderJobOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"BulkLoadIdNotFoundException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"LoadUrlAccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"InternalFailureException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"Cancels a specified load job. This is an HTTP DELETE
request.
See Neptune Loader Get-Status API for more information.
", + "authtype":"v4", + "idempotent":true + }, + "CancelMLDataProcessingJob":{ + "name":"CancelMLDataProcessingJob", + "http":{ + "method":"DELETE", + "requestUri":"/ml/dataprocessing/{id}", + "responseCode":200 + }, + "input":{"shape":"CancelMLDataProcessingJobInput"}, + "output":{"shape":"CancelMLDataProcessingJobOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"Cancels a Neptune ML data processing job. See The dataprocessing
command.
Cancels a Neptune ML model training job. See Model training using the modeltraining
command.
Cancels a specified model transform job. See Use a trained model to generate new model artifacts.
", + "authtype":"v4", + "idempotent":true + }, + "CancelOpenCypherQuery":{ + "name":"CancelOpenCypherQuery", + "http":{ + "method":"DELETE", + "requestUri":"/opencypher/status/{queryId}", + "responseCode":200 + }, + "input":{"shape":"CancelOpenCypherQueryInput"}, + "output":{"shape":"CancelOpenCypherQueryOutput"}, + "errors":[ + {"shape":"InvalidNumericDataException"}, + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"FailureByQueryException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ParsingException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"TimeLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"Cancels a specified openCypher query. See Neptune openCypher status endpoint for more information.
", + "authtype":"v4", + "idempotent":true + }, + "CreateMLEndpoint":{ + "name":"CreateMLEndpoint", + "http":{ + "method":"POST", + "requestUri":"/ml/endpoints", + "responseCode":200 + }, + "input":{"shape":"CreateMLEndpointInput"}, + "output":{"shape":"CreateMLEndpointOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"Creates a new Neptune ML inference endpoint that lets you query one specific model that the model-training process constructed. See Managing inference endpoints using the endpoints command.
", + "authtype":"v4" + }, + "DeleteMLEndpoint":{ + "name":"DeleteMLEndpoint", + "http":{ + "method":"DELETE", + "requestUri":"/ml/endpoints/{id}", + "responseCode":200 + }, + "input":{"shape":"DeleteMLEndpointInput"}, + "output":{"shape":"DeleteMLEndpointOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"Cancels the creation of a Neptune ML inference endpoint. See Managing inference endpoints using the endpoints command.
", + "authtype":"v4", + "idempotent":true + }, + "DeletePropertygraphStatistics":{ + "name":"DeletePropertygraphStatistics", + "http":{ + "method":"DELETE", + "requestUri":"/propertygraph/statistics", + "responseCode":200 + }, + "output":{"shape":"DeletePropertygraphStatisticsOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"StatisticsNotAvailableException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"Deletes statistics for Gremlin and openCypher (property graph) data.
", + "authtype":"v4", + "idempotent":true + }, + "DeleteSparqlStatistics":{ + "name":"DeleteSparqlStatistics", + "http":{ + "method":"DELETE", + "requestUri":"/sparql/statistics", + "responseCode":200 + }, + "output":{"shape":"DeleteSparqlStatisticsOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"StatisticsNotAvailableException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"Deletes SPARQL statistics
", + "authtype":"v4", + "idempotent":true + }, + "ExecuteFastReset":{ + "name":"ExecuteFastReset", + "http":{ + "method":"POST", + "requestUri":"/system", + "responseCode":200 + }, + "input":{"shape":"ExecuteFastResetInput"}, + "output":{"shape":"ExecuteFastResetOutput"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"ServerShutdownException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"MethodNotAllowedException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"The fast reset REST API lets you reset a Neptune graph quicky and easily, removing all of its data.
Neptune fast reset is a two-step process. First you call ExecuteFastReset
with action
set to initiateDatabaseReset
. This returns a UUID token which you then include when calling ExecuteFastReset
again with action
set to performDatabaseReset
. See Empty an Amazon Neptune DB cluster using the fast reset API.
Executes a Gremlin Explain query.
Amazon Neptune has added a Gremlin feature named explain
that provides a self-service tool for understanding the execution approach being taken by the Neptune engine for the query. You invoke it by adding an explain
parameter to an HTTP call that submits a Gremlin query.
The explain feature provides information about the logical structure of query execution plans. You can use this information to identify potential evaluation and execution bottlenecks and to tune your query, as explained in Tuning Gremlin queries. You can also use query hints to improve query execution plans.
", + "authtype":"v4" + }, + "ExecuteGremlinProfileQuery":{ + "name":"ExecuteGremlinProfileQuery", + "http":{ + "method":"POST", + "requestUri":"/gremlin/profile", + "responseCode":200 + }, + "input":{"shape":"ExecuteGremlinProfileQueryInput"}, + "output":{"shape":"ExecuteGremlinProfileQueryOutput"}, + "errors":[ + {"shape":"QueryTooLargeException"}, + {"shape":"BadRequestException"}, + {"shape":"QueryLimitExceededException"}, + {"shape":"InvalidParameterException"}, + {"shape":"QueryLimitException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"CancelledByUserException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"FailureByQueryException"}, + {"shape":"MemoryLimitExceededException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"MalformedQueryException"}, + {"shape":"ParsingException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"TimeLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"Executes a Gremlin Profile query, which runs a specified traversal, collects various metrics about the run, and produces a profile report as output. See Gremlin profile API in Neptune for details.
", + "authtype":"v4" + }, + "ExecuteGremlinQuery":{ + "name":"ExecuteGremlinQuery", + "http":{ + "method":"POST", + "requestUri":"/gremlin", + "responseCode":200 + }, + "input":{"shape":"ExecuteGremlinQueryInput"}, + "output":{"shape":"ExecuteGremlinQueryOutput"}, + "errors":[ + {"shape":"QueryTooLargeException"}, + {"shape":"BadRequestException"}, + {"shape":"QueryLimitExceededException"}, + {"shape":"InvalidParameterException"}, + {"shape":"QueryLimitException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"CancelledByUserException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"FailureByQueryException"}, + {"shape":"MemoryLimitExceededException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"MalformedQueryException"}, + {"shape":"ParsingException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"TimeLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"This commands executes a Gremlin query. Amazon Neptune is compatible with Apache TinkerPop3 and Gremlin, so you can use the Gremlin traversal language to query the graph, as described under The Graph in the Apache TinkerPop3 documentation. More details can also be found in Accessing a Neptune graph with Gremlin.
", + "authtype":"v4" + }, + "ExecuteOpenCypherExplainQuery":{ + "name":"ExecuteOpenCypherExplainQuery", + "http":{ + "method":"POST", + "requestUri":"/opencypher/explain", + "responseCode":200 + }, + "input":{"shape":"ExecuteOpenCypherExplainQueryInput"}, + "output":{"shape":"ExecuteOpenCypherExplainQueryOutput"}, + "errors":[ + {"shape":"QueryTooLargeException"}, + {"shape":"InvalidNumericDataException"}, + {"shape":"BadRequestException"}, + {"shape":"QueryLimitExceededException"}, + {"shape":"InvalidParameterException"}, + {"shape":"QueryLimitException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"CancelledByUserException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"FailureByQueryException"}, + {"shape":"MemoryLimitExceededException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"MalformedQueryException"}, + {"shape":"ParsingException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"TimeLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"Executes an openCypher explain
request. See The openCypher explain feature for more information.
Executes an openCypher query. See Accessing the Neptune Graph with openCypher for more information.
Neptune supports building graph applications using openCypher, which is currently one of the most popular query languages among developers working with graph databases. Developers, business analysts, and data scientists like openCypher's declarative, SQL-inspired syntax because it provides a familiar structure in which to query property graphs.
The openCypher language was originally developed by Neo4j, then open-sourced in 2015 and contributed to the openCypher project under an Apache 2 open-source license.
", + "authtype":"v4" + }, + "GetEngineStatus":{ + "name":"GetEngineStatus", + "http":{ + "method":"GET", + "requestUri":"/status", + "responseCode":200 + }, + "output":{"shape":"GetEngineStatusOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"InternalFailureException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"Check the status of the graph database on the host.
", + "authtype":"v4" + }, + "GetGremlinQueryStatus":{ + "name":"GetGremlinQueryStatus", + "http":{ + "method":"GET", + "requestUri":"/gremlin/status/{queryId}", + "responseCode":200 + }, + "input":{"shape":"GetGremlinQueryStatusInput"}, + "output":{"shape":"GetGremlinQueryStatusOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"FailureByQueryException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ParsingException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"TimeLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"Gets the status of a specified Gremlin query.
", + "authtype":"v4" + }, + "GetLoaderJobStatus":{ + "name":"GetLoaderJobStatus", + "http":{ + "method":"GET", + "requestUri":"/loader/{loadId}", + "responseCode":200 + }, + "input":{"shape":"GetLoaderJobStatusInput"}, + "output":{"shape":"GetLoaderJobStatusOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"BulkLoadIdNotFoundException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"LoadUrlAccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"InternalFailureException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"Gets status information about a specified load job. Neptune keeps track of the most recent 1,024 bulk load jobs, and stores the last 10,000 error details per job.
See Neptune Loader Get-Status API for more information.
", + "authtype":"v4" + }, + "GetMLDataProcessingJob":{ + "name":"GetMLDataProcessingJob", + "http":{ + "method":"GET", + "requestUri":"/ml/dataprocessing/{id}", + "responseCode":200 + }, + "input":{"shape":"GetMLDataProcessingJobInput"}, + "output":{"shape":"GetMLDataProcessingJobOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"Retrieves information about a specified data processing job. See The dataprocessing
command.
Retrieves details about an inference endpoint. See Managing inference endpoints using the endpoints command.
", + "authtype":"v4" + }, + "GetMLModelTrainingJob":{ + "name":"GetMLModelTrainingJob", + "http":{ + "method":"GET", + "requestUri":"/ml/modeltraining/{id}", + "responseCode":200 + }, + "input":{"shape":"GetMLModelTrainingJobInput"}, + "output":{"shape":"GetMLModelTrainingJobOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"Retrieves information about a Neptune ML model training job. See Model training using the modeltraining
command.
Gets information about a specified model transform job. See Use a trained model to generate new model artifacts.
", + "authtype":"v4" + }, + "GetOpenCypherQueryStatus":{ + "name":"GetOpenCypherQueryStatus", + "http":{ + "method":"GET", + "requestUri":"/opencypher/status/{queryId}", + "responseCode":200 + }, + "input":{"shape":"GetOpenCypherQueryStatusInput"}, + "output":{"shape":"GetOpenCypherQueryStatusOutput"}, + "errors":[ + {"shape":"InvalidNumericDataException"}, + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"FailureByQueryException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ParsingException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"TimeLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"Retrieves the status of a specified openCypher query.
", + "authtype":"v4" + }, + "GetPropertygraphStatistics":{ + "name":"GetPropertygraphStatistics", + "http":{ + "method":"GET", + "requestUri":"/propertygraph/statistics", + "responseCode":200 + }, + "output":{"shape":"GetPropertygraphStatisticsOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"StatisticsNotAvailableException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"Gets property graph statistics (Gremlin and openCypher).
", + "authtype":"v4" + }, + "GetPropertygraphStream":{ + "name":"GetPropertygraphStream", + "http":{ + "method":"GET", + "requestUri":"/propertygraph/stream", + "responseCode":200 + }, + "input":{"shape":"GetPropertygraphStreamInput"}, + "output":{"shape":"GetPropertygraphStreamOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"ExpiredStreamException"}, + {"shape":"InvalidParameterException"}, + {"shape":"MemoryLimitExceededException"}, + {"shape":"StreamRecordsNotFoundException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"Gets a stream for a property graph.
With the Neptune Streams feature, you can generate a complete sequence of change-log entries that record every change made to your graph data as it happens. GetPropertygraphStream
lets you collect these change-log entries for a property graph.
The Neptune streams feature needs to be enabled on your Neptune DB cluster. To enable streams, set the neptune_streams DB cluster parameter to 1
.
See Capturing graph changes in real time using Neptune streams.
", + "authtype":"v4" + }, + "GetPropertygraphSummary":{ + "name":"GetPropertygraphSummary", + "http":{ + "method":"GET", + "requestUri":"/propertygraph/statistics/summary", + "responseCode":200 + }, + "input":{"shape":"GetPropertygraphSummaryInput"}, + "output":{"shape":"GetPropertygraphSummaryOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"StatisticsNotAvailableException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"Gets a graph summary for a property graph.
", + "authtype":"v4" + }, + "GetRDFGraphSummary":{ + "name":"GetRDFGraphSummary", + "http":{ + "method":"GET", + "requestUri":"/rdf/statistics/summary", + "responseCode":200 + }, + "input":{"shape":"GetRDFGraphSummaryInput"}, + "output":{"shape":"GetRDFGraphSummaryOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"StatisticsNotAvailableException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"Gets a graph summary for an RDF graph.
", + "authtype":"v4" + }, + "GetSparqlStatistics":{ + "name":"GetSparqlStatistics", + "http":{ + "method":"GET", + "requestUri":"/sparql/statistics", + "responseCode":200 + }, + "output":{"shape":"GetSparqlStatisticsOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"StatisticsNotAvailableException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"Gets RDF statistics (SPARQL).
", + "authtype":"v4" + }, + "GetSparqlStream":{ + "name":"GetSparqlStream", + "http":{ + "method":"GET", + "requestUri":"/sparql/stream", + "responseCode":200 + }, + "input":{"shape":"GetSparqlStreamInput"}, + "output":{"shape":"GetSparqlStreamOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"ExpiredStreamException"}, + {"shape":"InvalidParameterException"}, + {"shape":"MemoryLimitExceededException"}, + {"shape":"StreamRecordsNotFoundException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"Gets a stream for an RDF graph.
With the Neptune Streams feature, you can generate a complete sequence of change-log entries that record every change made to your graph data as it happens. GetSparqlStream
lets you collect these change-log entries for an RDF graph.
The Neptune streams feature needs to be enabled on your Neptune DB cluster. To enable streams, set the neptune_streams DB cluster parameter to 1
.
See Capturing graph changes in real time using Neptune streams.
", + "authtype":"v4" + }, + "ListGremlinQueries":{ + "name":"ListGremlinQueries", + "http":{ + "method":"GET", + "requestUri":"/gremlin/status", + "responseCode":200 + }, + "input":{"shape":"ListGremlinQueriesInput"}, + "output":{"shape":"ListGremlinQueriesOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"FailureByQueryException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ParsingException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"TimeLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"Lists active Gremlin queries. See Gremlin query status API for details about the output.
", + "authtype":"v4" + }, + "ListLoaderJobs":{ + "name":"ListLoaderJobs", + "http":{ + "method":"GET", + "requestUri":"/loader", + "responseCode":200 + }, + "input":{"shape":"ListLoaderJobsInput"}, + "output":{"shape":"ListLoaderJobsOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"BulkLoadIdNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"LoadUrlAccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"Retrieves a list of the loadIds
for all active loader jobs.
Returns a list of Neptune ML data processing jobs. See Listing active data-processing jobs using the Neptune ML dataprocessing command.
", + "authtype":"v4" + }, + "ListMLEndpoints":{ + "name":"ListMLEndpoints", + "http":{ + "method":"GET", + "requestUri":"/ml/endpoints", + "responseCode":200 + }, + "input":{"shape":"ListMLEndpointsInput"}, + "output":{"shape":"ListMLEndpointsOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"Lists existing inference endpoints. See Managing inference endpoints using the endpoints command.
", + "authtype":"v4" + }, + "ListMLModelTrainingJobs":{ + "name":"ListMLModelTrainingJobs", + "http":{ + "method":"GET", + "requestUri":"/ml/modeltraining", + "responseCode":200 + }, + "input":{"shape":"ListMLModelTrainingJobsInput"}, + "output":{"shape":"ListMLModelTrainingJobsOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"Lists Neptune ML model-training jobs. See Model training using the modeltraining
command.
Returns a list of model transform job IDs. See Use a trained model to generate new model artifacts.
", + "authtype":"v4" + }, + "ListOpenCypherQueries":{ + "name":"ListOpenCypherQueries", + "http":{ + "method":"GET", + "requestUri":"/opencypher/status", + "responseCode":200 + }, + "input":{"shape":"ListOpenCypherQueriesInput"}, + "output":{"shape":"ListOpenCypherQueriesOutput"}, + "errors":[ + {"shape":"InvalidNumericDataException"}, + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"FailureByQueryException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ParsingException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"TimeLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"Lists active openCypher queries. See Neptune openCypher status endpoint for more information.
", + "authtype":"v4" + }, + "ManagePropertygraphStatistics":{ + "name":"ManagePropertygraphStatistics", + "http":{ + "method":"POST", + "requestUri":"/propertygraph/statistics", + "responseCode":200 + }, + "input":{"shape":"ManagePropertygraphStatisticsInput"}, + "output":{"shape":"ManagePropertygraphStatisticsOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"StatisticsNotAvailableException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"Manages the generation and use of property graph statistics.
", + "authtype":"v4", + "idempotent":true + }, + "ManageSparqlStatistics":{ + "name":"ManageSparqlStatistics", + "http":{ + "method":"POST", + "requestUri":"/sparql/statistics", + "responseCode":200 + }, + "input":{"shape":"ManageSparqlStatisticsInput"}, + "output":{"shape":"ManageSparqlStatisticsOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"StatisticsNotAvailableException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"Manages the generation and use of RDF graph statistics.
", + "authtype":"v4", + "idempotent":true + }, + "StartLoaderJob":{ + "name":"StartLoaderJob", + "http":{ + "method":"POST", + "requestUri":"/loader", + "responseCode":200 + }, + "input":{"shape":"StartLoaderJobInput"}, + "output":{"shape":"StartLoaderJobOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"BulkLoadIdNotFoundException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"LoadUrlAccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"InternalFailureException"}, + {"shape":"S3Exception"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"Starts a Neptune bulk loader job to load data from an Amazon S3 bucket into a Neptune DB instance. See Using the Amazon Neptune Bulk Loader to Ingest Data.
", + "authtype":"v4", + "idempotent":true + }, + "StartMLDataProcessingJob":{ + "name":"StartMLDataProcessingJob", + "http":{ + "method":"POST", + "requestUri":"/ml/dataprocessing", + "responseCode":200 + }, + "input":{"shape":"StartMLDataProcessingJobInput"}, + "output":{"shape":"StartMLDataProcessingJobOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"Creates a new Neptune ML data processing job for processing the graph data exported from Neptune for training. See The dataprocessing
command.
Creates a new Neptune ML model training job. See Model training using the modeltraining
command.
Creates a new model transform job. See Use a trained model to generate new model artifacts.
", + "authtype":"v4" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised in case of an authentication or authorization failure.
", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "Action":{ + "type":"string", + "enum":[ + "initiateDatabaseReset", + "performDatabaseReset" + ] + }, + "BadRequestException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the bad request.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when a request is submitted that cannot be processed.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Blob":{"type":"blob"}, + "Boolean":{ + "type":"boolean", + "box":true + }, + "BulkLoadIdNotFoundException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The bulk-load job ID that could not be found.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when a specified bulk-load job ID cannot be found.
", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":false} + }, + "CancelGremlinQueryInput":{ + "type":"structure", + "required":["queryId"], + "members":{ + "queryId":{ + "shape":"String", + "documentation":"The unique identifier that identifies the query to be canceled.
", + "location":"uri", + "locationName":"queryId" + } + } + }, + "CancelGremlinQueryOutput":{ + "type":"structure", + "members":{ + "status":{ + "shape":"String", + "documentation":"The status of the cancelation
" + } + } + }, + "CancelLoaderJobInput":{ + "type":"structure", + "required":["loadId"], + "members":{ + "loadId":{ + "shape":"String", + "documentation":"The ID of the load job to be deleted.
", + "location":"uri", + "locationName":"loadId" + } + } + }, + "CancelLoaderJobOutput":{ + "type":"structure", + "members":{ + "status":{ + "shape":"String", + "documentation":"The cancellation status.
" + } + } + }, + "CancelMLDataProcessingJobInput":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"String", + "documentation":"The unique identifier of the data-processing job.
", + "location":"uri", + "locationName":"id" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.
", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + }, + "clean":{ + "shape":"Boolean", + "documentation":"If set to TRUE
, this flag specifies that all Neptune ML S3 artifacts should be deleted when the job is stopped. The default is FALSE
.
The status of the cancellation request.
" + } + } + }, + "CancelMLModelTrainingJobInput":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"String", + "documentation":"The unique identifier of the model-training job to be canceled.
", + "location":"uri", + "locationName":"id" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.
", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + }, + "clean":{ + "shape":"Boolean", + "documentation":"If set to TRUE
, this flag specifies that all Amazon S3 artifacts should be deleted when the job is stopped. The default is FALSE
.
The status of the cancellation.
" + } + } + }, + "CancelMLModelTransformJobInput":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"String", + "documentation":"The unique ID of the model transform job to be canceled.
", + "location":"uri", + "locationName":"id" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.
", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + }, + "clean":{ + "shape":"Boolean", + "documentation":"If this flag is set to TRUE
, all Neptune ML S3 artifacts should be deleted when the job is stopped. The default is FALSE
.
the status of the cancelation.
" + } + } + }, + "CancelOpenCypherQueryInput":{ + "type":"structure", + "required":["queryId"], + "members":{ + "queryId":{ + "shape":"String", + "documentation":"The unique ID of the openCypher query to cancel.
", + "location":"uri", + "locationName":"queryId" + }, + "silent":{ + "shape":"Boolean", + "documentation":"If set to TRUE
, causes the cancelation of the openCypher query to happen silently.
The cancellation status of the openCypher query.
" + }, + "payload":{ + "shape":"Boolean", + "documentation":"The cancelation payload for the openCypher query.
" + } + } + }, + "CancelledByUserException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when a user cancelled a request.
", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "Classes":{ + "type":"list", + "member":{"shape":"String"} + }, + "ClientTimeoutException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when a request timed out in the client.
", + "error":{ + "httpStatusCode":408, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":false} + }, + "ConcurrentModificationException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when a request attempts to modify data that is concurrently being modified by another process.
", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "ConstraintViolationException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when a value in a request field did not satisfy required constraints.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":false} + }, + "CreateMLEndpointInput":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"A unique identifier for the new inference endpoint. The default is an autogenerated timestamped name.
" + }, + "mlModelTrainingJobId":{ + "shape":"String", + "documentation":"The job Id of the completed model-training job that has created the model that the inference endpoint will point to. You must supply either the mlModelTrainingJobId
or the mlModelTransformJobId
.
The job Id of the completed model-transform job. You must supply either the mlModelTrainingJobId
or the mlModelTransformJobId
.
If set to true
, update
indicates that this is an update request. The default is false
. You must supply either the mlModelTrainingJobId
or the mlModelTransformJobId
.
The ARN of an IAM role providing Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will be thrown.
" + }, + "modelName":{ + "shape":"String", + "documentation":"Model type for training. By default the Neptune ML model is automatically based on the modelType
used in data processing, but you can specify a different model type here. The default is rgcn
for heterogeneous graphs and kge
for knowledge graphs. The only valid value for heterogeneous graphs is rgcn
. Valid values for knowledge graphs are: kge
, transe
, distmult
, and rotate
.
The type of Neptune ML instance to use for online servicing. The default is ml.m5.xlarge
. Choosing the ML instance for an inference endpoint depends on the task type, the graph size, and your budget.
The minimum number of Amazon EC2 instances to deploy to an endpoint for prediction. The default is 1
" + }, + "volumeEncryptionKMSKey":{ + "shape":"String", + "documentation":"The Amazon Key Management Service (Amazon KMS) key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instances that run the training job. The default is None.
" + } + } + }, + "CreateMLEndpointOutput":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"The unique ID of the new inference endpoint.
" + }, + "arn":{ + "shape":"String", + "documentation":"The ARN for the new inference endpoint.
" + }, + "creationTimeInMillis":{ + "shape":"Long", + "documentation":"The endpoint creation time, in milliseconds.
" + } + } + }, + "CustomModelTrainingParameters":{ + "type":"structure", + "required":["sourceS3DirectoryPath"], + "members":{ + "sourceS3DirectoryPath":{ + "shape":"String", + "documentation":"The path to the Amazon S3 location where the Python module implementing your model is located. This must point to a valid existing Amazon S3 location that contains, at a minimum, a training script, a transform script, and a model-hpo-configuration.json
file.
The name of the entry point in your module of a script that performs model training and takes hyperparameters as command-line arguments, including fixed hyperparameters. The default is training.py
.
The name of the entry point in your module of a script that should be run after the best model from the hyperparameter search has been identified, to compute the model artifacts necessary for model deployment. It should be able to run with no command-line arguments.The default is transform.py
.
Contains custom model training parameters. See Custom models in Neptune ML.
" + }, + "CustomModelTransformParameters":{ + "type":"structure", + "required":["sourceS3DirectoryPath"], + "members":{ + "sourceS3DirectoryPath":{ + "shape":"String", + "documentation":"The path to the Amazon S3 location where the Python module implementing your model is located. This must point to a valid existing Amazon S3 location that contains, at a minimum, a training script, a transform script, and a model-hpo-configuration.json
file.
The name of the entry point in your module of a script that should be run after the best model from the hyperparameter search has been identified, to compute the model artifacts necessary for model deployment. It should be able to run with no command-line arguments. The default is transform.py
.
Contains custom model transform parameters. See Use a trained model to generate new model artifacts.
" + }, + "DeleteMLEndpointInput":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"String", + "documentation":"The unique identifier of the inference endpoint.
", + "location":"uri", + "locationName":"id" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"The ARN of an IAM role providing Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will be thrown.
", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + }, + "clean":{ + "shape":"Boolean", + "documentation":"If this flag is set to TRUE
, all Neptune ML S3 artifacts should be deleted when the job is stopped. The default is FALSE
.
The status of the cancellation.
" + } + } + }, + "DeletePropertygraphStatisticsOutput":{ + "type":"structure", + "members":{ + "statusCode":{ + "shape":"Integer", + "documentation":"The HTTP response code: 200 if the delete was successful, or 204 if there were no statistics to delete.
", + "location":"statusCode" + }, + "status":{ + "shape":"String", + "documentation":"The cancel status.
" + }, + "payload":{ + "shape":"DeleteStatisticsValueMap", + "documentation":"The deletion payload.
" + } + } + }, + "DeleteSparqlStatisticsOutput":{ + "type":"structure", + "members":{ + "statusCode":{ + "shape":"Integer", + "documentation":"The HTTP response code: 200 if the delete was successful, or 204 if there were no statistics to delete.
", + "location":"statusCode" + }, + "status":{ + "shape":"String", + "documentation":"The cancel status.
" + }, + "payload":{ + "shape":"DeleteStatisticsValueMap", + "documentation":"The deletion payload.
" + } + } + }, + "DeleteStatisticsValueMap":{ + "type":"structure", + "members":{ + "active":{ + "shape":"Boolean", + "documentation":"The current status of the statistics.
" + }, + "statisticsId":{ + "shape":"String", + "documentation":"The ID of the statistics generation run that is currently occurring.
" + } + }, + "documentation":"The payload for DeleteStatistics.
" + }, + "Document":{ + "type":"structure", + "members":{ + }, + "document":true + }, + "DocumentValuedMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"Document"} + }, + "EdgeLabels":{ + "type":"list", + "member":{"shape":"String"} + }, + "EdgeProperties":{ + "type":"list", + "member":{"shape":"String"} + }, + "EdgeStructure":{ + "type":"structure", + "members":{ + "count":{ + "shape":"Long", + "documentation":"The number of edges that have this specific structure.
" + }, + "edgeProperties":{ + "shape":"EdgeProperties", + "documentation":"A list of edge properties present in this specific structure.
" + } + }, + "documentation":"An edge structure.
" + }, + "EdgeStructures":{ + "type":"list", + "member":{"shape":"EdgeStructure"} + }, + "Encoding":{ + "type":"string", + "enum":["gzip"] + }, + "ExecuteFastResetInput":{ + "type":"structure", + "required":["action"], + "members":{ + "action":{ + "shape":"Action", + "documentation":"The fast reset action. One of the following values:
initiateDatabaseReset
– This action generates a unique token needed to actually perform the fast reset.
performDatabaseReset
– This action uses the token generated by the initiateDatabaseReset
action to actually perform the fast reset.
The fast-reset token to initiate the reset.
" + } + } + }, + "ExecuteFastResetOutput":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"String", + "documentation":"The status
is only returned for the performDatabaseReset
action, and indicates whether or not the fast reset rquest is accepted.
The payload
is only returned by the initiateDatabaseReset
action, and contains the unique token to use with the performDatabaseReset
action to make the reset occur.
The Gremlin explain query string.
", + "locationName":"gremlin" + } + } + }, + "ExecuteGremlinExplainQueryOutput":{ + "type":"structure", + "members":{ + "output":{ + "shape":"ReportAsText", + "documentation":"A text blob containing the Gremlin explain result, as described in Tuning Gremlin queries.
" + } + }, + "payload":"output" + }, + "ExecuteGremlinProfileQueryInput":{ + "type":"structure", + "required":["gremlinQuery"], + "members":{ + "gremlinQuery":{ + "shape":"String", + "documentation":"The Gremlin query string to profile.
", + "locationName":"gremlin" + }, + "results":{ + "shape":"Boolean", + "documentation":"If this flag is set to TRUE
, the query results are gathered and displayed as part of the profile report. If FALSE
, only the result count is displayed.
If non-zero, causes the results string to be truncated at that number of characters. If set to zero, the string contains all the results.
", + "locationName":"profile.chop" + }, + "serializer":{ + "shape":"String", + "documentation":"If non-null, the gathered results are returned in a serialized response message in the format specified by this parameter. See Gremlin profile API in Neptune for more information.
", + "locationName":"profile.serializer" + }, + "indexOps":{ + "shape":"Boolean", + "documentation":"If this flag is set to TRUE
, the results include a detailed report of all index operations that took place during query execution and serialization.
A text blob containing the Gremlin Profile result. See Gremlin profile API in Neptune for details.
" + } + }, + "payload":"output" + }, + "ExecuteGremlinQueryInput":{ + "type":"structure", + "required":["gremlinQuery"], + "members":{ + "gremlinQuery":{ + "shape":"String", + "documentation":"Using this API, you can run Gremlin queries in string format much as you can using the HTTP endpoint. The interface is compatible with whatever Gremlin version your DB cluster is using (see the Tinkerpop client section to determine which Gremlin releases your engine version supports).
", + "locationName":"gremlin" + }, + "serializer":{ + "shape":"String", + "documentation":"If non-null, the query results are returned in a serialized response message in the format specified by this parameter. See the GraphSON section in the TinkerPop documentation for a list of the formats that are currently supported.
", + "location":"header", + "locationName":"accept" + } + } + }, + "ExecuteGremlinQueryOutput":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"String", + "documentation":"The unique identifier of the Gremlin query.
" + }, + "status":{ + "shape":"GremlinQueryStatusAttributes", + "documentation":"The status of the Gremlin query.
" + }, + "result":{ + "shape":"Document", + "documentation":"The Gremlin query output from the server.
" + }, + "meta":{ + "shape":"Document", + "documentation":"Metadata about the Gremlin query.
" + } + } + }, + "ExecuteOpenCypherExplainQueryInput":{ + "type":"structure", + "required":[ + "openCypherQuery", + "explainMode" + ], + "members":{ + "openCypherQuery":{ + "shape":"String", + "documentation":"The openCypher query string.
", + "locationName":"query" + }, + "parameters":{ + "shape":"String", + "documentation":"The openCypher query parameters.
" + }, + "explainMode":{ + "shape":"OpenCypherExplainMode", + "documentation":"The openCypher explain
mode. Can be one of: static
, dynamic
, or details
.
A text blob containing the openCypher explain
results.
The openCypher query string to be executed.
", + "locationName":"query" + }, + "parameters":{ + "shape":"String", + "documentation":"The openCypher query parameters for query execution. See Examples of openCypher parameterized queries for more information.
" + } + } + }, + "ExecuteOpenCypherQueryOutput":{ + "type":"structure", + "required":["results"], + "members":{ + "results":{ + "shape":"Document", + "documentation":"The openCypherquery results.
" + } + } + }, + "ExpiredStreamException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when a request attempts to access an stream that has expired.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "FailureByQueryException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when a request fails.
", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "FastResetToken":{ + "type":"structure", + "members":{ + "token":{ + "shape":"String", + "documentation":"A UUID generated by the database in the initiateDatabaseReset
action, and then consumed by the performDatabaseReset
to reset the database.
A structure containing the fast reset token used to initiate a fast reset.
" + }, + "Format":{ + "type":"string", + "enum":[ + "csv", + "opencypher", + "ntriples", + "nquads", + "rdfxml", + "turtle" + ] + }, + "GetEngineStatusOutput":{ + "type":"structure", + "members":{ + "status":{ + "shape":"String", + "documentation":"Set to healthy
if the instance is not experiencing problems. If the instance is recovering from a crash or from being rebooted and there are active transactions running from the latest server shutdown, status is set to recovery
.
Set to the UTC time at which the current server process started.
" + }, + "dbEngineVersion":{ + "shape":"String", + "documentation":"Set to the Neptune engine version running on your DB cluster. If this engine version has been manually patched since it was released, the version number is prefixed by Patch-
.
Set to reader
if the instance is a read-replica, or to writer
if the instance is the primary instance.
Set to enabled
if the DFE engine is fully enabled, or to viaQueryHint
(the default) if the DFE engine is only used with queries that have the useDFE
query hint set to true
.
Contains information about the Gremlin query language available on your cluster. Specifically, it contains a version field that specifies the current TinkerPop version being used by the engine.
" + }, + "sparql":{ + "shape":"QueryLanguageVersion", + "documentation":"Contains information about the SPARQL query language available on your cluster. Specifically, it contains a version field that specifies the current SPARQL version being used by the engine.
" + }, + "opencypher":{ + "shape":"QueryLanguageVersion", + "documentation":"Contains information about the openCypher query language available on your cluster. Specifically, it contains a version field that specifies the current operCypher version being used by the engine.
" + }, + "labMode":{ + "shape":"StringValuedMap", + "documentation":"Contains Lab Mode settings being used by the engine.
" + }, + "rollingBackTrxCount":{ + "shape":"Integer", + "documentation":"If there are transactions being rolled back, this field is set to the number of such transactions. If there are none, the field doesn't appear at all.
" + }, + "rollingBackTrxEarliestStartTime":{ + "shape":"String", + "documentation":"Set to the start time of the earliest transaction being rolled back. If no transactions are being rolled back, the field doesn't appear at all.
" + }, + "features":{ + "shape":"DocumentValuedMap", + "documentation":"Contains status information about the features enabled on your DB cluster.
" + }, + "settings":{ + "shape":"StringValuedMap", + "documentation":"Contains information about the current settings on your DB cluster. For example, contains the current cluster query timeout setting (clusterQueryTimeoutInMs
).
The unique identifier that identifies the Gremlin query.
", + "location":"uri", + "locationName":"queryId" + } + } + }, + "GetGremlinQueryStatusOutput":{ + "type":"structure", + "members":{ + "queryId":{ + "shape":"String", + "documentation":"The ID of the query for which status is being returned.
" + }, + "queryString":{ + "shape":"String", + "documentation":"The Gremlin query string.
" + }, + "queryEvalStats":{ + "shape":"QueryEvalStats", + "documentation":"The evaluation status of the Gremlin query.
" + } + } + }, + "GetLoaderJobStatusInput":{ + "type":"structure", + "required":["loadId"], + "members":{ + "loadId":{ + "shape":"String", + "documentation":"The load ID of the load job to get the status of.
", + "location":"uri", + "locationName":"loadId" + }, + "details":{ + "shape":"Boolean", + "documentation":"Flag indicating whether or not to include details beyond the overall status (TRUE
or FALSE
; the default is FALSE
).
Flag indicating whether or not to include a list of errors encountered (TRUE
or FALSE
; the default is FALSE
).
The list of errors is paged. The page
and errorsPerPage
parameters allow you to page through all the errors.
The error page number (a positive integer; the default is 1
). Only valid when the errors
parameter is set to TRUE
.
The number of errors returned in each page (a positive integer; the default is 10
). Only valid when the errors
parameter is set to TRUE
.
The HTTP response code for the request.
" + }, + "payload":{ + "shape":"Document", + "documentation":"Status information about the load job, in a layout that could look like this:
" + } + } + }, + "GetMLDataProcessingJobInput":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"String", + "documentation":"The unique identifier of the data-processing job to be retrieved.
", + "location":"uri", + "locationName":"id" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.
", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + } + } + }, + "GetMLDataProcessingJobOutput":{ + "type":"structure", + "members":{ + "status":{ + "shape":"String", + "documentation":"Status of the data processing job.
" + }, + "id":{ + "shape":"String", + "documentation":"The unique identifier of this data-processing job.
" + }, + "processingJob":{ + "shape":"MlResourceDefinition", + "documentation":"Definition of the data processing job.
" + } + } + }, + "GetMLEndpointInput":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"String", + "documentation":"The unique identifier of the inference endpoint.
", + "location":"uri", + "locationName":"id" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.
", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + } + } + }, + "GetMLEndpointOutput":{ + "type":"structure", + "members":{ + "status":{ + "shape":"String", + "documentation":"The status of the inference endpoint.
" + }, + "id":{ + "shape":"String", + "documentation":"The unique identifier of the inference endpoint.
" + }, + "endpoint":{ + "shape":"MlResourceDefinition", + "documentation":"The endpoint definition.
" + }, + "endpointConfig":{ + "shape":"MlConfigDefinition", + "documentation":"The endpoint configuration
" + } + } + }, + "GetMLModelTrainingJobInput":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"String", + "documentation":"The unique identifier of the model-training job to retrieve.
", + "location":"uri", + "locationName":"id" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.
", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + } + } + }, + "GetMLModelTrainingJobOutput":{ + "type":"structure", + "members":{ + "status":{ + "shape":"String", + "documentation":"The status of the model training job.
" + }, + "id":{ + "shape":"String", + "documentation":"The unique identifier of this model-training job.
" + }, + "processingJob":{ + "shape":"MlResourceDefinition", + "documentation":"The data processing job.
" + }, + "hpoJob":{ + "shape":"MlResourceDefinition", + "documentation":"The HPO job.
" + }, + "modelTransformJob":{ + "shape":"MlResourceDefinition", + "documentation":"The model transform job.
" + }, + "mlModels":{ + "shape":"MlModels", + "documentation":"A list of the configurations of the ML models being used.
" + } + } + }, + "GetMLModelTransformJobInput":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"String", + "documentation":"The unique identifier of the model-transform job to be reetrieved.
", + "location":"uri", + "locationName":"id" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.
", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + } + } + }, + "GetMLModelTransformJobOutput":{ + "type":"structure", + "members":{ + "status":{ + "shape":"String", + "documentation":"The status of the model-transform job.
" + }, + "id":{ + "shape":"String", + "documentation":"The unique identifier of the model-transform job to be retrieved.
" + }, + "baseProcessingJob":{ + "shape":"MlResourceDefinition", + "documentation":"The base data processing job.
" + }, + "remoteModelTransformJob":{ + "shape":"MlResourceDefinition", + "documentation":"The remote model transform job.
" + }, + "models":{ + "shape":"Models", + "documentation":"A list of the configuration information for the models being used.
" + } + } + }, + "GetOpenCypherQueryStatusInput":{ + "type":"structure", + "required":["queryId"], + "members":{ + "queryId":{ + "shape":"String", + "documentation":"The unique ID of the openCypher query for which to retrieve the query status.
", + "location":"uri", + "locationName":"queryId" + } + } + }, + "GetOpenCypherQueryStatusOutput":{ + "type":"structure", + "members":{ + "queryId":{ + "shape":"String", + "documentation":"The unique ID of the query for which status is being returned.
" + }, + "queryString":{ + "shape":"String", + "documentation":"The openCypher query string.
" + }, + "queryEvalStats":{ + "shape":"QueryEvalStats", + "documentation":"The openCypher query evaluation status.
" + } + } + }, + "GetPropertygraphStatisticsOutput":{ + "type":"structure", + "required":[ + "status", + "payload" + ], + "members":{ + "status":{ + "shape":"String", + "documentation":"The HTTP return code of the request. If the request succeeded, the code is 200. See Common error codes for DFE statistics request for a list of common errors.
" + }, + "payload":{ + "shape":"Statistics", + "documentation":"Statistics for property-graph data.
" + } + } + }, + "GetPropertygraphStreamInput":{ + "type":"structure", + "members":{ + "limit":{ + "shape":"GetPropertygraphStreamInputLimitLong", + "documentation":"Specifies the maximum number of records to return. There is also a size limit of 10 MB on the response that can't be modified and that takes precedence over the number of records specified in the limit
parameter. The response does include a threshold-breaching record if the 10 MB limit was reached.
The range for limit
is 1 to 100,000, with a default of 10.
Can be one of:
AT_SEQUENCE_NUMBER
– Indicates that reading should start from the event sequence number specified jointly by the commitNum
and opNum
parameters.
AFTER_SEQUENCE_NUMBER
– Indicates that reading should start right after the event sequence number specified jointly by the commitNum
and opNum
parameters.
TRIM_HORIZON
– Indicates that reading should start at the last untrimmed record in the system, which is the oldest unexpired (not yet deleted) record in the change-log stream.
LATEST
– Indicates that reading should start at the most recent record in the system, which is the latest unexpired (not yet deleted) record in the change-log stream.
The commit number of the starting record to read from the change-log stream. This parameter is required when iteratorType
is AT_SEQUENCE_NUMBER
or AFTER_SEQUENCE_NUMBER
, and ignored when iteratorType
is TRIM_HORIZON
or LATEST
.
The operation sequence number within the specified commit to start reading from in the change-log stream data. The default is 1
.
If set to TRUE, Neptune compresses the response using gzip encoding.
", + "location":"header", + "locationName":"Accept-Encoding" + } + } + }, + "GetPropertygraphStreamInputLimitLong":{ + "type":"long", + "box":true, + "max":100000, + "min":1 + }, + "GetPropertygraphStreamOutput":{ + "type":"structure", + "required":[ + "lastEventId", + "lastTrxTimestampInMillis", + "format", + "records", + "totalRecords" + ], + "members":{ + "lastEventId":{ + "shape":"StringValuedMap", + "documentation":"Sequence identifier of the last change in the stream response.
An event ID is composed of two fields: a commitNum
, which identifies a transaction that changed the graph, and an opNum
, which identifies a specific operation within that transaction:
The time at which the commit for the transaction was requested, in milliseconds from the Unix epoch.
", + "locationName":"lastTrxTimestamp" + }, + "format":{ + "shape":"String", + "documentation":"Serialization format for the change records being returned. Currently, the only supported value is PG_JSON
.
An array of serialized change-log stream records included in the response.
" + }, + "totalRecords":{ + "shape":"Integer", + "documentation":"The total number of records in the response.
" + } + } + }, + "GetPropertygraphSummaryInput":{ + "type":"structure", + "members":{ + "mode":{ + "shape":"GraphSummaryType", + "documentation":"Mode can take one of two values: BASIC
(the default), and DETAILED
.
The HTTP return code of the request. If the request succeeded, the code is 200.
", + "location":"statusCode" + }, + "payload":{ + "shape":"PropertygraphSummaryValueMap", + "documentation":"Payload containing the property graph summary response.
" + } + } + }, + "GetRDFGraphSummaryInput":{ + "type":"structure", + "members":{ + "mode":{ + "shape":"GraphSummaryType", + "documentation":"Mode can take one of two values: BASIC
(the default), and DETAILED
.
The HTTP return code of the request. If the request succeeded, the code is 200.
", + "location":"statusCode" + }, + "payload":{ + "shape":"RDFGraphSummaryValueMap", + "documentation":"Payload for an RDF graph summary response
" + } + } + }, + "GetSparqlStatisticsOutput":{ + "type":"structure", + "required":[ + "status", + "payload" + ], + "members":{ + "status":{ + "shape":"String", + "documentation":"The HTTP return code of the request. If the request succeeded, the code is 200. See Common error codes for DFE statistics request for a list of common errors.
" + }, + "payload":{ + "shape":"Statistics", + "documentation":"Statistics for RDF data.
" + } + } + }, + "GetSparqlStreamInput":{ + "type":"structure", + "members":{ + "limit":{ + "shape":"GetSparqlStreamInputLimitLong", + "documentation":"Specifies the maximum number of records to return. There is also a size limit of 10 MB on the response that can't be modified and that takes precedence over the number of records specified in the limit
parameter. The response does include a threshold-breaching record if the 10 MB limit was reached.
The range for limit
is 1 to 100,000, with a default of 10.
Can be one of:
AT_SEQUENCE_NUMBER
– Indicates that reading should start from the event sequence number specified jointly by the commitNum
and opNum
parameters.
AFTER_SEQUENCE_NUMBER
– Indicates that reading should start right after the event sequence number specified jointly by the commitNum
and opNum
parameters.
TRIM_HORIZON
– Indicates that reading should start at the last untrimmed record in the system, which is the oldest unexpired (not yet deleted) record in the change-log stream.
LATEST
– Indicates that reading should start at the most recent record in the system, which is the latest unexpired (not yet deleted) record in the change-log stream.
The commit number of the starting record to read from the change-log stream. This parameter is required when iteratorType
is AT_SEQUENCE_NUMBER
or AFTER_SEQUENCE_NUMBER
, and ignored when iteratorType
is TRIM_HORIZON
or LATEST
.
The operation sequence number within the specified commit to start reading from in the change-log stream data. The default is 1
.
If set to TRUE, Neptune compresses the response using gzip encoding.
", + "location":"header", + "locationName":"Accept-Encoding" + } + } + }, + "GetSparqlStreamInputLimitLong":{ + "type":"long", + "box":true, + "max":100000, + "min":1 + }, + "GetSparqlStreamOutput":{ + "type":"structure", + "required":[ + "lastEventId", + "lastTrxTimestampInMillis", + "format", + "records", + "totalRecords" + ], + "members":{ + "lastEventId":{ + "shape":"StringValuedMap", + "documentation":"Sequence identifier of the last change in the stream response.
An event ID is composed of two fields: a commitNum
, which identifies a transaction that changed the graph, and an opNum
, which identifies a specific operation within that transaction:
The time at which the commit for the transaction was requested, in milliseconds from the Unix epoch.
", + "locationName":"lastTrxTimestamp" + }, + "format":{ + "shape":"String", + "documentation":"Serialization format for the change records being returned. Currently, the only supported value is NQUADS
.
An array of serialized change-log stream records included in the response.
" + }, + "totalRecords":{ + "shape":"Integer", + "documentation":"The total number of records in the response.
" + } + } + }, + "GraphSummaryType":{ + "type":"string", + "enum":[ + "basic", + "detailed" + ] + }, + "GremlinQueries":{ + "type":"list", + "member":{"shape":"GremlinQueryStatus"} + }, + "GremlinQueryStatus":{ + "type":"structure", + "members":{ + "queryId":{ + "shape":"String", + "documentation":"The ID of the Gremlin query.
" + }, + "queryString":{ + "shape":"String", + "documentation":"The query string of the Gremlin query.
" + }, + "queryEvalStats":{ + "shape":"QueryEvalStats", + "documentation":"The query statistics of the Gremlin query.
" + } + }, + "documentation":"Captures the status of a Gremlin query (see the Gremlin query status API page).
" + }, + "GremlinQueryStatusAttributes":{ + "type":"structure", + "members":{ + "message":{ + "shape":"String", + "documentation":"The status message.
" + }, + "code":{ + "shape":"Integer", + "documentation":"The HTTP response code returned fro the Gremlin query request..
" + }, + "attributes":{ + "shape":"Document", + "documentation":"Attributes of the Gremlin query status.
" + } + }, + "documentation":"Contains status components of a Gremlin query.
" + }, + "IllegalArgumentException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when an argument in a request is not supported.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalFailureException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when the processing of the request failed unexpectedly.
", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "InvalidArgumentException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when an argument in a request has an invalid value.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidNumericDataException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when invalid numerical data is encountered when servicing a request.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidParameterException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request that includes an invalid parameter.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when a parameter value is not valid.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "IteratorType":{ + "type":"string", + "enum":[ + "AT_SEQUENCE_NUMBER", + "AFTER_SEQUENCE_NUMBER", + "TRIM_HORIZON", + "LATEST" + ] + }, + "ListGremlinQueriesInput":{ + "type":"structure", + "members":{ + "includeWaiting":{ + "shape":"Boolean", + "documentation":"If set to TRUE
, the list returned includes waiting queries. The default is FALSE
;
The number of queries that have been accepted but not yet completed, including queries in the queue.
" + }, + "runningQueryCount":{ + "shape":"Integer", + "documentation":"The number of Gremlin queries currently running.
" + }, + "queries":{ + "shape":"GremlinQueries", + "documentation":"A list of the current queries.
" + } + } + }, + "ListLoaderJobsInput":{ + "type":"structure", + "members":{ + "limit":{ + "shape":"ListLoaderJobsInputLimitInteger", + "documentation":"The number of load IDs to list. Must be a positive integer greater than zero and not more than 100
(which is the default).
An optional parameter that can be used to exclude the load IDs of queued load requests when requesting a list of load IDs by setting the parameter to FALSE
. The default value is TRUE
.
Returns the status of the job list request.
" + }, + "payload":{ + "shape":"LoaderIdResult", + "documentation":"The requested list of job IDs.
" + } + } + }, + "ListMLDataProcessingJobsInput":{ + "type":"structure", + "members":{ + "maxItems":{ + "shape":"ListMLDataProcessingJobsInputMaxItemsInteger", + "documentation":"The maximum number of items to return (from 1 to 1024; the default is 10).
", + "location":"querystring", + "locationName":"maxItems" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.
", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + } + } + }, + "ListMLDataProcessingJobsInputMaxItemsInteger":{ + "type":"integer", + "box":true, + "max":1024, + "min":1 + }, + "ListMLDataProcessingJobsOutput":{ + "type":"structure", + "members":{ + "ids":{ + "shape":"StringList", + "documentation":"A page listing data processing job IDs.
" + } + } + }, + "ListMLEndpointsInput":{ + "type":"structure", + "members":{ + "maxItems":{ + "shape":"ListMLEndpointsInputMaxItemsInteger", + "documentation":"The maximum number of items to return (from 1 to 1024; the default is 10.
", + "location":"querystring", + "locationName":"maxItems" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.
", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + } + } + }, + "ListMLEndpointsInputMaxItemsInteger":{ + "type":"integer", + "box":true, + "max":1024, + "min":1 + }, + "ListMLEndpointsOutput":{ + "type":"structure", + "members":{ + "ids":{ + "shape":"StringList", + "documentation":"A page from the list of inference endpoint IDs.
" + } + } + }, + "ListMLModelTrainingJobsInput":{ + "type":"structure", + "members":{ + "maxItems":{ + "shape":"ListMLModelTrainingJobsInputMaxItemsInteger", + "documentation":"The maximum number of items to return (from 1 to 1024; the default is 10).
", + "location":"querystring", + "locationName":"maxItems" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.
", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + } + } + }, + "ListMLModelTrainingJobsInputMaxItemsInteger":{ + "type":"integer", + "box":true, + "max":1024, + "min":1 + }, + "ListMLModelTrainingJobsOutput":{ + "type":"structure", + "members":{ + "ids":{ + "shape":"StringList", + "documentation":"A page of the list of model training job IDs.
" + } + } + }, + "ListMLModelTransformJobsInput":{ + "type":"structure", + "members":{ + "maxItems":{ + "shape":"ListMLModelTransformJobsInputMaxItemsInteger", + "documentation":"The maximum number of items to return (from 1 to 1024; the default is 10).
", + "location":"querystring", + "locationName":"maxItems" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.
", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + } + } + }, + "ListMLModelTransformJobsInputMaxItemsInteger":{ + "type":"integer", + "box":true, + "max":1024, + "min":1 + }, + "ListMLModelTransformJobsOutput":{ + "type":"structure", + "members":{ + "ids":{ + "shape":"StringList", + "documentation":"A page from the list of model transform IDs.
" + } + } + }, + "ListOpenCypherQueriesInput":{ + "type":"structure", + "members":{ + "includeWaiting":{ + "shape":"Boolean", + "documentation":" When set to TRUE
and other parameters are not present, causes status information to be returned for waiting queries as well as for running queries.
The number of queries that have been accepted but not yet completed, including queries in the queue.
" + }, + "runningQueryCount":{ + "shape":"Integer", + "documentation":"The number of currently running openCypher queries.
" + }, + "queries":{ + "shape":"OpenCypherQueries", + "documentation":"A list of current openCypher queries.
" + } + } + }, + "LoadUrlAccessDeniedException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when access is denied to a specified load URL.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LoaderIdResult":{ + "type":"structure", + "members":{ + "loadIds":{ + "shape":"StringList", + "documentation":"A list of load IDs.
" + } + }, + "documentation":"Contains a list of load IDs.
" + }, + "Long":{ + "type":"long", + "box":true + }, + "LongValuedMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"Long"} + }, + "LongValuedMapList":{ + "type":"list", + "member":{"shape":"LongValuedMap"} + }, + "MLResourceNotFoundException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when a specified machine-learning resource could not be found.
", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "MalformedQueryException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the malformed query request.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when a query is submitted that is syntactically incorrect or does not pass additional validation.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ManagePropertygraphStatisticsInput":{ + "type":"structure", + "members":{ + "mode":{ + "shape":"StatisticsAutoGenerationMode", + "documentation":"The statistics generation mode. One of: DISABLE_AUTOCOMPUTE
, ENABLE_AUTOCOMPUTE
, or REFRESH
, the last of which manually triggers DFE statistics generation.
The HTTP return code of the request. If the request succeeded, the code is 200.
" + }, + "payload":{ + "shape":"RefreshStatisticsIdMap", + "documentation":"This is only returned for refresh mode.
" + } + } + }, + "ManageSparqlStatisticsInput":{ + "type":"structure", + "members":{ + "mode":{ + "shape":"StatisticsAutoGenerationMode", + "documentation":"The statistics generation mode. One of: DISABLE_AUTOCOMPUTE
, ENABLE_AUTOCOMPUTE
, or REFRESH
, the last of which manually triggers DFE statistics generation.
The HTTP return code of the request. If the request succeeded, the code is 200.
" + }, + "payload":{ + "shape":"RefreshStatisticsIdMap", + "documentation":"This is only returned for refresh mode.
" + } + } + }, + "MemoryLimitExceededException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request that failed.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when a request fails because of insufficient memory resources. The request can be retried.
", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "MethodNotAllowedException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when the HTTP method used by a request is not supported by the endpoint being used.
", + "error":{ + "httpStatusCode":405, + "senderFault":true + }, + "exception":true + }, + "MissingParameterException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in which the parameter is missing.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when a required parameter is missing.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "MlConfigDefinition":{ + "type":"structure", + "members":{ + "name":{ + "shape":"String", + "documentation":"The configuration name.
" + }, + "arn":{ + "shape":"String", + "documentation":"The ARN for the configuration.
" + } + }, + "documentation":"Contains a Neptune ML configuration.
" + }, + "MlModels":{ + "type":"list", + "member":{"shape":"MlConfigDefinition"} + }, + "MlResourceDefinition":{ + "type":"structure", + "members":{ + "name":{ + "shape":"String", + "documentation":"The resource name.
" + }, + "arn":{ + "shape":"String", + "documentation":"The resource ARN.
" + }, + "status":{ + "shape":"String", + "documentation":"The resource status.
" + }, + "outputLocation":{ + "shape":"String", + "documentation":"The output location.
" + }, + "failureReason":{ + "shape":"String", + "documentation":"The failure reason, in case of a failure.
" + }, + "cloudwatchLogUrl":{ + "shape":"String", + "documentation":"The CloudWatch log URL for the resource.
" + } + }, + "documentation":"Defines a Neptune ML resource.
" + }, + "Mode":{ + "type":"string", + "enum":[ + "RESUME", + "NEW", + "AUTO" + ] + }, + "Models":{ + "type":"list", + "member":{"shape":"MlConfigDefinition"} + }, + "NodeLabels":{ + "type":"list", + "member":{"shape":"String"} + }, + "NodeProperties":{ + "type":"list", + "member":{"shape":"String"} + }, + "NodeStructure":{ + "type":"structure", + "members":{ + "count":{ + "shape":"Long", + "documentation":"Number of nodes that have this specific structure.
" + }, + "nodeProperties":{ + "shape":"NodeProperties", + "documentation":"A list of the node properties present in this specific structure.
" + }, + "distinctOutgoingEdgeLabels":{ + "shape":"OutgoingEdgeLabels", + "documentation":"A list of distinct outgoing edge labels present in this specific structure.
" + } + }, + "documentation":"A node structure.
" + }, + "NodeStructures":{ + "type":"list", + "member":{"shape":"NodeStructure"} + }, + "OpenCypherExplainMode":{ + "type":"string", + "enum":[ + "static", + "dynamic", + "details" + ] + }, + "OpenCypherQueries":{ + "type":"list", + "member":{"shape":"GremlinQueryStatus"} + }, + "OutgoingEdgeLabels":{ + "type":"list", + "member":{"shape":"String"} + }, + "Parallelism":{ + "type":"string", + "enum":[ + "LOW", + "MEDIUM", + "HIGH", + "OVERSUBSCRIBE" + ] + }, + "ParsingException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when a parsing issue is encountered.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PositiveInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "PreconditionsFailedException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when a precondition for processing a request is not satisfied.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Predicates":{ + "type":"list", + "member":{"shape":"String"} + }, + "PropertygraphData":{ + "type":"structure", + "required":[ + "id", + "type", + "key", + "value" + ], + "members":{ + "id":{ + "shape":"String", + "documentation":"The ID of the Gremlin or openCypher element.
" + }, + "type":{ + "shape":"String", + "documentation":"The type of this Gremlin or openCypher element. Must be one of:
v1
- Vertex label for Gremlin, or node label for openCypher.
vp
- Vertex properties for Gremlin, or node properties for openCypher.
e
- Edge and edge label for Gremlin, or relationship and relationship type for openCypher.
ep
- Edge properties for Gremlin, or relationship properties for openCypher.
The property name. For element labels, this is label
.
This is a JSON object that contains a value field for the value itself, and a datatype field for the JSON data type of that value:
" + }, + "from":{ + "shape":"String", + "documentation":"If this is an edge (type = e
), the ID of the corresponding from
vertex or source node.
If this is an edge (type = e
), the ID of the corresponding to
vertex or target node.
A Gremlin or openCypher change record.
" + }, + "PropertygraphRecord":{ + "type":"structure", + "required":[ + "commitTimestampInMillis", + "eventId", + "data", + "op" + ], + "members":{ + "commitTimestampInMillis":{ + "shape":"Long", + "documentation":"The time at which the commit for the transaction was requested, in milliseconds from the Unix epoch.
", + "locationName":"commitTimestamp" + }, + "eventId":{ + "shape":"StringValuedMap", + "documentation":"The sequence identifier of the stream change record.
" + }, + "data":{ + "shape":"PropertygraphData", + "documentation":"The serialized Gremlin or openCypher change record.
" + }, + "op":{ + "shape":"String", + "documentation":"The operation that created the change.
" + }, + "isLastOp":{ + "shape":"Boolean", + "documentation":"Only present if this operation is the last one in its transaction. If present, it is set to true. It is useful for ensuring that an entire transaction is consumed.
" + } + }, + "documentation":"Structure of a property graph record.
" + }, + "PropertygraphRecordsList":{ + "type":"list", + "member":{"shape":"PropertygraphRecord"} + }, + "PropertygraphSummary":{ + "type":"structure", + "members":{ + "numNodes":{ + "shape":"Long", + "documentation":"The number of nodes in the graph.
" + }, + "numEdges":{ + "shape":"Long", + "documentation":"The number of edges in the graph.
" + }, + "numNodeLabels":{ + "shape":"Long", + "documentation":"The number of distinct node labels in the graph.
" + }, + "numEdgeLabels":{ + "shape":"Long", + "documentation":"The number of distinct edge labels in the graph.
" + }, + "nodeLabels":{ + "shape":"NodeLabels", + "documentation":"A list of the distinct node labels in the graph.
" + }, + "edgeLabels":{ + "shape":"EdgeLabels", + "documentation":"A list of the distinct edge labels in the graph.
" + }, + "numNodeProperties":{ + "shape":"Long", + "documentation":"A list of the distinct node properties in the graph, along with the count of nodes where each property is used.
" + }, + "numEdgeProperties":{ + "shape":"Long", + "documentation":"The number of distinct edge properties in the graph.
" + }, + "nodeProperties":{ + "shape":"LongValuedMapList", + "documentation":"The number of distinct node properties in the graph.
" + }, + "edgeProperties":{ + "shape":"LongValuedMapList", + "documentation":"A list of the distinct edge properties in the graph, along with the count of edges where each property is used.
" + }, + "totalNodePropertyValues":{ + "shape":"Long", + "documentation":"The total number of usages of all node properties.
" + }, + "totalEdgePropertyValues":{ + "shape":"Long", + "documentation":"The total number of usages of all edge properties.
" + }, + "nodeStructures":{ + "shape":"NodeStructures", + "documentation":"This field is only present when the requested mode is DETAILED
. It contains a list of node structures.
This field is only present when the requested mode is DETAILED
. It contains a list of edge structures.
The graph summary API returns a read-only list of node and edge labels and property keys, along with counts of nodes, edges, and properties. See Graph summary response for a property graph (PG).
" + }, + "PropertygraphSummaryValueMap":{ + "type":"structure", + "members":{ + "version":{ + "shape":"String", + "documentation":"The version of this graph summary response.
" + }, + "lastStatisticsComputationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"The timestamp, in ISO 8601 format, of the time at which Neptune last computed statistics.
" + }, + "graphSummary":{ + "shape":"PropertygraphSummary", + "documentation":"The graph summary.
" + } + }, + "documentation":"Payload for the property graph summary response.
" + }, + "QueryEvalStats":{ + "type":"structure", + "members":{ + "waited":{ + "shape":"Integer", + "documentation":"Indicates how long the query waited, in milliseconds.
" + }, + "elapsed":{ + "shape":"Integer", + "documentation":"The number of milliseconds the query has been running so far.
" + }, + "cancelled":{ + "shape":"Boolean", + "documentation":"Set to TRUE
if the query was cancelled, or FALSE otherwise.
The number of subqueries in this query.
" + } + }, + "documentation":"Structure to capture query statistics such as how many queries are running, accepted or waiting and their details.
" + }, + "QueryLanguageVersion":{ + "type":"structure", + "required":["version"], + "members":{ + "version":{ + "shape":"String", + "documentation":"The version of the query language.
" + } + }, + "documentation":"Structure for expressing the query language version.
" + }, + "QueryLimitExceededException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request which exceeded the limit.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when the number of active queries exceeds what the server can process. The query in question can be retried when the system is less busy.
", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "QueryLimitException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request that exceeded the limit.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when the size of a query exceeds the system limit.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "QueryTooLargeException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request that is too large.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when the body of a query is too large.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "RDFGraphSummary":{ + "type":"structure", + "members":{ + "numDistinctSubjects":{ + "shape":"Long", + "documentation":"The number of distinct subjects in the graph.
" + }, + "numDistinctPredicates":{ + "shape":"Long", + "documentation":"The number of distinct predicates in the graph.
" + }, + "numQuads":{ + "shape":"Long", + "documentation":"The number of quads in the graph.
" + }, + "numClasses":{ + "shape":"Long", + "documentation":"The number of classes in the graph.
" + }, + "classes":{ + "shape":"Classes", + "documentation":"A list of the classes in the graph.
" + }, + "predicates":{ + "shape":"LongValuedMapList", + "documentation":"\"A list of predicates in the graph, along with the predicate counts.
" + }, + "subjectStructures":{ + "shape":"SubjectStructures", + "documentation":"This field is only present when the request mode is DETAILED
. It contains a list of subject structures.
The RDF graph summary API returns a read-only list of classes and predicate keys, along with counts of quads, subjects, and predicates.
" + }, + "RDFGraphSummaryValueMap":{ + "type":"structure", + "members":{ + "version":{ + "shape":"String", + "documentation":"The version of this graph summary response.
" + }, + "lastStatisticsComputationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"The timestamp, in ISO 8601 format, of the time at which Neptune last computed statistics.
" + }, + "graphSummary":{ + "shape":"RDFGraphSummary", + "documentation":"The graph summary of an RDF graph. See Graph summary response for an RDF graph.
" + } + }, + "documentation":"Payload for an RDF graph summary response.
" + }, + "ReadOnlyViolationException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in which the parameter is missing.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when a request attempts to write to a read-only resource.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "RefreshStatisticsIdMap":{ + "type":"structure", + "members":{ + "statisticsId":{ + "shape":"String", + "documentation":"The ID of the statistics generation run that is currently occurring.
" + } + }, + "documentation":"Statistics for REFRESH
mode.
A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when there is a problem accessing Amazon S3.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":false} + }, + "ServerShutdownException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when the server shuts down while processing a request.
", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "SparqlData":{ + "type":"structure", + "required":["stmt"], + "members":{ + "stmt":{ + "shape":"String", + "documentation":"Holds an N-QUADS statement expressing the changed quad.
" + } + }, + "documentation":"Neptune logs are converted to SPARQL quads in the graph using the Resource Description Framework (RDF) N-QUADS language defined in the W3C RDF 1.1 N-Quads specification
" + }, + "SparqlRecord":{ + "type":"structure", + "required":[ + "commitTimestampInMillis", + "eventId", + "data", + "op" + ], + "members":{ + "commitTimestampInMillis":{ + "shape":"Long", + "documentation":"The time at which the commit for the transaction was requested, in milliseconds from the Unix epoch.
", + "locationName":"commitTimestamp" + }, + "eventId":{ + "shape":"StringValuedMap", + "documentation":"The sequence identifier of the stream change record.
" + }, + "data":{ + "shape":"SparqlData", + "documentation":"The serialized SPARQL change record. The serialization formats of each record are described in more detail in Serialization Formats in Neptune Streams.
" + }, + "op":{ + "shape":"String", + "documentation":"The operation that created the change.
" + }, + "isLastOp":{ + "shape":"Boolean", + "documentation":"Only present if this operation is the last one in its transaction. If present, it is set to true. It is useful for ensuring that an entire transaction is consumed.
" + } + }, + "documentation":"A serialized SPARQL stream record capturing a change-log entry for the RDF graph.
" + }, + "SparqlRecordsList":{ + "type":"list", + "member":{"shape":"SparqlRecord"} + }, + "StartLoaderJobInput":{ + "type":"structure", + "required":[ + "source", + "format", + "s3BucketRegion", + "iamRoleArn" + ], + "members":{ + "source":{ + "shape":"String", + "documentation":"The source
parameter accepts an S3 URI that identifies a single file, multiple files, a folder, or multiple folders. Neptune loads every data file in any folder that is specified.
The URI can be in any of the following formats.
s3://(bucket_name)/(object-key-name)
https://s3.amazonaws.com/(bucket_name)/(object-key-name)
https://s3.us-east-1.amazonaws.com/(bucket_name)/(object-key-name)
The object-key-name
element of the URI is equivalent to the prefix parameter in an S3 ListObjects API call. It identifies all the objects in the specified S3 bucket whose names begin with that prefix. That can be a single file or folder, or multiple files and/or folders.
The specified folder or folders can contain multiple vertex files and multiple edge files.
" + }, + "format":{ + "shape":"Format", + "documentation":"The format of the data. For more information about data formats for the Neptune Loader
command, see Load Data Formats.
Allowed values
csv
for the Gremlin CSV data format.
opencypher
for the openCypher CSV data format.
ntriples
for the N-Triples RDF data format.
nquads
for the N-Quads RDF data format.
rdfxml
for the RDF\\XML RDF data format.
turtle
for the Turtle RDF data format.
The Amazon region of the S3 bucket. This must match the Amazon Region of the DB cluster.
", + "locationName":"region" + }, + "iamRoleArn":{ + "shape":"String", + "documentation":"The Amazon Resource Name (ARN) for an IAM role to be assumed by the Neptune DB instance for access to the S3 bucket. The IAM role ARN provided here should be attached to the DB cluster (see Adding the IAM Role to an Amazon Neptune Cluster.
" + }, + "mode":{ + "shape":"Mode", + "documentation":"The load job mode.
Allowed values: RESUME
, NEW
, AUTO
.
Default value: AUTO
.
RESUME
– In RESUME mode, the loader looks for a previous load from this source, and if it finds one, resumes that load job. If no previous load job is found, the loader stops.
The loader avoids reloading files that were successfully loaded in a previous job. It only tries to process failed files. If you dropped previously loaded data from your Neptune cluster, that data is not reloaded in this mode. If a previous load job loaded all files from the same source successfully, nothing is reloaded, and the loader returns success.
NEW
– In NEW mode, the loader creates a new load request regardless of any previous loads. You can use this mode to reload all the data from a source after dropping previously loaded data from your Neptune cluster, or to load new data available at the same source.
AUTO
– In AUTO mode, the loader looks for a previous load job from the same source, and if it finds one, resumes that job, just as in RESUME
mode.
If the loader doesn't find a previous load job from the same source, it loads all data from the source, just as in NEW
mode.
failOnError
– A flag to toggle a complete stop on an error.
Allowed values: \"TRUE\"
, \"FALSE\"
.
Default value: \"TRUE\"
.
When this parameter is set to \"FALSE\"
, the loader tries to load all the data in the location specified, skipping any entries with errors.
When this parameter is set to \"TRUE\"
, the loader stops as soon as it encounters an error. Data loaded up to that point persists.
The optional parallelism
parameter can be set to reduce the number of threads used by the bulk load process.
Allowed values:
LOW
– The number of threads used is the number of available vCPUs divided by 8.
MEDIUM
– The number of threads used is the number of available vCPUs divided by 2.
HIGH
– The number of threads used is the same as the number of available vCPUs.
OVERSUBSCRIBE
– The number of threads used is the number of available vCPUs multiplied by 2. If this value is used, the bulk loader takes up all available resources.
This does not mean, however, that the OVERSUBSCRIBE
setting results in 100% CPU utilization. Because the load operation is I/O bound, the highest CPU utilization to expect is in the 60% to 70% range.
Default value: HIGH
The parallelism
setting can sometimes result in a deadlock between threads when loading openCypher data. When this happens, Neptune returns the LOAD_DATA_DEADLOCK
error. You can generally fix the issue by setting parallelism
to a lower setting and retrying the load command.
parserConfiguration
– An optional object with additional parser configuration values. Each of the child parameters is also optional:
namedGraphUri
– The default graph for all RDF formats when no graph is specified (for non-quads formats and NQUAD entries with no graph).
The default is https://aws.amazon.com/neptune/vocab/v01/DefaultNamedGraph
.
baseUri
– The base URI for RDF/XML and Turtle formats.
The default is https://aws.amazon.com/neptune/default
.
allowEmptyStrings
– Gremlin users need to be able to pass empty string values(\"\") as node and edge properties when loading CSV data. If allowEmptyStrings
is set to false
(the default), such empty strings are treated as nulls and are not loaded.
If allowEmptyStrings
is set to true
, the loader treats empty strings as valid property values and loads them accordingly.
updateSingleCardinalityProperties
is an optional parameter that controls how the bulk loader treats a new value for single-cardinality vertex or edge properties. This is not supported for loading openCypher data.
Allowed values: \"TRUE\"
, \"FALSE\"
.
Default value: \"FALSE\"
.
By default, or when updateSingleCardinalityProperties
is explicitly set to \"FALSE\"
, the loader treats a new value as an error, because it violates single cardinality.
When updateSingleCardinalityProperties
is set to \"TRUE\"
, on the other hand, the bulk loader replaces the existing value with the new one. If multiple edge or single-cardinality vertex property values are provided in the source file(s) being loaded, the final value at the end of the bulk load could be any one of those new values. The loader only guarantees that the existing value has been replaced by one of the new ones.
This is an optional flag parameter that indicates whether the load request can be queued up or not.
You don't have to wait for one load job to complete before issuing the next one, because Neptune can queue up as many as 64 jobs at a time, provided that their queueRequest
parameters are all set to \"TRUE\"
.
If the queueRequest
parameter is omitted or set to \"FALSE\"
, the load request will fail if another load job is already running.
Allowed values: \"TRUE\"
, \"FALSE\"
.
Default value: \"FALSE\"
.
This is an optional parameter that can make a queued load request contingent on the successful completion of one or more previous jobs in the queue.
Neptune can queue up as many as 64 load requests at a time, if their queueRequest
parameters are set to \"TRUE\"
. The dependencies
parameter lets you make execution of such a queued request dependent on the successful completion of one or more specified previous requests in the queue.
For example, if load Job-A
and Job-B
are independent of each other, but load Job-C
needs Job-A
and Job-B
to be finished before it begins, proceed as follows:
Submit load-job-A
and load-job-B
one after another in any order, and save their load-ids.
Submit load-job-C
with the load-ids of the two jobs in its dependencies
field:
Because of the dependencies
parameter, the bulk loader will not start Job-C
until Job-A
and Job-B
have completed successfully. If either one of them fails, Job-C will not be executed, and its status will be set to LOAD_FAILED_BECAUSE_DEPENDENCY_NOT_SATISFIED
.
You can set up multiple levels of dependency in this way, so that the failure of one job will cause all requests that are directly or indirectly dependent on it to be cancelled.
" + }, + "userProvidedEdgeIds":{ + "shape":"Boolean", + "documentation":"This parameter is required only when loading openCypher data that contains relationship IDs. It must be included and set to True
when openCypher relationship IDs are explicitly provided in the load data (recommended).
When userProvidedEdgeIds
is absent or set to True
, an :ID
column must be present in every relationship file in the load.
When userProvidedEdgeIds
is present and set to False
, relationship files in the load must not contain an :ID
column. Instead, the Neptune loader automatically generates an ID for each relationship.
It's useful to provide relationship IDs explicitly so that the loader can resume loading after errors in the CSV data have been fixed, without having to reload any relationships that have already been loaded. If relationship IDs have not been explicitly assigned, the loader cannot resume a failed load if any relationship file has had to be corrected, and must instead reload all the relationships.
" + } + } + }, + "StartLoaderJobOutput":{ + "type":"structure", + "required":[ + "status", + "payload" + ], + "members":{ + "status":{ + "shape":"String", + "documentation":"The HTTP return code indicating the status of the load job.
" + }, + "payload":{ + "shape":"StringValuedMap", + "documentation":"Contains a loadId
name-value pair that provides an identifier for the load operation.
A unique identifier for the new job. The default is an autogenerated UUID.
" + }, + "previousDataProcessingJobId":{ + "shape":"String", + "documentation":"The job ID of a completed data processing job run on an earlier version of the data.
" + }, + "inputDataS3Location":{ + "shape":"String", + "documentation":"The URI of the Amazon S3 location where you want SageMaker to download the data needed to run the data processing job.
" + }, + "processedDataS3Location":{ + "shape":"String", + "documentation":"The URI of the Amazon S3 location where you want SageMaker to save the results of a data processing job.
" + }, + "sagemakerIamRoleArn":{ + "shape":"String", + "documentation":"The ARN of an IAM role for SageMaker execution. This must be listed in your DB cluster parameter group or an error will occur.
" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"The Amazon Resource Name (ARN) of an IAM role that SageMaker can assume to perform tasks on your behalf. This must be listed in your DB cluster parameter group or an error will occur.
" + }, + "processingInstanceType":{ + "shape":"String", + "documentation":"The type of ML instance used during data processing. Its memory should be large enough to hold the processed dataset. The default is the smallest ml.r5 type whose memory is ten times larger than the size of the exported graph data on disk.
" + }, + "processingInstanceVolumeSizeInGB":{ + "shape":"Integer", + "documentation":"The disk volume size of the processing instance. Both input data and processed data are stored on disk, so the volume size must be large enough to hold both data sets. The default is 0. If not specified or 0, Neptune ML chooses the volume size automatically based on the data size.
" + }, + "processingTimeOutInSeconds":{ + "shape":"Integer", + "documentation":"Timeout in seconds for the data processing job. The default is 86,400 (1 day).
" + }, + "modelType":{ + "shape":"String", + "documentation":"One of the two model types that Neptune ML currently supports: heterogeneous graph models (heterogeneous
), and knowledge graph (kge
). The default is none. If not specified, Neptune ML chooses the model type automatically based on the data.
A data specification file that describes how to load the exported graph data for training. The file is automatically generated by the Neptune export toolkit. The default is training-data-configuration.json
.
The IDs of the subnets in the Neptune VPC. The default is None.
" + }, + "securityGroupIds":{ + "shape":"StringList", + "documentation":"The VPC security group IDs. The default is None.
" + }, + "volumeEncryptionKMSKey":{ + "shape":"String", + "documentation":"The Amazon Key Management Service (Amazon KMS) key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instances that run the training job. The default is None.
" + }, + "s3OutputEncryptionKMSKey":{ + "shape":"String", + "documentation":"The Amazon Key Management Service (Amazon KMS) key that SageMaker uses to encrypt the output of the processing job. The default is none.
" + } + } + }, + "StartMLDataProcessingJobOutput":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"The unique ID of the new data processing job.
" + }, + "arn":{ + "shape":"String", + "documentation":"The ARN of the data processing job.
" + }, + "creationTimeInMillis":{ + "shape":"Long", + "documentation":"The time it took to create the new processing job, in milliseconds.
" + } + } + }, + "StartMLModelTrainingJobInput":{ + "type":"structure", + "required":[ + "dataProcessingJobId", + "trainModelS3Location" + ], + "members":{ + "id":{ + "shape":"String", + "documentation":"A unique identifier for the new job. The default is An autogenerated UUID.
" + }, + "previousModelTrainingJobId":{ + "shape":"String", + "documentation":"The job ID of a completed model-training job that you want to update incrementally based on updated data.
" + }, + "dataProcessingJobId":{ + "shape":"String", + "documentation":"The job ID of the completed data-processing job that has created the data that the training will work with.
" + }, + "trainModelS3Location":{ + "shape":"String", + "documentation":"The location in Amazon S3 where the model artifacts are to be stored.
" + }, + "sagemakerIamRoleArn":{ + "shape":"String", + "documentation":"The ARN of an IAM role for SageMaker execution.This must be listed in your DB cluster parameter group or an error will occur.
" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.
" + }, + "baseProcessingInstanceType":{ + "shape":"String", + "documentation":"The type of ML instance used in preparing and managing training of ML models. This is a CPU instance chosen based on memory requirements for processing the training data and model.
" + }, + "trainingInstanceType":{ + "shape":"String", + "documentation":"The type of ML instance used for model training. All Neptune ML models support CPU, GPU, and multiGPU training. The default is ml.p3.2xlarge
. Choosing the right instance type for training depends on the task type, graph size, and your budget.
The disk volume size of the training instance. Both input data and the output model are stored on disk, so the volume size must be large enough to hold both data sets. The default is 0. If not specified or 0, Neptune ML selects a disk volume size based on the recommendation generated in the data processing step.
" + }, + "trainingTimeOutInSeconds":{ + "shape":"Integer", + "documentation":"Timeout in seconds for the training job. The default is 86,400 (1 day).
" + }, + "maxHPONumberOfTrainingJobs":{ + "shape":"Integer", + "documentation":"Maximum total number of training jobs to start for the hyperparameter tuning job. The default is 2. Neptune ML automatically tunes the hyperparameters of the machine learning model. To obtain a model that performs well, use at least 10 jobs (in other words, set maxHPONumberOfTrainingJobs
to 10). In general, the more tuning runs, the better the results.
Maximum number of parallel training jobs to start for the hyperparameter tuning job. The default is 2. The number of parallel jobs you can run is limited by the available resources on your training instance.
" + }, + "subnets":{ + "shape":"StringList", + "documentation":"The IDs of the subnets in the Neptune VPC. The default is None.
" + }, + "securityGroupIds":{ + "shape":"StringList", + "documentation":"The VPC security group IDs. The default is None.
" + }, + "volumeEncryptionKMSKey":{ + "shape":"String", + "documentation":"The Amazon Key Management Service (KMS) key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instances that run the training job. The default is None.
" + }, + "s3OutputEncryptionKMSKey":{ + "shape":"String", + "documentation":"The Amazon Key Management Service (KMS) key that SageMaker uses to encrypt the output of the processing job. The default is none.
" + }, + "enableManagedSpotTraining":{ + "shape":"Boolean", + "documentation":"Optimizes the cost of training machine-learning models by using Amazon Elastic Compute Cloud spot instances. The default is False
.
The configuration for custom model training. This is a JSON object.
" + } + } + }, + "StartMLModelTrainingJobOutput":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"The unique ID of the new model training job.
" + }, + "arn":{ + "shape":"String", + "documentation":"The ARN of the new model training job.
" + }, + "creationTimeInMillis":{ + "shape":"Long", + "documentation":"The model training job creation time, in milliseconds.
" + } + } + }, + "StartMLModelTransformJobInput":{ + "type":"structure", + "required":["modelTransformOutputS3Location"], + "members":{ + "id":{ + "shape":"String", + "documentation":"A unique identifier for the new job. The default is an autogenerated UUID.
" + }, + "dataProcessingJobId":{ + "shape":"String", + "documentation":"The job ID of a completed data-processing job. You must include either dataProcessingJobId
and a mlModelTrainingJobId
, or a trainingJobName
.
The job ID of a completed model-training job. You must include either dataProcessingJobId
and a mlModelTrainingJobId
, or a trainingJobName
.
The name of a completed SageMaker training job. You must include either dataProcessingJobId
and a mlModelTrainingJobId
, or a trainingJobName
.
The location in Amazon S3 where the model artifacts are to be stored.
" + }, + "sagemakerIamRoleArn":{ + "shape":"String", + "documentation":"The ARN of an IAM role for SageMaker execution. This must be listed in your DB cluster parameter group or an error will occur.
" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.
" + }, + "customModelTransformParameters":{ + "shape":"CustomModelTransformParameters", + "documentation":"Configuration information for a model transform using a custom model. The customModelTransformParameters
object contains the following fields, which must have values compatible with the saved model parameters from the training job:
The type of ML instance used in preparing and managing training of ML models. This is an ML compute instance chosen based on memory requirements for processing the training data and model.
" + }, + "baseProcessingInstanceVolumeSizeInGB":{ + "shape":"Integer", + "documentation":"The disk volume size of the training instance in gigabytes. The default is 0. Both input data and the output model are stored on disk, so the volume size must be large enough to hold both data sets. If not specified or 0, Neptune ML selects a disk volume size based on the recommendation generated in the data processing step.
" + }, + "subnets":{ + "shape":"StringList", + "documentation":"The IDs of the subnets in the Neptune VPC. The default is None.
" + }, + "securityGroupIds":{ + "shape":"StringList", + "documentation":"The VPC security group IDs. The default is None.
" + }, + "volumeEncryptionKMSKey":{ + "shape":"String", + "documentation":"The Amazon Key Management Service (KMS) key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instances that run the training job. The default is None.
" + }, + "s3OutputEncryptionKMSKey":{ + "shape":"String", + "documentation":"The Amazon Key Management Service (KMS) key that SageMaker uses to encrypt the output of the processing job. The default is none.
" + } + } + }, + "StartMLModelTransformJobOutput":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"The unique ID of the new model transform job.
" + }, + "arn":{ + "shape":"String", + "documentation":"The ARN of the model transform job.
" + }, + "creationTimeInMillis":{ + "shape":"Long", + "documentation":"The creation time of the model transform job, in milliseconds.
" + } + } + }, + "Statistics":{ + "type":"structure", + "members":{ + "autoCompute":{ + "shape":"Boolean", + "documentation":"Indicates whether or not automatic statistics generation is enabled.
" + }, + "active":{ + "shape":"Boolean", + "documentation":"Indicates whether or not DFE statistics generation is enabled at all.
" + }, + "statisticsId":{ + "shape":"String", + "documentation":"Reports the ID of the current statistics generation run. A value of -1 indicates that no statistics have been generated.
" + }, + "date":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"The UTC time at which DFE statistics have most recently been generated.
" + }, + "note":{ + "shape":"String", + "documentation":"A note about problems in the case where statistics are invalid.
" + }, + "signatureInfo":{ + "shape":"StatisticsSummary", + "documentation":"A StatisticsSummary structure that contains:
signatureCount
- The total number of signatures across all characteristic sets.
instanceCount
- The total number of characteristic-set instances.
predicateCount
- The total number of unique predicates.
Contains statistics information. The DFE engine uses information about the data in your Neptune graph to make effective trade-offs when planning query execution. This information takes the form of statistics that include so-called characteristic sets and predicate statistics that can guide query planning. See Managing statistics for the Neptune DFE to use.
" + }, + "StatisticsAutoGenerationMode":{ + "type":"string", + "enum":[ + "disableAutoCompute", + "enableAutoCompute", + "refresh" + ] + }, + "StatisticsNotAvailableException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when statistics needed to satisfy a request are not available.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "StatisticsSummary":{ + "type":"structure", + "members":{ + "signatureCount":{ + "shape":"Integer", + "documentation":"The total number of signatures across all characteristic sets.
" + }, + "instanceCount":{ + "shape":"Integer", + "documentation":"The total number of characteristic-set instances.
" + }, + "predicateCount":{ + "shape":"Integer", + "documentation":"The total number of unique predicates.
" + } + }, + "documentation":"Information about the characteristic sets generated in the statistics.
" + }, + "StreamRecordsNotFoundException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when stream records requested by a query cannot be found.
", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "StringValuedMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "SubjectStructure":{ + "type":"structure", + "members":{ + "count":{ + "shape":"Long", + "documentation":"Number of occurrences of this specific structure.
" + }, + "predicates":{ + "shape":"Predicates", + "documentation":"A list of predicates present in this specific structure.
" + } + }, + "documentation":"A subject structure.
" + }, + "SubjectStructures":{ + "type":"list", + "member":{"shape":"SubjectStructure"} + }, + "SyntheticTimestamp_date_time":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "ThrottlingException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request that could not be processed for this reason.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when the rate of requests exceeds the maximum throughput. Requests can be retried after encountering this exception.
", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "TimeLimitExceededException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request that could not be processed for this reason.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when the an operation exceeds the time limit allowed for it.
", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "TooManyRequestsException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request that could not be processed for this reason.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when the number of requests being processed exceeds the limit.
", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":false} + }, + "UnsupportedOperationException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"A detailed message describing the problem.
" + }, + "requestId":{ + "shape":"String", + "documentation":"The ID of the request in question.
" + }, + "code":{ + "shape":"String", + "documentation":"The HTTP status code returned with the exception.
" + } + }, + "documentation":"Raised when a request attempts to initiate an operation that is not supported.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + }, + "documentation":"The Amazon Neptune data API provides SDK support for more than 40 of Neptune's data operations, including data loading, query execution, data inquiry, and machine learning. It supports all three Neptune query languages (Gremlin, openCypher and SPARQL), and is available in all SDK languages. It automatically signs API requests and greatly simplifies integrating Neptune into your applications.
" +} diff --git a/botocore/data/network-firewall/2020-11-12/endpoint-rule-set-1.json b/botocore/data/network-firewall/2020-11-12/endpoint-rule-set-1.json index d7fab30461..233c4205f8 100644 --- a/botocore/data/network-firewall/2020-11-12/endpoint-rule-set-1.json +++ b/botocore/data/network-firewall/2020-11-12/endpoint-rule-set-1.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": 
"UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://network-firewall-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://network-firewall-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://network-firewall-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": 
"endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://network-firewall-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://network-firewall.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://network-firewall.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://network-firewall.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": 
"https://network-firewall.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/botocore/data/network-firewall/2020-11-12/service-2.json b/botocore/data/network-firewall/2020-11-12/service-2.json index 1c4d6ab4c7..e476421363 100644 --- a/botocore/data/network-firewall/2020-11-12/service-2.json +++ b/botocore/data/network-firewall/2020-11-12/service-2.json @@ -114,9 +114,11 @@ "errors":[ {"shape":"InvalidRequestException"}, {"shape":"ThrottlingException"}, - {"shape":"InternalServerError"} + {"shape":"InternalServerError"}, + {"shape":"LimitExceededException"}, + {"shape":"InsufficientCapacityException"} ], - "documentation":"Creates an Network Firewall TLS inspection configuration. A TLS inspection configuration contains the Certificate Manager certificate references that Network Firewall uses to decrypt and re-encrypt inbound traffic.
After you create a TLS inspection configuration, you associate it with a firewall policy.
To update the settings for a TLS inspection configuration, use UpdateTLSInspectionConfiguration.
To manage a TLS inspection configuration's tags, use the standard Amazon Web Services resource tagging operations, ListTagsForResource, TagResource, and UntagResource.
To retrieve information about TLS inspection configurations, use ListTLSInspectionConfigurations and DescribeTLSInspectionConfiguration.
For more information about TLS inspection configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.
" + "documentation":"Creates an Network Firewall TLS inspection configuration. A TLS inspection configuration contains the Certificate Manager certificate references that Network Firewall uses to decrypt and re-encrypt inbound traffic.
After you create a TLS inspection configuration, you associate it with a new firewall policy.
To update the settings for a TLS inspection configuration, use UpdateTLSInspectionConfiguration.
To manage a TLS inspection configuration's tags, use the standard Amazon Web Services resource tagging operations, ListTagsForResource, TagResource, and UntagResource.
To retrieve information about TLS inspection configurations, use ListTLSInspectionConfigurations and DescribeTLSInspectionConfiguration.
For more information about TLS inspection configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.
" }, "DeleteFirewall":{ "name":"DeleteFirewall", @@ -1029,7 +1031,7 @@ }, "TLSInspectionConfiguration":{ "shape":"TLSInspectionConfiguration", - "documentation":"The object that defines a TLS inspection configuration. This, along with TLSInspectionConfigurationResponse, define the TLS inspection configuration. You can retrieve all objects for a TLS inspection configuration by calling DescribeTLSInspectionConfiguration.
Network Firewall uses a TLS inspection configuration to decrypt traffic. Network Firewall re-encrypts the traffic before sending it to its destination.
To use a TLS inspection configuration, you add it to a Network Firewall firewall policy, then you apply the firewall policy to a firewall. Network Firewall acts as a proxy service to decrypt and inspect inbound traffic. You can reference a TLS inspection configuration from more than one firewall policy, and you can use a firewall policy in more than one firewall. For more information about using TLS inspection configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.
" + "documentation":"The object that defines a TLS inspection configuration. This, along with TLSInspectionConfigurationResponse, define the TLS inspection configuration. You can retrieve all objects for a TLS inspection configuration by calling DescribeTLSInspectionConfiguration.
Network Firewall uses a TLS inspection configuration to decrypt traffic. Network Firewall re-encrypts the traffic before sending it to its destination.
To use a TLS inspection configuration, you add it to a new Network Firewall firewall policy, then you apply the firewall policy to a firewall. Network Firewall acts as a proxy service to decrypt and inspect inbound traffic. You can reference a TLS inspection configuration from more than one firewall policy, and you can use a firewall policy in more than one firewall. For more information about using TLS inspection configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.
" }, "Description":{ "shape":"Description", @@ -1410,7 +1412,7 @@ }, "TLSInspectionConfiguration":{ "shape":"TLSInspectionConfiguration", - "documentation":"The object that defines a TLS inspection configuration. This, along with TLSInspectionConfigurationResponse, define the TLS inspection configuration. You can retrieve all objects for a TLS inspection configuration by calling DescribeTLSInspectionConfiguration.
Network Firewall uses a TLS inspection configuration to decrypt traffic. Network Firewall re-encrypts the traffic before sending it to its destination.
To use a TLS inspection configuration, you add it to a Network Firewall firewall policy, then you apply the firewall policy to a firewall. Network Firewall acts as a proxy service to decrypt and inspect inbound traffic. You can reference a TLS inspection configuration from more than one firewall policy, and you can use a firewall policy in more than one firewall. For more information about using TLS inspection configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.
" + "documentation":"The object that defines a TLS inspection configuration. This, along with TLSInspectionConfigurationResponse, define the TLS inspection configuration. You can retrieve all objects for a TLS inspection configuration by calling DescribeTLSInspectionConfiguration.
Network Firewall uses a TLS inspection configuration to decrypt traffic. Network Firewall re-encrypts the traffic before sending it to its destination.
To use a TLS inspection configuration, you add it to a new Network Firewall firewall policy, then you apply the firewall policy to a firewall. Network Firewall acts as a proxy service to decrypt and inspect inbound traffic. You can reference a TLS inspection configuration from more than one firewall policy, and you can use a firewall policy in more than one firewall. For more information about using TLS inspection configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.
" }, "TLSInspectionConfigurationResponse":{ "shape":"TLSInspectionConfigurationResponse", @@ -2205,7 +2207,7 @@ }, "PaginationToken":{ "type":"string", - "max":2048, + "max":4096, "min":1, "pattern":"[0-9A-Za-z:\\/+=]+$" }, @@ -2547,11 +2549,11 @@ "members":{ "Keyword":{ "shape":"Keyword", - "documentation":"" + "documentation":"The keyword for the Suricata compatible rule option. You must include a sid
(signature ID), and can optionally include other keywords. For information about Suricata compatible keywords, see Rule options in the Suricata documentation.
The settings of the Suricata compatible rule option. Rule options have zero or more setting values, and the number of possible and required settings depends on the Keyword
. For more information about the settings for specific options, see Rule options.
Additional settings for a stateful rule. This is part of the StatefulRule configuration.
" @@ -2604,7 +2606,7 @@ }, "StatefulRules":{ "shape":"StatefulRules", - "documentation":"An array of individual stateful rules inspection criteria to be used together in a stateful rule group. Use this option to specify simple Suricata rules with protocol, source and destination, ports, direction, and rule options. For information about the Suricata Rules
format, see Rules Format.
An array of individual stateful rules inspection criteria to be used together in a stateful rule group. Use this option to specify simple Suricata rules with protocol, source and destination, ports, direction, and rule options. For information about the Suricata Rules
format, see Rules Format.
Defines what Network Firewall should do with the packets in a traffic flow when the flow matches the stateful rule criteria. For all actions, Network Firewall performs the specified action and discontinues stateful inspection of the traffic flow.
The actions for a stateful rule are defined as follows:
PASS - Permits the packets to go to the intended destination.
DROP - Blocks the packets from going to the intended destination and sends an alert log message, if alert logging is configured in the Firewall LoggingConfiguration.
ALERT - Permits the packets to go to the intended destination and sends an alert log message, if alert logging is configured in the Firewall LoggingConfiguration.
You can use this action to test a rule that you intend to use to drop traffic. You can enable the rule with ALERT
action, verify in the logs that the rule is filtering as you want, then change the action to DROP
.
REJECT - Drops TCP traffic that matches the conditions of the stateful rule, and sends a TCP reset packet back to the sender of the packet. A TCP reset packet is a packet with no payload and a RST
bit contained in the TCP header flags. Also sends an alert log message if alert logging is configured in the Firewall LoggingConfiguration.
REJECT
isn't currently available for use with IMAP and FTP protocols.
Defines what Network Firewall should do with the packets in a traffic flow when the flow matches the stateful rule criteria. For all actions, Network Firewall performs the specified action and discontinues stateful inspection of the traffic flow.
The actions for a stateful rule are defined as follows:
PASS - Permits the packets to go to the intended destination.
DROP - Blocks the packets from going to the intended destination and sends an alert log message, if alert logging is configured in the Firewall LoggingConfiguration.
ALERT - Permits the packets to go to the intended destination and sends an alert log message, if alert logging is configured in the Firewall LoggingConfiguration.
You can use this action to test a rule that you intend to use to drop traffic. You can enable the rule with ALERT
action, verify in the logs that the rule is filtering as you want, then change the action to DROP
.
Additional options for the rule. These are the Suricata RuleOptions
settings.
A single Suricata rules specification, for use in a stateful rule group. Use this option to specify a simple Suricata rule with protocol, source and destination, ports, direction, and rule options. For information about the Suricata Rules
format, see Rules Format.
A single Suricata rules specification, for use in a stateful rule group. Use this option to specify a simple Suricata rule with protocol, source and destination, ports, direction, and rule options. For information about the Suricata Rules
format, see Rules Format.
Lists the server certificate configurations that are associated with the TLS configuration.
" } }, - "documentation":"The object that defines a TLS inspection configuration. This, along with TLSInspectionConfigurationResponse, define the TLS inspection configuration. You can retrieve all objects for a TLS inspection configuration by calling DescribeTLSInspectionConfiguration.
Network Firewall uses a TLS inspection configuration to decrypt traffic. Network Firewall re-encrypts the traffic before sending it to its destination.
To use a TLS inspection configuration, you add it to a Network Firewall firewall policy, then you apply the firewall policy to a firewall. Network Firewall acts as a proxy service to decrypt and inspect inbound traffic. You can reference a TLS inspection configuration from more than one firewall policy, and you can use a firewall policy in more than one firewall. For more information about using TLS inspection configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.
" + "documentation":"The object that defines a TLS inspection configuration. This, along with TLSInspectionConfigurationResponse, define the TLS inspection configuration. You can retrieve all objects for a TLS inspection configuration by calling DescribeTLSInspectionConfiguration.
Network Firewall uses a TLS inspection configuration to decrypt traffic. Network Firewall re-encrypts the traffic before sending it to its destination.
To use a TLS inspection configuration, you add it to a new Network Firewall firewall policy, then you apply the firewall policy to a firewall. Network Firewall acts as a proxy service to decrypt and inspect inbound traffic. You can reference a TLS inspection configuration from more than one firewall policy, and you can use a firewall policy in more than one firewall. For more information about using TLS inspection configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.
" }, "TLSInspectionConfigurationMetadata":{ "type":"structure", @@ -3419,7 +3421,7 @@ }, "FirewallPolicy":{ "shape":"FirewallPolicy", - "documentation":"The updated firewall policy to use for the firewall.
" + "documentation":"The updated firewall policy to use for the firewall. You can't add or remove a TLSInspectionConfiguration after you create a firewall policy. However, you can replace an existing TLS inspection configuration with another TLSInspectionConfiguration
.
The object that defines a TLS inspection configuration. This, along with TLSInspectionConfigurationResponse, define the TLS inspection configuration. You can retrieve all objects for a TLS inspection configuration by calling DescribeTLSInspectionConfiguration.
Network Firewall uses a TLS inspection configuration to decrypt traffic. Network Firewall re-encrypts the traffic before sending it to its destination.
To use a TLS inspection configuration, you add it to a Network Firewall firewall policy, then you apply the firewall policy to a firewall. Network Firewall acts as a proxy service to decrypt and inspect inbound traffic. You can reference a TLS inspection configuration from more than one firewall policy, and you can use a firewall policy in more than one firewall. For more information about using TLS inspection configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.
" + "documentation":"The object that defines a TLS inspection configuration. This, along with TLSInspectionConfigurationResponse, define the TLS inspection configuration. You can retrieve all objects for a TLS inspection configuration by calling DescribeTLSInspectionConfiguration.
Network Firewall uses a TLS inspection configuration to decrypt traffic. Network Firewall re-encrypts the traffic before sending it to its destination.
To use a TLS inspection configuration, you add it to a new Network Firewall firewall policy, then you apply the firewall policy to a firewall. Network Firewall acts as a proxy service to decrypt and inspect inbound traffic. You can reference a TLS inspection configuration from more than one firewall policy, and you can use a firewall policy in more than one firewall. For more information about using TLS inspection configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.
" }, "Description":{ "shape":"Description", diff --git a/botocore/data/pca-connector-ad/2018-05-10/endpoint-rule-set-1.json b/botocore/data/pca-connector-ad/2018-05-10/endpoint-rule-set-1.json new file mode 100644 index 0000000000..df58dfb6b6 --- /dev/null +++ b/botocore/data/pca-connector-ad/2018-05-10/endpoint-rule-set-1.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": 
"Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://pca-connector-ad-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://pca-connector-ad-fips.{Region}.{PartitionResult#dnsSuffix}", + 
"properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://pca-connector-ad.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://pca-connector-ad.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + } + ] +} \ No newline at end of file diff --git a/botocore/data/pca-connector-ad/2018-05-10/paginators-1.json b/botocore/data/pca-connector-ad/2018-05-10/paginators-1.json new file mode 100644 index 0000000000..89234776f1 --- /dev/null +++ b/botocore/data/pca-connector-ad/2018-05-10/paginators-1.json @@ -0,0 +1,34 @@ +{ + "pagination": { + "ListConnectors": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Connectors" + }, + "ListDirectoryRegistrations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "DirectoryRegistrations" + }, + 
"ListServicePrincipalNames": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ServicePrincipalNames" + }, + "ListTemplateGroupAccessControlEntries": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "AccessControlEntries" + }, + "ListTemplates": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Templates" + } + } +} diff --git a/botocore/data/pca-connector-ad/2018-05-10/service-2.json b/botocore/data/pca-connector-ad/2018-05-10/service-2.json new file mode 100644 index 0000000000..8259e515cb --- /dev/null +++ b/botocore/data/pca-connector-ad/2018-05-10/service-2.json @@ -0,0 +1,2836 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-05-10", + "endpointPrefix":"pca-connector-ad", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"PcaConnectorAd", + "serviceId":"Pca Connector Ad", + "signatureVersion":"v4", + "signingName":"pca-connector-ad", + "uid":"pca-connector-ad-2018-05-10" + }, + "operations":{ + "CreateConnector":{ + "name":"CreateConnector", + "http":{ + "method":"POST", + "requestUri":"/connectors", + "responseCode":202 + }, + "input":{"shape":"CreateConnectorRequest"}, + "output":{"shape":"CreateConnectorResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"Creates a connector between Amazon Web Services Private CA and an Active Directory. You must specify the private CA, directory ID, and security groups.
" + }, + "CreateDirectoryRegistration":{ + "name":"CreateDirectoryRegistration", + "http":{ + "method":"POST", + "requestUri":"/directoryRegistrations", + "responseCode":202 + }, + "input":{"shape":"CreateDirectoryRegistrationRequest"}, + "output":{"shape":"CreateDirectoryRegistrationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"Creates a directory registration that authorizes communication between Amazon Web Services Private CA and an Active Directory
" + }, + "CreateServicePrincipalName":{ + "name":"CreateServicePrincipalName", + "http":{ + "method":"POST", + "requestUri":"/directoryRegistrations/{DirectoryRegistrationArn}/servicePrincipalNames/{ConnectorArn}", + "responseCode":202 + }, + "input":{"shape":"CreateServicePrincipalNameRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"Creates a service principal name (SPN) for the service account in Active Directory. Kerberos authentication uses SPNs to associate a service instance with a service sign-in account.
", + "idempotent":true + }, + "CreateTemplate":{ + "name":"CreateTemplate", + "http":{ + "method":"POST", + "requestUri":"/templates", + "responseCode":200 + }, + "input":{"shape":"CreateTemplateRequest"}, + "output":{"shape":"CreateTemplateResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"Creates an Active Directory compatible certificate template. The connectors issues certificates using these templates based on the requester’s Active Directory group membership.
" + }, + "CreateTemplateGroupAccessControlEntry":{ + "name":"CreateTemplateGroupAccessControlEntry", + "http":{ + "method":"POST", + "requestUri":"/templates/{TemplateArn}/accessControlEntries", + "responseCode":200 + }, + "input":{"shape":"CreateTemplateGroupAccessControlEntryRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"Create a group access control entry. Allow or deny Active Directory groups from enrolling and/or autoenrolling with the template based on the group security identifiers (SIDs).
", + "idempotent":true + }, + "DeleteConnector":{ + "name":"DeleteConnector", + "http":{ + "method":"DELETE", + "requestUri":"/connectors/{ConnectorArn}", + "responseCode":202 + }, + "input":{"shape":"DeleteConnectorRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"Deletes a connector for Active Directory. You must provide the Amazon Resource Name (ARN) of the connector that you want to delete. You can find the ARN by calling the https://docs.aws.amazon.com/pca-connector-ad/latest/APIReference/API_ListConnectors action. Deleting a connector does not deregister your directory with Amazon Web Services Private CA. You can deregister your directory by calling the https://docs.aws.amazon.com/pca-connector-ad/latest/APIReference/API_DeleteDirectoryRegistration action.
", + "idempotent":true + }, + "DeleteDirectoryRegistration":{ + "name":"DeleteDirectoryRegistration", + "http":{ + "method":"DELETE", + "requestUri":"/directoryRegistrations/{DirectoryRegistrationArn}", + "responseCode":202 + }, + "input":{"shape":"DeleteDirectoryRegistrationRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"Deletes a directory registration. Deleting a directory registration deauthorizes Amazon Web Services Private CA with the directory.
", + "idempotent":true + }, + "DeleteServicePrincipalName":{ + "name":"DeleteServicePrincipalName", + "http":{ + "method":"DELETE", + "requestUri":"/directoryRegistrations/{DirectoryRegistrationArn}/servicePrincipalNames/{ConnectorArn}", + "responseCode":202 + }, + "input":{"shape":"DeleteServicePrincipalNameRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"Deletes the service principal name (SPN) used by a connector to authenticate with your Active Directory.
", + "idempotent":true + }, + "DeleteTemplate":{ + "name":"DeleteTemplate", + "http":{ + "method":"DELETE", + "requestUri":"/templates/{TemplateArn}", + "responseCode":202 + }, + "input":{"shape":"DeleteTemplateRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"Deletes a template. Certificates issued using the template are still valid until they are revoked or expired.
", + "idempotent":true + }, + "DeleteTemplateGroupAccessControlEntry":{ + "name":"DeleteTemplateGroupAccessControlEntry", + "http":{ + "method":"DELETE", + "requestUri":"/templates/{TemplateArn}/accessControlEntries/{GroupSecurityIdentifier}", + "responseCode":200 + }, + "input":{"shape":"DeleteTemplateGroupAccessControlEntryRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"Deletes a group access control entry.
", + "idempotent":true + }, + "GetConnector":{ + "name":"GetConnector", + "http":{ + "method":"GET", + "requestUri":"/connectors/{ConnectorArn}", + "responseCode":200 + }, + "input":{"shape":"GetConnectorRequest"}, + "output":{"shape":"GetConnectorResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Lists information about your connector. You specify the connector on input by its ARN (Amazon Resource Name).
" + }, + "GetDirectoryRegistration":{ + "name":"GetDirectoryRegistration", + "http":{ + "method":"GET", + "requestUri":"/directoryRegistrations/{DirectoryRegistrationArn}", + "responseCode":200 + }, + "input":{"shape":"GetDirectoryRegistrationRequest"}, + "output":{"shape":"GetDirectoryRegistrationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"A structure that contains information about your directory registration.
" + }, + "GetServicePrincipalName":{ + "name":"GetServicePrincipalName", + "http":{ + "method":"GET", + "requestUri":"/directoryRegistrations/{DirectoryRegistrationArn}/servicePrincipalNames/{ConnectorArn}", + "responseCode":200 + }, + "input":{"shape":"GetServicePrincipalNameRequest"}, + "output":{"shape":"GetServicePrincipalNameResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Lists the service principal name that the connector uses to authenticate with Active Directory.
" + }, + "GetTemplate":{ + "name":"GetTemplate", + "http":{ + "method":"GET", + "requestUri":"/templates/{TemplateArn}", + "responseCode":200 + }, + "input":{"shape":"GetTemplateRequest"}, + "output":{"shape":"GetTemplateResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Retrieves a certificate template that the connector uses to issue certificates from a private CA.
" + }, + "GetTemplateGroupAccessControlEntry":{ + "name":"GetTemplateGroupAccessControlEntry", + "http":{ + "method":"GET", + "requestUri":"/templates/{TemplateArn}/accessControlEntries/{GroupSecurityIdentifier}", + "responseCode":200 + }, + "input":{"shape":"GetTemplateGroupAccessControlEntryRequest"}, + "output":{"shape":"GetTemplateGroupAccessControlEntryResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Retrieves the group access control entries for a template.
" + }, + "ListConnectors":{ + "name":"ListConnectors", + "http":{ + "method":"GET", + "requestUri":"/connectors", + "responseCode":200 + }, + "input":{"shape":"ListConnectorsRequest"}, + "output":{"shape":"ListConnectorsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Lists the connectors that you created by using the https://docs.aws.amazon.com/pca-connector-ad/latest/APIReference/API_CreateConnector action.
" + }, + "ListDirectoryRegistrations":{ + "name":"ListDirectoryRegistrations", + "http":{ + "method":"GET", + "requestUri":"/directoryRegistrations", + "responseCode":200 + }, + "input":{"shape":"ListDirectoryRegistrationsRequest"}, + "output":{"shape":"ListDirectoryRegistrationsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Lists the directory registrations that you created by using the https://docs.aws.amazon.com/pca-connector-ad/latest/APIReference/API_CreateDirectoryRegistration action.
" + }, + "ListServicePrincipalNames":{ + "name":"ListServicePrincipalNames", + "http":{ + "method":"GET", + "requestUri":"/directoryRegistrations/{DirectoryRegistrationArn}/servicePrincipalNames", + "responseCode":200 + }, + "input":{"shape":"ListServicePrincipalNamesRequest"}, + "output":{"shape":"ListServicePrincipalNamesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Lists the service principal names that the connector uses to authenticate with Active Directory.
" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Lists the tags, if any, that are associated with your resource.
" + }, + "ListTemplateGroupAccessControlEntries":{ + "name":"ListTemplateGroupAccessControlEntries", + "http":{ + "method":"GET", + "requestUri":"/templates/{TemplateArn}/accessControlEntries", + "responseCode":200 + }, + "input":{"shape":"ListTemplateGroupAccessControlEntriesRequest"}, + "output":{"shape":"ListTemplateGroupAccessControlEntriesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Lists group access control entries you created.
" + }, + "ListTemplates":{ + "name":"ListTemplates", + "http":{ + "method":"GET", + "requestUri":"/templates", + "responseCode":200 + }, + "input":{"shape":"ListTemplatesRequest"}, + "output":{"shape":"ListTemplatesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Lists the templates, if any, that are associated with a connector.
" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{ResourceArn}", + "responseCode":204 + }, + "input":{"shape":"TagResourceRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Adds one or more tags to your resource.
" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{ResourceArn}", + "responseCode":204 + }, + "input":{"shape":"UntagResourceRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Removes one or more tags from your resource.
", + "idempotent":true + }, + "UpdateTemplate":{ + "name":"UpdateTemplate", + "http":{ + "method":"PATCH", + "requestUri":"/templates/{TemplateArn}", + "responseCode":200 + }, + "input":{"shape":"UpdateTemplateRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"Update template configuration to define the information included in certificates.
" + }, + "UpdateTemplateGroupAccessControlEntry":{ + "name":"UpdateTemplateGroupAccessControlEntry", + "http":{ + "method":"PATCH", + "requestUri":"/templates/{TemplateArn}/accessControlEntries/{GroupSecurityIdentifier}", + "responseCode":200 + }, + "input":{"shape":"UpdateTemplateGroupAccessControlEntryRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"Update a group access control entry you created using CreateTemplateGroupAccessControlEntry.
" + } + }, + "shapes":{ + "AccessControlEntry":{ + "type":"structure", + "members":{ + "AccessRights":{ + "shape":"AccessRights", + "documentation":"Permissions to allow or deny an Active Directory group to enroll or autoenroll certificates issued against a template.
" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"The date and time that the Access Control Entry was created.
" + }, + "GroupDisplayName":{ + "shape":"DisplayName", + "documentation":"Name of the Active Directory group. This name does not need to match the group name in Active Directory.
" + }, + "GroupSecurityIdentifier":{ + "shape":"GroupSecurityIdentifier", + "documentation":"Security identifier (SID) of the group object from Active Directory. The SID starts with \"S-\".
" + }, + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.
" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"The date and time that the Access Control Entry was updated.
" + } + }, + "documentation":"An access control entry allows or denies Active Directory groups based on their security identifiers (SIDs) from enrolling and/or autoenrolling with the template.
" + }, + "AccessControlEntryList":{ + "type":"list", + "member":{"shape":"AccessControlEntrySummary"} + }, + "AccessControlEntrySummary":{ + "type":"structure", + "members":{ + "AccessRights":{ + "shape":"AccessRights", + "documentation":"Allow or deny an Active Directory group from enrolling and autoenrolling certificates issued against a template.
" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"The date and time that the Access Control Entry was created.
" + }, + "GroupDisplayName":{ + "shape":"DisplayName", + "documentation":"Name of the Active Directory group. This name does not need to match the group name in Active Directory.
" + }, + "GroupSecurityIdentifier":{ + "shape":"GroupSecurityIdentifier", + "documentation":"Security identifier (SID) of the group object from Active Directory. The SID starts with \"S-\".
" + }, + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.
" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"The date and time that the Access Control Entry was updated.
" + } + }, + "documentation":"Summary of group access control entries that allow or deny Active Directory groups based on their security identifiers (SIDs) from enrolling and/or autofenrolling with the template.
" + }, + "AccessDeniedException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"You can receive this error if you attempt to create a resource share when you don't have the required permissions. This can be caused by insufficient permissions in policies attached to your Amazon Web Services Identity and Access Management (IAM) principal. It can also happen because of restrictions in place from an Amazon Web Services Organizations service control policy (SCP) that affects your Amazon Web Services account.
", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AccessRight":{ + "type":"string", + "enum":[ + "ALLOW", + "DENY" + ] + }, + "AccessRights":{ + "type":"structure", + "members":{ + "AutoEnroll":{ + "shape":"AccessRight", + "documentation":"Allow or deny an Active Directory group from autoenrolling certificates issued against a template. The Active Directory group must be allowed to enroll to allow autoenrollment
" + }, + "Enroll":{ + "shape":"AccessRight", + "documentation":"Allow or deny an Active Directory group from enrolling certificates issued against a template.
" + } + }, + "documentation":"Allow or deny permissions for an Active Directory group to enroll or autoenroll certificates for a template.
" + }, + "ApplicationPolicies":{ + "type":"structure", + "required":["Policies"], + "members":{ + "Critical":{ + "shape":"Boolean", + "documentation":"Marks the application policy extension as critical.
" + }, + "Policies":{ + "shape":"ApplicationPolicyList", + "documentation":"Application policies describe what the certificate can be used for.
" + } + }, + "documentation":"Application policies describe what the certificate can be used for.
" + }, + "ApplicationPolicy":{ + "type":"structure", + "members":{ + "PolicyObjectIdentifier":{ + "shape":"CustomObjectIdentifier", + "documentation":"The object identifier (OID) of an application policy.
" + }, + "PolicyType":{ + "shape":"ApplicationPolicyType", + "documentation":"The type of application policy
" + } + }, + "documentation":"Application policies describe what the certificate can be used for.
", + "union":true + }, + "ApplicationPolicyList":{ + "type":"list", + "member":{"shape":"ApplicationPolicy"}, + "max":100, + "min":1 + }, + "ApplicationPolicyType":{ + "type":"string", + "enum":[ + "ALL_APPLICATION_POLICIES", + "ANY_PURPOSE", + "ATTESTATION_IDENTITY_KEY_CERTIFICATE", + "CERTIFICATE_REQUEST_AGENT", + "CLIENT_AUTHENTICATION", + "CODE_SIGNING", + "CTL_USAGE", + "DIGITAL_RIGHTS", + "DIRECTORY_SERVICE_EMAIL_REPLICATION", + "DISALLOWED_LIST", + "DNS_SERVER_TRUST", + "DOCUMENT_ENCRYPTION", + "DOCUMENT_SIGNING", + "DYNAMIC_CODE_GENERATOR", + "EARLY_LAUNCH_ANTIMALWARE_DRIVER", + "EMBEDDED_WINDOWS_SYSTEM_COMPONENT_VERIFICATION", + "ENCLAVE", + "ENCRYPTING_FILE_SYSTEM", + "ENDORSEMENT_KEY_CERTIFICATE", + "FILE_RECOVERY", + "HAL_EXTENSION", + "IP_SECURITY_END_SYSTEM", + "IP_SECURITY_IKE_INTERMEDIATE", + "IP_SECURITY_TUNNEL_TERMINATION", + "IP_SECURITY_USER", + "ISOLATED_USER_MODE", + "KDC_AUTHENTICATION", + "KERNEL_MODE_CODE_SIGNING", + "KEY_PACK_LICENSES", + "KEY_RECOVERY", + "KEY_RECOVERY_AGENT", + "LICENSE_SERVER_VERIFICATION", + "LIFETIME_SIGNING", + "MICROSOFT_PUBLISHER", + "MICROSOFT_TIME_STAMPING", + "MICROSOFT_TRUST_LIST_SIGNING", + "OCSP_SIGNING", + "OEM_WINDOWS_SYSTEM_COMPONENT_VERIFICATION", + "PLATFORM_CERTIFICATE", + "PREVIEW_BUILD_SIGNING", + "PRIVATE_KEY_ARCHIVAL", + "PROTECTED_PROCESS_LIGHT_VERIFICATION", + "PROTECTED_PROCESS_VERIFICATION", + "QUALIFIED_SUBORDINATION", + "REVOKED_LIST_SIGNER", + "ROOT_PROGRAM_AUTO_UPDATE_CA_REVOCATION", + "ROOT_PROGRAM_AUTO_UPDATE_END_REVOCATION", + "ROOT_PROGRAM_NO_OSCP_FAILOVER_TO_CRL", + "ROOT_LIST_SIGNER", + "SECURE_EMAIL", + "SERVER_AUTHENTICATION", + "SMART_CARD_LOGIN", + "SPC_ENCRYPTED_DIGEST_RETRY_COUNT", + "SPC_RELAXED_PE_MARKER_CHECK", + "TIME_STAMPING", + "WINDOWS_HARDWARE_DRIVER_ATTESTED_VERIFICATION", + "WINDOWS_HARDWARE_DRIVER_EXTENDED_VERIFICATION", + "WINDOWS_HARDWARE_DRIVER_VERIFICATION", + "WINDOWS_HELLO_RECOVERY_KEY_ENCRYPTION", + "WINDOWS_KITS_COMPONENT", + "WINDOWS_RT_VERIFICATION", + 
"WINDOWS_SOFTWARE_EXTENSION_VERIFICATION", + "WINDOWS_STORE", + "WINDOWS_SYSTEM_COMPONENT_VERIFICATION", + "WINDOWS_TCB_COMPONENT", + "WINDOWS_THIRD_PARTY_APPLICATION_COMPONENT", + "WINDOWS_UPDATE" + ] + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "CertificateAuthorityArn":{ + "type":"string", + "max":200, + "min":5, + "pattern":"^arn:[\\w-]+:acm-pca:[\\w-]+:[0-9]+:certificate-authority\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$" + }, + "CertificateValidity":{ + "type":"structure", + "required":[ + "RenewalPeriod", + "ValidityPeriod" + ], + "members":{ + "RenewalPeriod":{ + "shape":"ValidityPeriod", + "documentation":"Renewal period is the period of time before certificate expiration when a new certificate will be requested.
" + }, + "ValidityPeriod":{ + "shape":"ValidityPeriod", + "documentation":"Information describing the end of the validity period of the certificate. This parameter sets the “Not After” date for the certificate. Certificate validity is the period of time during which a certificate is valid. Validity can be expressed as an explicit date and time when the certificate expires, or as a span of time after issuance, stated in days, months, or years. For more information, see Validity in RFC 5280. This value is unaffected when ValidityNotBefore is also specified. For example, if Validity is set to 20 days in the future, the certificate will expire 20 days from issuance time regardless of the ValidityNotBefore value.
" + } + }, + "documentation":"Information describing the end of the validity period of the certificate. This parameter sets the “Not After” date for the certificate. Certificate validity is the period of time during which a certificate is valid. Validity can be expressed as an explicit date and time when the certificate expires, or as a span of time after issuance, stated in days, months, or years. For more information, see Validity in RFC 5280. This value is unaffected when ValidityNotBefore is also specified. For example, if Validity is set to 20 days in the future, the certificate will expire 20 days from issuance time regardless of the ValidityNotBefore value.
" + }, + "ClientCompatibilityV2":{ + "type":"string", + "enum":[ + "WINDOWS_SERVER_2003", + "WINDOWS_SERVER_2008", + "WINDOWS_SERVER_2008_R2", + "WINDOWS_SERVER_2012", + "WINDOWS_SERVER_2012_R2", + "WINDOWS_SERVER_2016" + ] + }, + "ClientCompatibilityV3":{ + "type":"string", + "enum":[ + "WINDOWS_SERVER_2008", + "WINDOWS_SERVER_2008_R2", + "WINDOWS_SERVER_2012", + "WINDOWS_SERVER_2012_R2", + "WINDOWS_SERVER_2016" + ] + }, + "ClientCompatibilityV4":{ + "type":"string", + "enum":[ + "WINDOWS_SERVER_2012", + "WINDOWS_SERVER_2012_R2", + "WINDOWS_SERVER_2016" + ] + }, + "ClientToken":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[!-~]+$" + }, + "ConflictException":{ + "type":"structure", + "required":[ + "Message", + "ResourceId", + "ResourceType" + ], + "members":{ + "Message":{"shape":"String"}, + "ResourceId":{ + "shape":"String", + "documentation":"The identifier of the Amazon Web Services resource.
" + }, + "ResourceType":{ + "shape":"String", + "documentation":"The resource type, which can be one of Connector
, Template
, TemplateGroupAccessControlEntry
, ServicePrincipalName
, or DirectoryRegistration
.
This request cannot be completed for one of the following reasons because the requested resource was being concurrently modified by another request.
", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "Connector":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ConnectorArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateConnector.
" + }, + "CertificateAuthorityArn":{ + "shape":"CertificateAuthorityArn", + "documentation":"The Amazon Resource Name (ARN) of the certificate authority being used.
" + }, + "CertificateEnrollmentPolicyServerEndpoint":{ + "shape":"String", + "documentation":"Certificate enrollment endpoint for Active Directory domain-joined objects reach out to when requesting certificates.
" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"The date and time that the connector was created.
" + }, + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"The identifier of the Active Directory.
" + }, + "Status":{ + "shape":"ConnectorStatus", + "documentation":"Status of the connector. Status can be creating, active, deleting, or failed.
" + }, + "StatusReason":{ + "shape":"ConnectorStatusReason", + "documentation":"Additional information about the connector status if the status is failed.
" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"The date and time that the connector was updated.
" + }, + "VpcInformation":{ + "shape":"VpcInformation", + "documentation":"Information of the VPC and security group(s) used with the connector.
" + } + }, + "documentation":"Amazon Web Services Private CA Connector for Active Directory is a service that links your Active Directory with Amazon Web Services Private CA. The connector brokers the exchange of certificates from Amazon Web Services Private CA to domain-joined users and machines managed with Active Directory.
" + }, + "ConnectorArn":{ + "type":"string", + "max":200, + "min":5, + "pattern":"^arn:[\\w-]+:pca-connector-ad:[\\w-]+:[0-9]+:connector\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$" + }, + "ConnectorList":{ + "type":"list", + "member":{"shape":"ConnectorSummary"} + }, + "ConnectorStatus":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "DELETING", + "FAILED" + ] + }, + "ConnectorStatusReason":{ + "type":"string", + "enum":[ + "DIRECTORY_ACCESS_DENIED", + "INTERNAL_FAILURE", + "PRIVATECA_ACCESS_DENIED", + "PRIVATECA_RESOURCE_NOT_FOUND", + "SECURITY_GROUP_NOT_IN_VPC", + "VPC_ACCESS_DENIED", + "VPC_ENDPOINT_LIMIT_EXCEEDED", + "VPC_RESOURCE_NOT_FOUND" + ] + }, + "ConnectorSummary":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ConnectorArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateConnector.
" + }, + "CertificateAuthorityArn":{ + "shape":"CertificateAuthorityArn", + "documentation":"The Amazon Resource Name (ARN) of the certificate authority being used.
" + }, + "CertificateEnrollmentPolicyServerEndpoint":{ + "shape":"String", + "documentation":"Certificate enrollment endpoint for Active Directory domain-joined objects to request certificates.
" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"The date and time that the connector was created.
" + }, + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"The identifier of the Active Directory.
" + }, + "Status":{ + "shape":"ConnectorStatus", + "documentation":"Status of the connector. Status can be creating, active, deleting, or failed.
" + }, + "StatusReason":{ + "shape":"ConnectorStatusReason", + "documentation":"Additional information about the connector status if the status is failed.
" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"The date and time that the connector was updated.
" + }, + "VpcInformation":{ + "shape":"VpcInformation", + "documentation":"Information of the VPC and security group(s) used with the connector.
" + } + }, + "documentation":"Summary description of the Amazon Web Services Private CA AD connectors belonging to an Amazon Web Services account.
" + }, + "CreateConnectorRequest":{ + "type":"structure", + "required":[ + "CertificateAuthorityArn", + "DirectoryId", + "VpcInformation" + ], + "members":{ + "CertificateAuthorityArn":{ + "shape":"CertificateAuthorityArn", + "documentation":"The Amazon Resource Name (ARN) of the certificate authority being used.
" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"Idempotency token.
", + "idempotencyToken":true + }, + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"The identifier of the Active Directory.
" + }, + "Tags":{ + "shape":"Tags", + "documentation":"Metadata assigned to a connector consisting of a key-value pair.
" + }, + "VpcInformation":{ + "shape":"VpcInformation", + "documentation":"Security group IDs that describe the inbound and outbound rules.
" + } + } + }, + "CreateConnectorResponse":{ + "type":"structure", + "members":{ + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"If successful, the Amazon Resource Name (ARN) of the connector for Active Directory.
" + } + } + }, + "CreateDirectoryRegistrationRequest":{ + "type":"structure", + "required":["DirectoryId"], + "members":{ + "ClientToken":{ + "shape":"ClientToken", + "documentation":"Idempotency token.
", + "idempotencyToken":true + }, + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"The identifier of the Active Directory.
" + }, + "Tags":{ + "shape":"Tags", + "documentation":"Metadata assigned to a directory registration consisting of a key-value pair.
" + } + } + }, + "CreateDirectoryRegistrationResponse":{ + "type":"structure", + "members":{ + "DirectoryRegistrationArn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.
" + } + } + }, + "CreateServicePrincipalNameRequest":{ + "type":"structure", + "required":[ + "ConnectorArn", + "DirectoryRegistrationArn" + ], + "members":{ + "ClientToken":{ + "shape":"ClientToken", + "documentation":"Idempotency token.
", + "idempotencyToken":true + }, + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateConnector.
", + "location":"uri", + "locationName":"ConnectorArn" + }, + "DirectoryRegistrationArn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.
", + "location":"uri", + "locationName":"DirectoryRegistrationArn" + } + } + }, + "CreateTemplateGroupAccessControlEntryRequest":{ + "type":"structure", + "required":[ + "AccessRights", + "GroupDisplayName", + "GroupSecurityIdentifier", + "TemplateArn" + ], + "members":{ + "AccessRights":{ + "shape":"AccessRights", + "documentation":"Allow or deny permissions for an Active Directory group to enroll or autoenroll certificates for a template.
" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"Idempotency token.
", + "idempotencyToken":true + }, + "GroupDisplayName":{ + "shape":"DisplayName", + "documentation":"Name of the Active Directory group. This name does not need to match the group name in Active Directory.
" + }, + "GroupSecurityIdentifier":{ + "shape":"GroupSecurityIdentifier", + "documentation":"Security identifier (SID) of the group object from Active Directory. The SID starts with \"S-\".
" + }, + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.
", + "location":"uri", + "locationName":"TemplateArn" + } + } + }, + "CreateTemplateRequest":{ + "type":"structure", + "required":[ + "ConnectorArn", + "Definition", + "Name" + ], + "members":{ + "ClientToken":{ + "shape":"ClientToken", + "documentation":"Idempotency token.
", + "idempotencyToken":true + }, + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateConnector.
" + }, + "Definition":{ + "shape":"TemplateDefinition", + "documentation":"Template configuration to define the information included in certificates. Define certificate validity and renewal periods, certificate request handling and enrollment options, key usage extensions, application policies, and cryptography settings.
" + }, + "Name":{ + "shape":"TemplateName", + "documentation":"Name of the template. The template name must be unique.
" + }, + "Tags":{ + "shape":"Tags", + "documentation":"Metadata assigned to a template consisting of a key-value pair.
" + } + } + }, + "CreateTemplateResponse":{ + "type":"structure", + "members":{ + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"If successful, the Amazon Resource Name (ARN) of the template.
" + } + } + }, + "CryptoProvidersList":{ + "type":"list", + "member":{"shape":"CryptoProvidersListMemberString"}, + "max":100, + "min":1 + }, + "CryptoProvidersListMemberString":{ + "type":"string", + "max":100, + "min":1 + }, + "CustomObjectIdentifier":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^([0-2])\\.([0-9]|([0-3][0-9]))(\\.([0-9]+)){0,126}$" + }, + "DeleteConnectorRequest":{ + "type":"structure", + "required":["ConnectorArn"], + "members":{ + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateConnector.
", + "location":"uri", + "locationName":"ConnectorArn" + } + } + }, + "DeleteDirectoryRegistrationRequest":{ + "type":"structure", + "required":["DirectoryRegistrationArn"], + "members":{ + "DirectoryRegistrationArn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.
", + "location":"uri", + "locationName":"DirectoryRegistrationArn" + } + } + }, + "DeleteServicePrincipalNameRequest":{ + "type":"structure", + "required":[ + "ConnectorArn", + "DirectoryRegistrationArn" + ], + "members":{ + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateConnector.
", + "location":"uri", + "locationName":"ConnectorArn" + }, + "DirectoryRegistrationArn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.
", + "location":"uri", + "locationName":"DirectoryRegistrationArn" + } + } + }, + "DeleteTemplateGroupAccessControlEntryRequest":{ + "type":"structure", + "required":[ + "GroupSecurityIdentifier", + "TemplateArn" + ], + "members":{ + "GroupSecurityIdentifier":{ + "shape":"GroupSecurityIdentifier", + "documentation":"Security identifier (SID) of the group object from Active Directory. The SID starts with \"S-\".
", + "location":"uri", + "locationName":"GroupSecurityIdentifier" + }, + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.
", + "location":"uri", + "locationName":"TemplateArn" + } + } + }, + "DeleteTemplateRequest":{ + "type":"structure", + "required":["TemplateArn"], + "members":{ + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.
", + "location":"uri", + "locationName":"TemplateArn" + } + } + }, + "DirectoryId":{ + "type":"string", + "pattern":"^d-[0-9a-f]{10}$" + }, + "DirectoryRegistration":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.
" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"The date and time that the directory registration was created.
" + }, + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"The identifier of the Active Directory.
" + }, + "Status":{ + "shape":"DirectoryRegistrationStatus", + "documentation":"Status of the directory registration.
" + }, + "StatusReason":{ + "shape":"DirectoryRegistrationStatusReason", + "documentation":"Additional information about the directory registration status if the status is failed.
" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"The date and time that the directory registration was updated.
" + } + }, + "documentation":"The directory registration represents the authorization of the connector service with a directory.
" + }, + "DirectoryRegistrationArn":{ + "type":"string", + "max":200, + "min":5, + "pattern":"^arn:[\\w-]+:pca-connector-ad:[\\w-]+:[0-9]+:directory-registration\\/d-[0-9a-f]{10}$" + }, + "DirectoryRegistrationList":{ + "type":"list", + "member":{"shape":"DirectoryRegistrationSummary"} + }, + "DirectoryRegistrationStatus":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "DELETING", + "FAILED" + ] + }, + "DirectoryRegistrationStatusReason":{ + "type":"string", + "enum":[ + "DIRECTORY_ACCESS_DENIED", + "DIRECTORY_RESOURCE_NOT_FOUND", + "DIRECTORY_NOT_ACTIVE", + "DIRECTORY_NOT_REACHABLE", + "DIRECTORY_TYPE_NOT_SUPPORTED", + "INTERNAL_FAILURE" + ] + }, + "DirectoryRegistrationSummary":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.
" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"The date and time that the directory registration was created.
" + }, + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"The identifier of the Active Directory.
" + }, + "Status":{ + "shape":"DirectoryRegistrationStatus", + "documentation":"Status of the directory registration.
" + }, + "StatusReason":{ + "shape":"DirectoryRegistrationStatusReason", + "documentation":"Additional information about the directory registration status if the status is failed.
" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"The date and time that the directory registration was updated.
" + } + }, + "documentation":"The directory registration represents the authorization of the connector service with the Active Directory.
" + }, + "DisplayName":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^[\\x20-\\x7E]+$" + }, + "EnrollmentFlagsV2":{ + "type":"structure", + "members":{ + "EnableKeyReuseOnNtTokenKeysetStorageFull":{ + "shape":"Boolean", + "documentation":"Allow renewal using the same key.
" + }, + "IncludeSymmetricAlgorithms":{ + "shape":"Boolean", + "documentation":"Include symmetric algorithms allowed by the subject.
" + }, + "NoSecurityExtension":{ + "shape":"Boolean", + "documentation":"This flag instructs the CA to not include the security extension szOID_NTDS_CA_SECURITY_EXT (OID:1.3.6.1.4.1.311.25.2), as specified in [MS-WCCE] sections 2.2.2.7.7.4 and 3.2.2.6.2.1.4.5.9, in the issued certificate. This addresses a Windows Kerberos elevation-of-privilege vulnerability.
" + }, + "RemoveInvalidCertificateFromPersonalStore":{ + "shape":"Boolean", + "documentation":"Delete expired or revoked certificates instead of archiving them.
" + }, + "UserInteractionRequired":{ + "shape":"Boolean", + "documentation":"Require user interaction when the subject is enrolled and the private key associated with the certificate is used.
" + } + }, + "documentation":"Template configurations for v2 template schema.
" + }, + "EnrollmentFlagsV3":{ + "type":"structure", + "members":{ + "EnableKeyReuseOnNtTokenKeysetStorageFull":{ + "shape":"Boolean", + "documentation":"Allow renewal using the same key.
" + }, + "IncludeSymmetricAlgorithms":{ + "shape":"Boolean", + "documentation":"Include symmetric algorithms allowed by the subject.
" + }, + "NoSecurityExtension":{ + "shape":"Boolean", + "documentation":"This flag instructs the CA to not include the security extension szOID_NTDS_CA_SECURITY_EXT (OID:1.3.6.1.4.1.311.25.2), as specified in [MS-WCCE] sections 2.2.2.7.7.4 and 3.2.2.6.2.1.4.5.9, in the issued certificate. This addresses a Windows Kerberos elevation-of-privilege vulnerability.
" + }, + "RemoveInvalidCertificateFromPersonalStore":{ + "shape":"Boolean", + "documentation":"Delete expired or revoked certificates instead of archiving them.
" + }, + "UserInteractionRequired":{ + "shape":"Boolean", + "documentation":"Require user interaction when the subject is enrolled and the private key associated with the certificate is used.
" + } + }, + "documentation":"Template configurations for v3 template schema.
" + }, + "EnrollmentFlagsV4":{ + "type":"structure", + "members":{ + "EnableKeyReuseOnNtTokenKeysetStorageFull":{ + "shape":"Boolean", + "documentation":"Allow renewal using the same key.
" + }, + "IncludeSymmetricAlgorithms":{ + "shape":"Boolean", + "documentation":"Include symmetric algorithms allowed by the subject.
" + }, + "NoSecurityExtension":{ + "shape":"Boolean", + "documentation":"This flag instructs the CA to not include the security extension szOID_NTDS_CA_SECURITY_EXT (OID:1.3.6.1.4.1.311.25.2), as specified in [MS-WCCE] sections 2.2.2.7.7.4 and 3.2.2.6.2.1.4.5.9, in the issued certificate. This addresses a Windows Kerberos elevation-of-privilege vulnerability.
" + }, + "RemoveInvalidCertificateFromPersonalStore":{ + "shape":"Boolean", + "documentation":"Delete expired or revoked certificates instead of archiving them.
" + }, + "UserInteractionRequired":{ + "shape":"Boolean", + "documentation":"Require user interaction when the subject is enrolled and the private key associated with the certificate is used.
" + } + }, + "documentation":"Template configurations for v4 template schema.
" + }, + "ExtensionsV2":{ + "type":"structure", + "required":["KeyUsage"], + "members":{ + "ApplicationPolicies":{ + "shape":"ApplicationPolicies", + "documentation":"Application policies specify what the certificate is used for and its purpose.
" + }, + "KeyUsage":{ + "shape":"KeyUsage", + "documentation":"The key usage extension defines the purpose (e.g., encipherment, signature, certificate signing) of the key contained in the certificate.
" + } + }, + "documentation":"Certificate extensions for v2 template schema
" + }, + "ExtensionsV3":{ + "type":"structure", + "required":["KeyUsage"], + "members":{ + "ApplicationPolicies":{ + "shape":"ApplicationPolicies", + "documentation":"Application policies specify what the certificate is used for and its purpose.
" + }, + "KeyUsage":{ + "shape":"KeyUsage", + "documentation":"The key usage extension defines the purpose (e.g., encipherment, signature, certificate signing) of the key contained in the certificate.
" + } + }, + "documentation":"Certificate extensions for v3 template schema
" + }, + "ExtensionsV4":{ + "type":"structure", + "required":["KeyUsage"], + "members":{ + "ApplicationPolicies":{ + "shape":"ApplicationPolicies", + "documentation":"Application policies specify what the certificate is used for and its purpose.
" + }, + "KeyUsage":{ + "shape":"KeyUsage", + "documentation":"The key usage extension defines the purpose (e.g., encipherment, signature) of the key contained in the certificate.
" + } + }, + "documentation":"Certificate extensions for v4 template schema
" + }, + "GeneralFlagsV2":{ + "type":"structure", + "members":{ + "AutoEnrollment":{ + "shape":"Boolean", + "documentation":"Allows certificate issuance using autoenrollment. Set to TRUE to allow autoenrollment.
" + }, + "MachineType":{ + "shape":"Boolean", + "documentation":"Defines if the template is for machines or users. Set to TRUE if the template is for machines. Set to FALSE if the template is for users.
" + } + }, + "documentation":"General flags for v2 template schema that defines if the template is for a machine or a user and if the template can be issued using autoenrollment.
" + }, + "GeneralFlagsV3":{ + "type":"structure", + "members":{ + "AutoEnrollment":{ + "shape":"Boolean", + "documentation":"Allows certificate issuance using autoenrollment. Set to TRUE to allow autoenrollment.
" + }, + "MachineType":{ + "shape":"Boolean", + "documentation":"Defines if the template is for machines or users. Set to TRUE if the template is for machines. Set to FALSE if the template is for users.
" + } + }, + "documentation":"General flags for v3 template schema that defines if the template is for a machine or a user and if the template can be issued using autoenrollment.
" + }, + "GeneralFlagsV4":{ + "type":"structure", + "members":{ + "AutoEnrollment":{ + "shape":"Boolean", + "documentation":"Allows certificate issuance using autoenrollment. Set to TRUE to allow autoenrollment.
" + }, + "MachineType":{ + "shape":"Boolean", + "documentation":"Defines if the template is for machines or users. Set to TRUE if the template is for machines. Set to FALSE if the template is for users.
" + } + }, + "documentation":"General flags for v4 template schema that defines if the template is for a machine or a user and if the template can be issued using autoenrollment.
" + }, + "GetConnectorRequest":{ + "type":"structure", + "required":["ConnectorArn"], + "members":{ + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateConnector.
", + "location":"uri", + "locationName":"ConnectorArn" + } + } + }, + "GetConnectorResponse":{ + "type":"structure", + "members":{ + "Connector":{ + "shape":"Connector", + "documentation":"A structure that contains information about your connector.
" + } + } + }, + "GetDirectoryRegistrationRequest":{ + "type":"structure", + "required":["DirectoryRegistrationArn"], + "members":{ + "DirectoryRegistrationArn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.
", + "location":"uri", + "locationName":"DirectoryRegistrationArn" + } + } + }, + "GetDirectoryRegistrationResponse":{ + "type":"structure", + "members":{ + "DirectoryRegistration":{ + "shape":"DirectoryRegistration", + "documentation":"The directory registration represents the authorization of the connector service with a directory.
" + } + } + }, + "GetServicePrincipalNameRequest":{ + "type":"structure", + "required":[ + "ConnectorArn", + "DirectoryRegistrationArn" + ], + "members":{ + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateConnector.
", + "location":"uri", + "locationName":"ConnectorArn" + }, + "DirectoryRegistrationArn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.
", + "location":"uri", + "locationName":"DirectoryRegistrationArn" + } + } + }, + "GetServicePrincipalNameResponse":{ + "type":"structure", + "members":{ + "ServicePrincipalName":{ + "shape":"ServicePrincipalName", + "documentation":"The service principal name that the connector uses to authenticate with Active Directory.
" + } + } + }, + "GetTemplateGroupAccessControlEntryRequest":{ + "type":"structure", + "required":[ + "GroupSecurityIdentifier", + "TemplateArn" + ], + "members":{ + "GroupSecurityIdentifier":{ + "shape":"GroupSecurityIdentifier", + "documentation":"Security identifier (SID) of the group object from Active Directory. The SID starts with \"S-\".
", + "location":"uri", + "locationName":"GroupSecurityIdentifier" + }, + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.
", + "location":"uri", + "locationName":"TemplateArn" + } + } + }, + "GetTemplateGroupAccessControlEntryResponse":{ + "type":"structure", + "members":{ + "AccessControlEntry":{ + "shape":"AccessControlEntry", + "documentation":"An access control entry allows or denies an Active Directory group from enrolling and/or autoenrolling with a template.
" + } + } + }, + "GetTemplateRequest":{ + "type":"structure", + "required":["TemplateArn"], + "members":{ + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.
", + "location":"uri", + "locationName":"TemplateArn" + } + } + }, + "GetTemplateResponse":{ + "type":"structure", + "members":{ + "Template":{ + "shape":"Template", + "documentation":"A certificate template that the connector uses to issue certificates from a private CA.
" + } + } + }, + "GroupSecurityIdentifier":{ + "type":"string", + "max":256, + "min":7, + "pattern":"^S-[0-9]-([0-9]+-){1,14}[0-9]+$" + }, + "HashAlgorithm":{ + "type":"string", + "enum":[ + "SHA256", + "SHA384", + "SHA512" + ] + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"The request processing has failed because of an unknown error, exception or failure with an internal server.
", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "KeySpec":{ + "type":"string", + "enum":[ + "KEY_EXCHANGE", + "SIGNATURE" + ] + }, + "KeyUsage":{ + "type":"structure", + "required":["UsageFlags"], + "members":{ + "Critical":{ + "shape":"Boolean", + "documentation":"Sets the key usage extension to critical.
" + }, + "UsageFlags":{ + "shape":"KeyUsageFlags", + "documentation":"The key usage flags represent the purpose (e.g., encipherment, signature) of the key contained in the certificate.
" + } + }, + "documentation":"The key usage extension defines the purpose (e.g., encipherment, signature) of the key contained in the certificate.
" + }, + "KeyUsageFlags":{ + "type":"structure", + "members":{ + "DataEncipherment":{ + "shape":"Boolean", + "documentation":"DataEncipherment is asserted when the subject public key is used for directly enciphering raw user data without the use of an intermediate symmetric cipher.
" + }, + "DigitalSignature":{ + "shape":"Boolean", + "documentation":"The digitalSignature is asserted when the subject public key is used for verifying digital signatures.
" + }, + "KeyAgreement":{ + "shape":"Boolean", + "documentation":"KeyAgreement is asserted when the subject public key is used for key agreement.
" + }, + "KeyEncipherment":{ + "shape":"Boolean", + "documentation":"KeyEncipherment is asserted when the subject public key is used for enciphering private or secret keys, i.e., for key transport.
" + }, + "NonRepudiation":{ + "shape":"Boolean", + "documentation":"NonRepudiation is asserted when the subject public key is used to verify digital signatures.
" + } + }, + "documentation":"The key usage flags represent the purpose (e.g., encipherment, signature) of the key contained in the certificate.
" + }, + "KeyUsageProperty":{ + "type":"structure", + "members":{ + "PropertyFlags":{ + "shape":"KeyUsagePropertyFlags", + "documentation":"You can specify key usage for encryption, key agreement, and signature. You can use property flags or property type but not both.
" + }, + "PropertyType":{ + "shape":"KeyUsagePropertyType", + "documentation":"You can specify all key usages using property type ALL. You can use property type or property flags but not both.
" + } + }, + "documentation":"The key usage property defines the purpose of the private key contained in the certificate. You can specify specific purposes using property flags or all by using property type ALL.
", + "union":true + }, + "KeyUsagePropertyFlags":{ + "type":"structure", + "members":{ + "Decrypt":{ + "shape":"Boolean", + "documentation":"Allows key for encryption and decryption.
" + }, + "KeyAgreement":{ + "shape":"Boolean", + "documentation":"Allows key exchange without encryption.
" + }, + "Sign":{ + "shape":"Boolean", + "documentation":"Allow key use for digital signature.
" + } + }, + "documentation":"Specifies key usage.
" + }, + "KeyUsagePropertyType":{ + "type":"string", + "enum":["ALL"] + }, + "ListConnectorsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"Use this parameter when paginating results to specify the maximum number of items to return in the response on each page. If additional items exist beyond the number you specify, the NextToken
element is sent in the response. Use this NextToken
value in a subsequent request to retrieve additional items.
Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken
parameter from the response you just received.
Summary information about each connector you have created.
" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken parameter from the response you just received.
" + } + } + }, + "ListDirectoryRegistrationsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"Use this parameter when paginating results to specify the maximum number of items to return in the response on each page. If additional items exist beyond the number you specify, the NextToken
element is sent in the response. Use this NextToken
value in a subsequent request to retrieve additional items.
Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken
parameter from the response you just received.
Summary information about each directory registration you have created.
" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken
parameter from the response you just received.
The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.
", + "location":"uri", + "locationName":"DirectoryRegistrationArn" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"Use this parameter when paginating results to specify the maximum number of items to return in the response on each page. If additional items exist beyond the number you specify, the NextToken
element is sent in the response. Use this NextToken
value in a subsequent request to retrieve additional items.
Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken
parameter from the response you just received.
Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken
parameter from the response you just received.
The service principal name, if any, that the connector uses to authenticate with Active Directory.
" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"The Amazon Resource Name (ARN) that was returned when you created the resource.
", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"Tags", + "documentation":"The tags, if any, that are associated with your resource.
" + } + } + }, + "ListTemplateGroupAccessControlEntriesRequest":{ + "type":"structure", + "required":["TemplateArn"], + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"Use this parameter when paginating results to specify the maximum number of items to return in the response on each page. If additional items exist beyond the number you specify, the NextToken
element is sent in the response. Use this NextToken
value in a subsequent request to retrieve additional items.
Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken
parameter from the response you just received.
The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.
", + "location":"uri", + "locationName":"TemplateArn" + } + } + }, + "ListTemplateGroupAccessControlEntriesResponse":{ + "type":"structure", + "members":{ + "AccessControlEntries":{ + "shape":"AccessControlEntryList", + "documentation":"An access control entry grants or denies permission to an Active Directory group to enroll certificates for a template.
" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken
parameter from the response you just received.
The Amazon Resource Name (ARN) that was returned when you called CreateConnector.
", + "location":"querystring", + "locationName":"ConnectorArn" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"Use this parameter when paginating results to specify the maximum number of items to return in the response on each page. If additional items exist beyond the number you specify, the NextToken
element is sent in the response. Use this NextToken
value in a subsequent request to retrieve additional items.
Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken
parameter from the response you just received.
Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken
parameter from the response you just received.
Custom configuration templates used when issuing a certificate.
" + } + } + }, + "MaxResults":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "NextToken":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"^(?:[A-Za-z0-9_-]{4})*(?:[A-Za-z0-9_-]{2}==|[A-Za-z0-9_-]{3}=)?$" + }, + "PrivateKeyAlgorithm":{ + "type":"string", + "enum":[ + "RSA", + "ECDH_P256", + "ECDH_P384", + "ECDH_P521" + ] + }, + "PrivateKeyAttributesV2":{ + "type":"structure", + "required":[ + "KeySpec", + "MinimalKeyLength" + ], + "members":{ + "CryptoProviders":{ + "shape":"CryptoProvidersList", + "documentation":"Defines the cryptographic providers used to generate the private key.
" + }, + "KeySpec":{ + "shape":"KeySpec", + "documentation":"Defines the purpose of the private key. Set it to \"KEY_EXCHANGE\" or \"SIGNATURE\" value.
" + }, + "MinimalKeyLength":{ + "shape":"PrivateKeyAttributesV2MinimalKeyLengthInteger", + "documentation":"Set the minimum key length of the private key.
" + } + }, + "documentation":"Defines the attributes of the private key.
" + }, + "PrivateKeyAttributesV2MinimalKeyLengthInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "PrivateKeyAttributesV3":{ + "type":"structure", + "required":[ + "Algorithm", + "KeySpec", + "KeyUsageProperty", + "MinimalKeyLength" + ], + "members":{ + "Algorithm":{ + "shape":"PrivateKeyAlgorithm", + "documentation":"Defines the algorithm used to generate the private key.
" + }, + "CryptoProviders":{ + "shape":"CryptoProvidersList", + "documentation":"Defines the cryptographic providers used to generate the private key.
" + }, + "KeySpec":{ + "shape":"KeySpec", + "documentation":"Defines the purpose of the private key. Set it to \"KEY_EXCHANGE\" or \"SIGNATURE\" value.
" + }, + "KeyUsageProperty":{ + "shape":"KeyUsageProperty", + "documentation":"The key usage property defines the purpose of the private key contained in the certificate. You can specify specific purposes using property flags or all by using property type ALL.
" + }, + "MinimalKeyLength":{ + "shape":"PrivateKeyAttributesV3MinimalKeyLengthInteger", + "documentation":"Set the minimum key length of the private key.
" + } + }, + "documentation":"Defines the attributes of the private key.
" + }, + "PrivateKeyAttributesV3MinimalKeyLengthInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "PrivateKeyAttributesV4":{ + "type":"structure", + "required":[ + "KeySpec", + "MinimalKeyLength" + ], + "members":{ + "Algorithm":{ + "shape":"PrivateKeyAlgorithm", + "documentation":"Defines the algorithm used to generate the private key.
" + }, + "CryptoProviders":{ + "shape":"CryptoProvidersList", + "documentation":"Defines the cryptographic providers used to generate the private key.
" + }, + "KeySpec":{ + "shape":"KeySpec", + "documentation":"Defines the purpose of the private key. Set it to \"KEY_EXCHANGE\" or \"SIGNATURE\" value.
" + }, + "KeyUsageProperty":{ + "shape":"KeyUsageProperty", + "documentation":"The key usage property defines the purpose of the private key contained in the certificate. You can specify specific purposes using property flags or all by using property type ALL.
" + }, + "MinimalKeyLength":{ + "shape":"PrivateKeyAttributesV4MinimalKeyLengthInteger", + "documentation":"Set the minimum key length of the private key.
" + } + }, + "documentation":"Defines the attributes of the private key.
" + }, + "PrivateKeyAttributesV4MinimalKeyLengthInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "PrivateKeyFlagsV2":{ + "type":"structure", + "required":["ClientVersion"], + "members":{ + "ClientVersion":{ + "shape":"ClientCompatibilityV2", + "documentation":"Defines the minimum client compatibility.
" + }, + "ExportableKey":{ + "shape":"Boolean", + "documentation":"Allows the private key to be exported.
" + }, + "StrongKeyProtectionRequired":{ + "shape":"Boolean", + "documentation":"Require user input when using the private key for enrollment.
" + } + }, + "documentation":"Private key flags for v2 templates specify the client compatibility, if the private key can be exported, and if user input is required when using a private key.
" + }, + "PrivateKeyFlagsV3":{ + "type":"structure", + "required":["ClientVersion"], + "members":{ + "ClientVersion":{ + "shape":"ClientCompatibilityV3", + "documentation":"Defines the minimum client compatibility.
" + }, + "ExportableKey":{ + "shape":"Boolean", + "documentation":"Allows the private key to be exported.
" + }, + "RequireAlternateSignatureAlgorithm":{ + "shape":"Boolean", + "documentation":"Reguires the PKCS #1 v2.1 signature format for certificates. You should verify that your CA, objects, and applications can accept this signature format.
" + }, + "StrongKeyProtectionRequired":{ + "shape":"Boolean", + "documentation":"Requirer user input when using the private key for enrollment.
" + } + }, + "documentation":"Private key flags for v3 templates specify the client compatibility, if the private key can be exported, if user input is required when using a private key, and if an alternate signature algorithm should be used.
" + }, + "PrivateKeyFlagsV4":{ + "type":"structure", + "required":["ClientVersion"], + "members":{ + "ClientVersion":{ + "shape":"ClientCompatibilityV4", + "documentation":"Defines the minimum client compatibility.
" + }, + "ExportableKey":{ + "shape":"Boolean", + "documentation":"Allows the private key to be exported.
" + }, + "RequireAlternateSignatureAlgorithm":{ + "shape":"Boolean", + "documentation":"Requires the PKCS #1 v2.1 signature format for certificates. You should verify that your CA, objects, and applications can accept this signature format.
" + }, + "RequireSameKeyRenewal":{ + "shape":"Boolean", + "documentation":"Renew certificate using the same private key.
" + }, + "StrongKeyProtectionRequired":{ + "shape":"Boolean", + "documentation":"Require user input when using the private key for enrollment.
" + }, + "UseLegacyProvider":{ + "shape":"Boolean", + "documentation":"Specifies the cryptographic service provider category used to generate private keys. Set to TRUE to use Legacy Cryptographic Service Providers and FALSE to use Key Storage Providers.
" + } + }, + "documentation":"Private key flags for v4 templates specify the client compatibility, if the private key can be exported, if user input is required when using a private key, if an alternate signature algorithm should be used, and if certificates are renewed using the same private key.
" + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "Message", + "ResourceId", + "ResourceType" + ], + "members":{ + "Message":{"shape":"String"}, + "ResourceId":{ + "shape":"String", + "documentation":"The identifier of the Amazon Web Services resource.
" + }, + "ResourceType":{ + "shape":"String", + "documentation":"The resource type, which can be one of Connector
, Template
, TemplateGroupAccessControlEntry
, ServicePrincipalName
, or DirectoryRegistration
.
The operation tried to access a nonexistent resource. The resource might not be specified correctly, or its status might not be ACTIVE.
", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SecurityGroupId":{ + "type":"string", + "max":20, + "min":11, + "pattern":"^(?:sg-[0-9a-f]{8}|sg-[0-9a-f]{17})$" + }, + "SecurityGroupIdList":{ + "type":"list", + "member":{"shape":"SecurityGroupId"}, + "max":4, + "min":1 + }, + "ServicePrincipalName":{ + "type":"structure", + "members":{ + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateConnector.html.
" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"The date and time that the service principal name was created.
" + }, + "DirectoryRegistrationArn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.
" + }, + "Status":{ + "shape":"ServicePrincipalNameStatus", + "documentation":"The status of a service principal name.
" + }, + "StatusReason":{ + "shape":"ServicePrincipalNameStatusReason", + "documentation":"Additional information for the status of a service principal name if the status is failed.
" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"The date and time that the service principal name was updated.
" + } + }, + "documentation":"The service principal name that the connector uses to authenticate with Active Directory.
" + }, + "ServicePrincipalNameList":{ + "type":"list", + "member":{"shape":"ServicePrincipalNameSummary"} + }, + "ServicePrincipalNameStatus":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "DELETING", + "FAILED" + ] + }, + "ServicePrincipalNameStatusReason":{ + "type":"string", + "enum":[ + "DIRECTORY_ACCESS_DENIED", + "DIRECTORY_NOT_REACHABLE", + "DIRECTORY_RESOURCE_NOT_FOUND", + "SPN_EXISTS_ON_DIFFERENT_AD_OBJECT", + "INTERNAL_FAILURE" + ] + }, + "ServicePrincipalNameSummary":{ + "type":"structure", + "members":{ + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateConnector.
" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"The date and time that the service principal name was created.
" + }, + "DirectoryRegistrationArn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.
" + }, + "Status":{ + "shape":"ServicePrincipalNameStatus", + "documentation":"The status of a service principal name.
" + }, + "StatusReason":{ + "shape":"ServicePrincipalNameStatusReason", + "documentation":"Additional information for the status of a service principal name if the status is failed.
" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"Time when the service principal name was updated.
" + } + }, + "documentation":"The service principal name that the connector uses to authenticate with Active Directory.
" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "Message", + "QuotaCode", + "ResourceId", + "ResourceType", + "ServiceCode" + ], + "members":{ + "Message":{"shape":"String"}, + "QuotaCode":{ + "shape":"String", + "documentation":"The code associated with the service quota.
" + }, + "ResourceId":{ + "shape":"String", + "documentation":"The identifier of the Amazon Web Services resource.
" + }, + "ResourceType":{ + "shape":"String", + "documentation":"The resource type, which can be one of Connector
, Template
, TemplateGroupAccessControlEntry
, ServicePrincipalName
, or DirectoryRegistration
.
Identifies the originating service.
" + } + }, + "documentation":"Request would cause a service quota to be exceeded.
", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "SubjectNameFlagsV2":{ + "type":"structure", + "members":{ + "RequireCommonName":{ + "shape":"Boolean", + "documentation":"Include the common name in the subject name.
" + }, + "RequireDirectoryPath":{ + "shape":"Boolean", + "documentation":"Include the directory path in the subject name.
" + }, + "RequireDnsAsCn":{ + "shape":"Boolean", + "documentation":"Include the DNS as common name in the subject name.
" + }, + "RequireEmail":{ + "shape":"Boolean", + "documentation":"Include the subject's email in the subject name.
" + }, + "SanRequireDirectoryGuid":{ + "shape":"Boolean", + "documentation":"Include the globally unique identifier (GUID) in the subject alternate name.
" + }, + "SanRequireDns":{ + "shape":"Boolean", + "documentation":"Include the DNS in the subject alternate name.
" + }, + "SanRequireDomainDns":{ + "shape":"Boolean", + "documentation":"Include the domain DNS in the subject alternate name.
" + }, + "SanRequireEmail":{ + "shape":"Boolean", + "documentation":"Include the subject's email in the subject alternate name.
" + }, + "SanRequireSpn":{ + "shape":"Boolean", + "documentation":"Include the service principal name (SPN) in the subject alternate name.
" + }, + "SanRequireUpn":{ + "shape":"Boolean", + "documentation":"Include the user principal name (UPN) in the subject alternate name.
" + } + }, + "documentation":"Information to include in the subject name and alternate subject name of the certificate. The subject name can be common name, directory path, DNS as common name, or left blank. You can optionally include email to the subject name for user templates. If you leave the subject name blank then you must set a subject alternate name. The subject alternate name (SAN) can include globally unique identifier (GUID), DNS, domain DNS, email, service principal name (SPN), and user principal name (UPN). You can leave the SAN blank. If you leave the SAN blank, then you must set a subject name.
" + }, + "SubjectNameFlagsV3":{ + "type":"structure", + "members":{ + "RequireCommonName":{ + "shape":"Boolean", + "documentation":"Include the common name in the subject name.
" + }, + "RequireDirectoryPath":{ + "shape":"Boolean", + "documentation":"Include the directory path in the subject name.
" + }, + "RequireDnsAsCn":{ + "shape":"Boolean", + "documentation":"Include the DNS as common name in the subject name.
" + }, + "RequireEmail":{ + "shape":"Boolean", + "documentation":"Include the subject's email in the subject name.
" + }, + "SanRequireDirectoryGuid":{ + "shape":"Boolean", + "documentation":"Include the globally unique identifier (GUID) in the subject alternate name.
" + }, + "SanRequireDns":{ + "shape":"Boolean", + "documentation":"Include the DNS in the subject alternate name.
" + }, + "SanRequireDomainDns":{ + "shape":"Boolean", + "documentation":"Include the domain DNS in the subject alternate name.
" + }, + "SanRequireEmail":{ + "shape":"Boolean", + "documentation":"Include the subject's email in the subject alternate name.
" + }, + "SanRequireSpn":{ + "shape":"Boolean", + "documentation":"Include the service principal name (SPN) in the subject alternate name.
" + }, + "SanRequireUpn":{ + "shape":"Boolean", + "documentation":"Include the user principal name (UPN) in the subject alternate name.
" + } + }, + "documentation":"Information to include in the subject name and alternate subject name of the certificate. The subject name can be common name, directory path, DNS as common name, or left blank. You can optionally include email to the subject name for user templates. If you leave the subject name blank then you must set a subject alternate name. The subject alternate name (SAN) can include globally unique identifier (GUID), DNS, domain DNS, email, service principal name (SPN), and user principal name (UPN). You can leave the SAN blank. If you leave the SAN blank, then you must set a subject name.
" + }, + "SubjectNameFlagsV4":{ + "type":"structure", + "members":{ + "RequireCommonName":{ + "shape":"Boolean", + "documentation":"Include the common name in the subject name.
" + }, + "RequireDirectoryPath":{ + "shape":"Boolean", + "documentation":"Include the directory path in the subject name.
" + }, + "RequireDnsAsCn":{ + "shape":"Boolean", + "documentation":"Include the DNS as common name in the subject name.
" + }, + "RequireEmail":{ + "shape":"Boolean", + "documentation":"Include the subject's email in the subject name.
" + }, + "SanRequireDirectoryGuid":{ + "shape":"Boolean", + "documentation":"Include the globally unique identifier (GUID) in the subject alternate name.
" + }, + "SanRequireDns":{ + "shape":"Boolean", + "documentation":"Include the DNS in the subject alternate name.
" + }, + "SanRequireDomainDns":{ + "shape":"Boolean", + "documentation":"Include the domain DNS in the subject alternate name.
" + }, + "SanRequireEmail":{ + "shape":"Boolean", + "documentation":"Include the subject's email in the subject alternate name.
" + }, + "SanRequireSpn":{ + "shape":"Boolean", + "documentation":"Include the service principal name (SPN) in the subject alternate name.
" + }, + "SanRequireUpn":{ + "shape":"Boolean", + "documentation":"Include the user principal name (UPN) in the subject alternate name.
" + } + }, + "documentation":"Information to include in the subject name and alternate subject name of the certificate. The subject name can be common name, directory path, DNS as common name, or left blank. You can optionally include email to the subject name for user templates. If you leave the subject name blank then you must set a subject alternate name. The subject alternate name (SAN) can include globally unique identifier (GUID), DNS, domain DNS, email, service principal name (SPN), and user principal name (UPN). You can leave the SAN blank. If you leave the SAN blank, then you must set a subject name.
" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"The Amazon Resource Name (ARN) that was returned when you created the resource.
", + "location":"uri", + "locationName":"ResourceArn" + }, + "Tags":{ + "shape":"Tags", + "documentation":"Metadata assigned to a directory registration consisting of a key-value pair.
" + } + } + }, + "Tags":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Template":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"TemplateArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.
" + }, + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateConnector.
" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"The date and time that the template was created.
" + }, + "Definition":{ + "shape":"TemplateDefinition", + "documentation":"Template configuration to define the information included in certificates. Define certificate validity and renewal periods, certificate request handling and enrollment options, key usage extensions, application policies, and cryptography settings.
" + }, + "Name":{ + "shape":"TemplateName", + "documentation":"Name of the templates. Template names must be unique.
" + }, + "ObjectIdentifier":{ + "shape":"CustomObjectIdentifier", + "documentation":"Object identifier of a template.
" + }, + "PolicySchema":{ + "shape":"Integer", + "documentation":"The template schema version. Template schema versions can be v2, v3, or v4. The template configuration options change based on the template schema version.
" + }, + "Revision":{ + "shape":"TemplateRevision", + "documentation":"The version of the template. Template updates will increment the minor revision. Re-enrolling all certificate holders will increment the major revision.
" + }, + "Status":{ + "shape":"TemplateStatus", + "documentation":"Status of the template. Status can be creating, active, deleting, or failed.
" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"The date and time that the template was updated.
" + } + }, + "documentation":"An Active Directory compatible certificate template. Connectors issue certificates against these templates based on the requestor's Active Directory group membership.
" + }, + "TemplateArn":{ + "type":"string", + "max":200, + "min":5, + "pattern":"^arn:[\\w-]+:pca-connector-ad:[\\w-]+:[0-9]+:connector\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}\\/template\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$" + }, + "TemplateDefinition":{ + "type":"structure", + "members":{ + "TemplateV2":{ + "shape":"TemplateV2", + "documentation":"Template configuration to define the information included in certificates. Define certificate validity and renewal periods, certificate request handling and enrollment options, key usage extensions, application policies, and cryptography settings.
" + }, + "TemplateV3":{ + "shape":"TemplateV3", + "documentation":"Template configuration to define the information included in certificates. Define certificate validity and renewal periods, certificate request handling and enrollment options, key usage extensions, application policies, and cryptography settings.
" + }, + "TemplateV4":{ + "shape":"TemplateV4", + "documentation":"Template configuration to define the information included in certificates. Define certificate validity and renewal periods, certificate request handling and enrollment options, key usage extensions, application policies, and cryptography settings.
" + } + }, + "documentation":"Template configuration to define the information included in certificates. Define certificate validity and renewal periods, certificate request handling and enrollment options, key usage extensions, application policies, and cryptography settings.
", + "union":true + }, + "TemplateList":{ + "type":"list", + "member":{"shape":"TemplateSummary"} + }, + "TemplateName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^(?!^\\s+$)((?![\\x5c'\\x2b,;<=>#\\x22])([\\x20-\\x7E]))+$" + }, + "TemplateNameList":{ + "type":"list", + "member":{"shape":"TemplateName"}, + "max":100, + "min":1 + }, + "TemplateRevision":{ + "type":"structure", + "required":[ + "MajorRevision", + "MinorRevision" + ], + "members":{ + "MajorRevision":{ + "shape":"Integer", + "documentation":"The revision version of the template. Re-enrolling all certificate holders will increment the major revision.
" + }, + "MinorRevision":{ + "shape":"Integer", + "documentation":"The revision version of the template. Re-enrolling all certificate holders will increment the major revision.
" + } + }, + "documentation":"The revision version of the template. Template updates will increment the minor revision. Re-enrolling all certificate holders will increment the major revision.
" + }, + "TemplateStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "DELETING" + ] + }, + "TemplateSummary":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"TemplateArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.
" + }, + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateConnector.
" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"The date and time that the template was created.
" + }, + "Definition":{ + "shape":"TemplateDefinition", + "documentation":"Template configuration to define the information included in certificates. Define certificate validity and renewal periods, certificate request handling and enrollment options, key usage extensions, application policies, and cryptography settings.
" + }, + "Name":{ + "shape":"TemplateName", + "documentation":"Name of the template. The template name must be unique.
" + }, + "ObjectIdentifier":{ + "shape":"CustomObjectIdentifier", + "documentation":"Object identifier of a template.
" + }, + "PolicySchema":{ + "shape":"Integer", + "documentation":"The template schema version. Template schema versions can be v2, v3, or v4. The template configuration options change based on the template schema version.
" + }, + "Revision":{ + "shape":"TemplateRevision", + "documentation":"The revision version of the template. Template updates will increment the minor revision. Re-enrolling all certificate holders will increment the major revision.
" + }, + "Status":{ + "shape":"TemplateStatus", + "documentation":"Status of the template. Status can be creating, active, deleting, or failed.
" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"The date and time that the template was updated.
" + } + }, + "documentation":"An Active Directory compatible certificate template. Connectors issue certificates against these templates based on the requestor's Active Directory group membership.
" + }, + "TemplateV2":{ + "type":"structure", + "required":[ + "CertificateValidity", + "EnrollmentFlags", + "Extensions", + "GeneralFlags", + "PrivateKeyAttributes", + "PrivateKeyFlags", + "SubjectNameFlags" + ], + "members":{ + "CertificateValidity":{ + "shape":"CertificateValidity", + "documentation":"Certificate validity describes the validity and renewal periods of a certificate.
" + }, + "EnrollmentFlags":{ + "shape":"EnrollmentFlagsV2", + "documentation":"Enrollment flags describe the enrollment settings for certificates such as using the existing private key and deleting expired or revoked certificates.
" + }, + "Extensions":{ + "shape":"ExtensionsV2", + "documentation":"Extensions describe the key usage extensions and application policies for a template.
" + }, + "GeneralFlags":{ + "shape":"GeneralFlagsV2", + "documentation":"General flags describe whether the template is used for computers or users and if the template can be used with autoenrollment.
" + }, + "PrivateKeyAttributes":{ + "shape":"PrivateKeyAttributesV2", + "documentation":"Private key attributes allow you to specify the minimal key length, key spec, and cryptographic providers for the private key of a certificate for v2 templates. V2 templates allow you to use Legacy Cryptographic Service Providers.
" + }, + "PrivateKeyFlags":{ + "shape":"PrivateKeyFlagsV2", + "documentation":"Private key flags for v2 templates specify the client compatibility, if the private key can be exported, and if user input is required when using a private key.
" + }, + "SubjectNameFlags":{ + "shape":"SubjectNameFlagsV2", + "documentation":"Subject name flags describe the subject name and subject alternate name that is included in a certificate.
" + }, + "SupersededTemplates":{ + "shape":"TemplateNameList", + "documentation":"List of templates in Active Directory that are superseded by this template.
" + } + }, + "documentation":"v2 template schema that uses Legacy Cryptographic Providers.
" + }, + "TemplateV3":{ + "type":"structure", + "required":[ + "CertificateValidity", + "EnrollmentFlags", + "Extensions", + "GeneralFlags", + "HashAlgorithm", + "PrivateKeyAttributes", + "PrivateKeyFlags", + "SubjectNameFlags" + ], + "members":{ + "CertificateValidity":{ + "shape":"CertificateValidity", + "documentation":"Certificate validity describes the validity and renewal periods of a certificate.
" + }, + "EnrollmentFlags":{ + "shape":"EnrollmentFlagsV3", + "documentation":"Enrollment flags describe the enrollment settings for certificates such as using the existing private key and deleting expired or revoked certificates.
" + }, + "Extensions":{ + "shape":"ExtensionsV3", + "documentation":"Extensions describe the key usage extensions and application policies for a template.
" + }, + "GeneralFlags":{ + "shape":"GeneralFlagsV3", + "documentation":"General flags describe whether the template is used for computers or users and if the template can be used with autoenrollment.
" + }, + "HashAlgorithm":{ + "shape":"HashAlgorithm", + "documentation":"Specifies the hash algorithm used to hash the private key.
" + }, + "PrivateKeyAttributes":{ + "shape":"PrivateKeyAttributesV3", + "documentation":"Private key attributes allow you to specify the algorithm, minimal key length, key spec, key usage, and cryptographic providers for the private key of a certificate for v3 templates. V3 templates allow you to use Key Storage Providers.
" + }, + "PrivateKeyFlags":{ + "shape":"PrivateKeyFlagsV3", + "documentation":"Private key flags for v3 templates specify the client compatibility, if the private key can be exported, if user input is required when using a private key, and if an alternate signature algorithm should be used.
" + }, + "SubjectNameFlags":{ + "shape":"SubjectNameFlagsV3", + "documentation":"Subject name flags describe the subject name and subject alternate name that is included in a certificate.
" + }, + "SupersededTemplates":{ + "shape":"TemplateNameList", + "documentation":"List of templates in Active Directory that are superseded by this template.
" + } + }, + "documentation":"v3 template schema that uses Key Storage Providers.
" + }, + "TemplateV4":{ + "type":"structure", + "required":[ + "CertificateValidity", + "EnrollmentFlags", + "Extensions", + "GeneralFlags", + "PrivateKeyAttributes", + "PrivateKeyFlags", + "SubjectNameFlags" + ], + "members":{ + "CertificateValidity":{ + "shape":"CertificateValidity", + "documentation":"Certificate validity describes the validity and renewal periods of a certificate.
" + }, + "EnrollmentFlags":{ + "shape":"EnrollmentFlagsV4", + "documentation":"Enrollment flags describe the enrollment settings for certificates using the existing private key and deleting expired or revoked certificates.
" + }, + "Extensions":{ + "shape":"ExtensionsV4", + "documentation":"Extensions describe the key usage extensions and application policies for a template.
" + }, + "GeneralFlags":{ + "shape":"GeneralFlagsV4", + "documentation":"General flags describe whether the template is used for computers or users and if the template can be used with autoenrollment.
" + }, + "HashAlgorithm":{ + "shape":"HashAlgorithm", + "documentation":"Specifies the hash algorithm used to hash the private key. Hash algorithm can only be specified when using Key Storage Providers.
" + }, + "PrivateKeyAttributes":{ + "shape":"PrivateKeyAttributesV4", + "documentation":"Private key attributes allow you to specify the minimal key length, key spec, key usage, and cryptographic providers for the private key of a certificate for v4 templates. V4 templates allow you to use either Key Storage Providers or Legacy Cryptographic Service Providers. You specify the cryptography provider category in private key flags.
" + }, + "PrivateKeyFlags":{ + "shape":"PrivateKeyFlagsV4", + "documentation":"Private key flags for v4 templates specify the client compatibility, if the private key can be exported, if user input is required when using a private key, if an alternate signature algorithm should be used, and if certificates are renewed using the same private key.
" + }, + "SubjectNameFlags":{ + "shape":"SubjectNameFlagsV4", + "documentation":"Subject name flags describe the subject name and subject alternate name that is included in a certificate.
" + }, + "SupersededTemplates":{ + "shape":"TemplateNameList", + "documentation":"List of templates in Active Directory that are superseded by this template.
" + } + }, + "documentation":"v4 template schema that can use either Legacy Cryptographic Providers or Key Storage Providers.
" + }, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"}, + "QuotaCode":{ + "shape":"String", + "documentation":"The code associated with the quota.
" + }, + "ServiceCode":{ + "shape":"String", + "documentation":"Identifies the originating service.
" + } + }, + "documentation":"The limit on the number of requests per second was exceeded.
", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "Timestamp":{"type":"timestamp"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"The Amazon Resource Name (ARN) that was returned when you created the resource.
", + "location":"uri", + "locationName":"ResourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"Specifies a list of tag keys that you want to remove from the specified resources.
", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UpdateTemplateGroupAccessControlEntryRequest":{ + "type":"structure", + "required":[ + "GroupSecurityIdentifier", + "TemplateArn" + ], + "members":{ + "AccessRights":{ + "shape":"AccessRights", + "documentation":"Allow or deny permissions for an Active Directory group to enroll or autoenroll certificates for a template.
" + }, + "GroupDisplayName":{ + "shape":"DisplayName", + "documentation":"Name of the Active Directory group. This name does not need to match the group name in Active Directory.
" + }, + "GroupSecurityIdentifier":{ + "shape":"GroupSecurityIdentifier", + "documentation":"Security identifier (SID) of the group object from Active Directory. The SID starts with \"S-\".
", + "location":"uri", + "locationName":"GroupSecurityIdentifier" + }, + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.
", + "location":"uri", + "locationName":"TemplateArn" + } + } + }, + "UpdateTemplateRequest":{ + "type":"structure", + "required":["TemplateArn"], + "members":{ + "Definition":{ + "shape":"TemplateDefinition", + "documentation":"Template configuration to define the information included in certificates. Define certificate validity and renewal periods, certificate request handling and enrollment options, key usage extensions, application policies, and cryptography settings.
" + }, + "ReenrollAllCertificateHolders":{ + "shape":"Boolean", + "documentation":"This setting allows the major version of a template to be increased automatically. All members of Active Directory groups that are allowed to enroll with a template will receive a new certificate issued using that template.
" + }, + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.
", + "location":"uri", + "locationName":"TemplateArn" + } + } + }, + "ValidationException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"}, + "Reason":{ + "shape":"ValidationExceptionReason", + "documentation":"The reason for the validation error. This won't be return for every validation exception.
" + } + }, + "documentation":"An input validation error occurred. For example, invalid characters in a template name, or if a pagination token is invalid.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "FIELD_VALIDATION_FAILED", + "INVALID_PERMISSION", + "INVALID_STATE", + "MISMATCHED_CONNECTOR", + "MISMATCHED_VPC", + "NO_CLIENT_TOKEN", + "UNKNOWN_OPERATION", + "OTHER" + ] + }, + "ValidityPeriod":{ + "type":"structure", + "required":[ + "Period", + "PeriodType" + ], + "members":{ + "Period":{ + "shape":"ValidityPeriodPeriodLong", + "documentation":"The numeric value for the validity period.
" + }, + "PeriodType":{ + "shape":"ValidityPeriodType", + "documentation":"The unit of time. You can select hours, days, weeks, months, and years.
" + } + }, + "documentation":"Information describing the end of the validity period of the certificate. This parameter sets the “Not After” date for the certificate. Certificate validity is the period of time during which a certificate is valid. Validity can be expressed as an explicit date and time when the certificate expires, or as a span of time after issuance, stated in hours, days, months, or years. For more information, see Validity in RFC 5280. This value is unaffected when ValidityNotBefore is also specified. For example, if Validity is set to 20 days in the future, the certificate will expire 20 days from issuance time regardless of the ValidityNotBefore value.
" + }, + "ValidityPeriodPeriodLong":{ + "type":"long", + "box":true, + "max":8766000, + "min":1 + }, + "ValidityPeriodType":{ + "type":"string", + "enum":[ + "HOURS", + "DAYS", + "WEEKS", + "MONTHS", + "YEARS" + ] + }, + "VpcInformation":{ + "type":"structure", + "required":["SecurityGroupIds"], + "members":{ + "SecurityGroupIds":{ + "shape":"SecurityGroupIdList", + "documentation":"The security groups used with the connector. You can use a maximum of 4 security groups with a connector.
" + } + }, + "documentation":"Information about your VPC and security groups used with the connector.
" + } + }, + "documentation":"Amazon Web Services Private CA Connector for Active Directory creates a connector between Amazon Web Services Private CA and Active Directory (AD) that enables you to provision security certificates for AD signed by a private CA that you own. For more information, see Amazon Web Services Private CA Connector for Active Directory.
" +} diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index 8828edabe9..61e390ac51 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -5860,6 +5860,10 @@ "WorkspaceSettings":{ "shape":"WorkspaceSettings", "documentation":"The workspace settings for the SageMaker Canvas application.
" + }, + "IdentityProviderOAuthSettings":{ + "shape":"IdentityProviderOAuthSettings", + "documentation":"The settings for connecting to an external data source with OAuth.
" } }, "documentation":"The SageMaker Canvas application settings.
" @@ -9875,6 +9879,13 @@ }, "documentation":"Describes the location of the channel data.
" }, + "DataSourceName":{ + "type":"string", + "enum":[ + "SalesforceGenie", + "Snowflake" + ] + }, "Database":{ "type":"string", "max":255, @@ -18456,6 +18467,29 @@ "max":128, "min":32 }, + "IdentityProviderOAuthSetting":{ + "type":"structure", + "members":{ + "DataSourceName":{ + "shape":"DataSourceName", + "documentation":"The name of the data source that you're connecting to. Canvas currently supports OAuth for Snowflake and Salesforce Data Cloud.
" + }, + "Status":{ + "shape":"FeatureStatus", + "documentation":"Describes whether OAuth for a data source is enabled or disabled in the Canvas application.
" + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"The ARN of an Amazon Web Services Secrets Manager secret that stores the credentials from your identity provider, such as the client ID and secret, authorization URL, and token URL.
" + } + }, + "documentation":"The Amazon SageMaker Canvas app setting where you configure OAuth for connecting to an external data source, such as Snowflake.
" + }, + "IdentityProviderOAuthSettings":{ + "type":"list", + "member":{"shape":"IdentityProviderOAuthSetting"}, + "max":20 + }, "Image":{ "type":"structure", "required":[ @@ -29217,7 +29251,7 @@ }, "JobDurationInSeconds":{ "shape":"JobDurationInSeconds", - "documentation":"Specifies the maximum duration of the job, in seconds. The maximum value is 7200.
" + "documentation":"Specifies the maximum duration of the job, in seconds. The maximum value is 18,000 seconds.
" }, "TrafficPattern":{ "shape":"TrafficPattern", diff --git a/docs/source/conf.py b/docs/source/conf.py index 55dd82fbb5..497c086296 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.31.' # The full version, including alpha/beta/rc tags. -release = '1.31.37' +release = '1.31.38' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/tests/functional/endpoint-rules/cleanrooms/endpoint-tests-1.json b/tests/functional/endpoint-rules/cleanrooms/endpoint-tests-1.json index a128e3bf3d..412e999790 100644 --- a/tests/functional/endpoint-rules/cleanrooms/endpoint-tests-1.json +++ b/tests/functional/endpoint-rules/cleanrooms/endpoint-tests-1.json @@ -1,53 +1,53 @@ { "testCases": [ { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cleanrooms-fips.us-gov-east-1.api.aws" + "url": "https://cleanrooms-fips.us-east-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cleanrooms-fips.us-gov-east-1.amazonaws.com" + "url": "https://cleanrooms-fips.us-east-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cleanrooms.us-gov-east-1.api.aws" + "url": "https://cleanrooms.us-east-1.api.aws" } }, "params": { - 
"Region": "us-gov-east-1", + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cleanrooms.us-gov-east-1.amazonaws.com" + "url": "https://cleanrooms.us-east-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false } @@ -105,101 +105,101 @@ } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://cleanrooms-fips.us-gov-east-1.api.aws" + } }, "params": { - "Region": "us-iso-east-1", + "Region": "us-gov-east-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cleanrooms-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://cleanrooms-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "Region": "us-iso-east-1", + "Region": "us-gov-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://cleanrooms.us-gov-east-1.api.aws" + } }, "params": { - "Region": "us-iso-east-1", + "Region": "us-gov-east-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled 
and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cleanrooms.us-iso-east-1.c2s.ic.gov" + "url": "https://cleanrooms.us-gov-east-1.amazonaws.com" } }, "params": { - "Region": "us-iso-east-1", + "Region": "us-gov-east-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://cleanrooms-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-east-1", + "Region": "us-iso-east-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cleanrooms-fips.us-east-1.amazonaws.com" + "url": "https://cleanrooms-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "us-east-1", + "Region": "us-iso-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://cleanrooms.us-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-east-1", + "Region": "us-iso-east-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cleanrooms.us-east-1.amazonaws.com" + "url": 
"https://cleanrooms.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "us-east-1", + "Region": "us-iso-east-1", "UseFIPS": false, "UseDualStack": false } @@ -253,7 +253,7 @@ } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -266,6 +266,19 @@ "Endpoint": "https://example.com" } }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, { "documentation": "For custom endpoint with fips enabled and dualstack disabled", "expect": { @@ -289,6 +302,12 @@ "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/tests/functional/endpoint-rules/neptunedata/endpoint-tests-1.json b/tests/functional/endpoint-rules/neptunedata/endpoint-tests-1.json new file mode 100644 index 0000000000..0c56273f24 --- /dev/null +++ b/tests/functional/endpoint-rules/neptunedata/endpoint-tests-1.json @@ -0,0 +1,314 @@ +{ + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://neptune-db-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://neptune-db-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS 
disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://neptune-db.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://neptune-db.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://neptune-db-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://neptune-db-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://neptune-db.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://neptune-db.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://neptune-db-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack 
disabled", + "expect": { + "endpoint": { + "url": "https://neptune-db-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://neptune-db.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://neptune-db.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://neptune-db-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://neptune-db.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and 
DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://neptune-db-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://neptune-db.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + 
"documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/tests/functional/endpoint-rules/pca-connector-ad/endpoint-tests-1.json b/tests/functional/endpoint-rules/pca-connector-ad/endpoint-tests-1.json new file mode 100644 index 0000000000..42408f7295 --- /dev/null +++ b/tests/functional/endpoint-rules/pca-connector-ad/endpoint-tests-1.json @@ -0,0 +1,314 @@ +{ + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and 
DualStack enabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For 
region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + 
"UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file