diff --git a/.changelog/7315b1709e3441a9958086aaf322d2e2.json b/.changelog/7315b1709e3441a9958086aaf322d2e2.json new file mode 100644 index 00000000000..7f63df76d9a --- /dev/null +++ b/.changelog/7315b1709e3441a9958086aaf322d2e2.json @@ -0,0 +1,8 @@ +{ + "id": "7315b170-9e34-41a9-9580-86aaf322d2e2", + "type": "feature", + "description": "This release introduces APIs to manage DbClusters and adds support for read replicas", + "modules": [ + "service/timestreaminfluxdb" + ] +} \ No newline at end of file diff --git a/.changelog/85b79a385fa44c0b959624202755afc9.json b/.changelog/85b79a385fa44c0b959624202755afc9.json new file mode 100644 index 00000000000..13caa9c32f6 --- /dev/null +++ b/.changelog/85b79a385fa44c0b959624202755afc9.json @@ -0,0 +1,8 @@ +{ + "id": "85b79a38-5fa4-4c0b-9596-24202755afc9", + "type": "feature", + "description": "Add ComputeRoleArn to CreateApp, UpdateApp, CreateBranch, and UpdateBranch, allowing caller to specify a role to be assumed by Amplify Hosting for server-side rendered applications.", + "modules": [ + "service/amplify" + ] +} \ No newline at end of file diff --git a/.changelog/fa2070c38ce849939f5717e5dcb5d201.json b/.changelog/fa2070c38ce849939f5717e5dcb5d201.json new file mode 100644 index 00000000000..3e5c4d79e0c --- /dev/null +++ b/.changelog/fa2070c38ce849939f5717e5dcb5d201.json @@ -0,0 +1,8 @@ +{ + "id": "fa2070c3-8ce8-4993-9f57-17e5dcb5d201", + "type": "feature", + "description": "Support replicationConfigArn in DMS DescribeApplicableIndividualAssessments API.", + "modules": [ + "service/databasemigrationservice" + ] +} \ No newline at end of file diff --git a/service/amplify/api_op_CreateApp.go b/service/amplify/api_op_CreateApp.go index 4373a9c8dc3..50817b60ce1 100644 --- a/service/amplify/api_op_CreateApp.go +++ b/service/amplify/api_op_CreateApp.go @@ -68,6 +68,14 @@ type CreateAppInput struct { // The cache configuration for the Amplify app. CacheConfig *types.CacheConfig + // The Amazon Resource Name (ARN) of the IAM role to assign to an SSR app. The SSR + // Compute role allows the Amplify Hosting compute service to securely access + // specific Amazon Web Services resources based on the role's permissions. For more + // information about the SSR Compute role, see [Adding an SSR Compute role]in the Amplify User Guide. + // + // [Adding an SSR Compute role]: https://docs.aws.amazon.com/latest/userguide/amplify-SSR-compute-role.html + ComputeRoleArn *string + // The custom HTTP headers for an Amplify app. CustomHeaders *string @@ -99,7 +107,7 @@ type CreateAppInput struct { // [Amplify Environment variables]: https://docs.aws.amazon.com/amplify/latest/userguide/amplify-console-environment-variables.html EnvironmentVariables map[string]string - // The AWS Identity and Access Management (IAM) service role for an Amplify app. + // The Amazon Resource Name (ARN) of the IAM service role for the Amplify app. IamServiceRoleArn *string // The OAuth token for a third-party source control system for an Amplify app. The diff --git a/service/amplify/api_op_CreateBranch.go b/service/amplify/api_op_CreateBranch.go index 0ce1cc82182..ab549796d48 100644 --- a/service/amplify/api_op_CreateBranch.go +++ b/service/amplify/api_op_CreateBranch.go @@ -62,6 +62,14 @@ type CreateBranchInput struct { // The build specification (build spec) for the branch. BuildSpec *string + // The Amazon Resource Name (ARN) of the IAM role to assign to a branch of an SSR + // app. 
The SSR Compute role allows the Amplify Hosting compute service to securely + // access specific Amazon Web Services resources based on the role's permissions. + // For more information about the SSR Compute role, see [Adding an SSR Compute role]in the Amplify User Guide. + // + // [Adding an SSR Compute role]: https://docs.aws.amazon.com/latest/userguide/amplify-SSR-compute-role.html + ComputeRoleArn *string + // The description for the branch. Description *string diff --git a/service/amplify/api_op_UpdateApp.go b/service/amplify/api_op_UpdateApp.go index 608b59247ff..35edb5f94a4 100644 --- a/service/amplify/api_op_UpdateApp.go +++ b/service/amplify/api_op_UpdateApp.go @@ -67,6 +67,14 @@ type UpdateAppInput struct { // The cache configuration for the Amplify app. CacheConfig *types.CacheConfig + // The Amazon Resource Name (ARN) of the IAM role to assign to an SSR app. The SSR + // Compute role allows the Amplify Hosting compute service to securely access + // specific Amazon Web Services resources based on the role's permissions. For more + // information about the SSR Compute role, see [Adding an SSR Compute role]in the Amplify User Guide. + // + // [Adding an SSR Compute role]: https://docs.aws.amazon.com/latest/userguide/amplify-SSR-compute-role.html + ComputeRoleArn *string + // The custom HTTP headers for an Amplify app. CustomHeaders *string @@ -92,7 +100,7 @@ type UpdateAppInput struct { // The environment variables for an Amplify app. EnvironmentVariables map[string]string - // The AWS Identity and Access Management (IAM) service role for an Amplify app. + // The Amazon Resource Name (ARN) of the IAM service role for the Amplify app. IamServiceRoleArn *string // The name for an Amplify app. diff --git a/service/amplify/api_op_UpdateBranch.go b/service/amplify/api_op_UpdateBranch.go index 02778dcb8b4..6188f278960 100644 --- a/service/amplify/api_op_UpdateBranch.go +++ b/service/amplify/api_op_UpdateBranch.go @@ -62,6 +62,14 @@ type UpdateBranchInput struct { // The build specification (build spec) for the branch. BuildSpec *string + // The Amazon Resource Name (ARN) of the IAM role to assign to a branch of an SSR + // app. The SSR Compute role allows the Amplify Hosting compute service to securely + // access specific Amazon Web Services resources based on the role's permissions. + // For more information about the SSR Compute role, see [Adding an SSR Compute role]in the Amplify User Guide. + // + // [Adding an SSR Compute role]: https://docs.aws.amazon.com/latest/userguide/amplify-SSR-compute-role.html + ComputeRoleArn *string + // The description for the branch. 
Description *string diff --git a/service/amplify/deserializers.go b/service/amplify/deserializers.go index 23e1862b2d2..d475f3a3ebf 100644 --- a/service/amplify/deserializers.go +++ b/service/amplify/deserializers.go @@ -6329,6 +6329,15 @@ func awsRestjson1_deserializeDocumentApp(v **types.App, value interface{}) error return err } + case "computeRoleArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ComputeRoleArn to be of type string, got %T instead", value) + } + sv.ComputeRoleArn = ptr.String(jtv) + } + case "createTime": if value != nil { switch jtv := value.(type) { @@ -7160,6 +7169,15 @@ func awsRestjson1_deserializeDocumentBranch(v **types.Branch, value interface{}) sv.BuildSpec = ptr.String(jtv) } + case "computeRoleArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ComputeRoleArn to be of type string, got %T instead", value) + } + sv.ComputeRoleArn = ptr.String(jtv) + } + case "createTime": if value != nil { switch jtv := value.(type) { diff --git a/service/amplify/serializers.go b/service/amplify/serializers.go index 291796b324f..ee6041b1b0c 100644 --- a/service/amplify/serializers.go +++ b/service/amplify/serializers.go @@ -125,6 +125,11 @@ func awsRestjson1_serializeOpDocumentCreateAppInput(v *CreateAppInput, value smi } } + if v.ComputeRoleArn != nil { + ok := object.Key("computeRoleArn") + ok.String(*v.ComputeRoleArn) + } + if v.CustomHeaders != nil { ok := object.Key("customHeaders") ok.String(*v.CustomHeaders) @@ -421,6 +426,11 @@ func awsRestjson1_serializeOpDocumentCreateBranchInput(v *CreateBranchInput, val ok.String(*v.BuildSpec) } + if v.ComputeRoleArn != nil { + ok := object.Key("computeRoleArn") + ok.String(*v.ComputeRoleArn) + } + if v.Description != nil { ok := object.Key("description") ok.String(*v.Description) @@ -3210,6 +3220,11 @@ func awsRestjson1_serializeOpDocumentUpdateAppInput(v *UpdateAppInput, value smi } } + if v.ComputeRoleArn != nil { + ok := object.Key("computeRoleArn") + ok.String(*v.ComputeRoleArn) + } + if v.CustomHeaders != nil { ok := object.Key("customHeaders") ok.String(*v.CustomHeaders) @@ -3399,6 +3414,11 @@ func awsRestjson1_serializeOpDocumentUpdateBranchInput(v *UpdateBranchInput, val ok.String(*v.BuildSpec) } + if v.ComputeRoleArn != nil { + ok := object.Key("computeRoleArn") + ok.String(*v.ComputeRoleArn) + } + if v.Description != nil { ok := object.Key("description") ok.String(*v.Description) diff --git a/service/amplify/types/types.go b/service/amplify/types/types.go index 9dc58a6e728..a0cb61cd013 100644 --- a/service/amplify/types/types.go +++ b/service/amplify/types/types.go @@ -101,6 +101,14 @@ type App struct { // configuration type , Amplify uses the default AMPLIFY_MANAGED setting. CacheConfig *CacheConfig + // The Amazon Resource Name (ARN) of the IAM role for an SSR app. The Compute role + // allows the Amplify Hosting compute service to securely access specific Amazon + // Web Services resources based on the role's permissions. For more information + // about the SSR Compute role, see [Adding an SSR Compute role]in the Amplify User Guide. + // + // [Adding an SSR Compute role]: https://docs.aws.amazon.com/latest/userguide/amplify-SSR-compute-role.html + ComputeRoleArn *string + // Describes the custom HTTP headers for the Amplify app. CustomHeaders *string @@ -114,8 +122,7 @@ type App struct { // branch from your Git repository. 
EnableBranchAutoDeletion *bool - // The AWS Identity and Access Management (IAM) service role for the Amazon - // Resource Name (ARN) of the Amplify app. + // The Amazon Resource Name (ARN) of the IAM service role for the Amplify app. IamServiceRoleArn *string // Describes the information about a production branch of the Amplify app. @@ -365,6 +372,14 @@ type Branch struct { // The build specification (build spec) content for the branch of an Amplify app. BuildSpec *string + // The Amazon Resource Name (ARN) of the IAM role for a branch of an SSR app. The + // Compute role allows the Amplify Hosting compute service to securely access + // specific Amazon Web Services resources based on the role's permissions. For more + // information about the SSR Compute role, see [Adding an SSR Compute role]in the Amplify User Guide. + // + // [Adding an SSR Compute role]: https://docs.aws.amazon.com/latest/userguide/amplify-SSR-compute-role.html + ComputeRoleArn *string + // The destination branch if the branch is a pull request branch. DestinationBranch *string diff --git a/service/databasemigrationservice/api_op_DescribeApplicableIndividualAssessments.go b/service/databasemigrationservice/api_op_DescribeApplicableIndividualAssessments.go index 2d0be86f38f..1324232a173 100644 --- a/service/databasemigrationservice/api_op_DescribeApplicableIndividualAssessments.go +++ b/service/databasemigrationservice/api_op_DescribeApplicableIndividualAssessments.go @@ -63,6 +63,10 @@ type DescribeApplicableIndividualAssessmentsInput struct { // support. MigrationType types.MigrationTypeValue + // Amazon Resource Name (ARN) of a serverless replication on which you want to + // base the default list of individual assessments. + ReplicationConfigArn *string + // ARN of a replication instance on which you want to base the default list of // individual assessments. 
ReplicationInstanceArn *string diff --git a/service/databasemigrationservice/serializers.go b/service/databasemigrationservice/serializers.go index 1e3b305a7e4..f547cf3e986 100644 --- a/service/databasemigrationservice/serializers.go +++ b/service/databasemigrationservice/serializers.go @@ -10398,6 +10398,11 @@ func awsAwsjson11_serializeOpDocumentDescribeApplicableIndividualAssessmentsInpu ok.String(string(v.MigrationType)) } + if v.ReplicationConfigArn != nil { + ok := object.Key("ReplicationConfigArn") + ok.String(*v.ReplicationConfigArn) + } + if v.ReplicationInstanceArn != nil { ok := object.Key("ReplicationInstanceArn") ok.String(*v.ReplicationInstanceArn) diff --git a/service/securityhub/internal/endpoints/endpoints.go b/service/securityhub/internal/endpoints/endpoints.go index 5d57fe4198d..e9f0ddd7e97 100644 --- a/service/securityhub/internal/endpoints/endpoints.go +++ b/service/securityhub/internal/endpoints/endpoints.go @@ -247,6 +247,15 @@ var defaultPartitions = endpoints.Partitions{ }: { Hostname: "securityhub.ap-southeast-5.api.aws", }, + endpoints.EndpointKey{ + Region: "ap-southeast-7", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-7", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "securityhub.ap-southeast-7.api.aws", + }, endpoints.EndpointKey{ Region: "ca-central-1", }: endpoints.Endpoint{}, @@ -400,6 +409,15 @@ var defaultPartitions = endpoints.Partitions{ }: { Hostname: "securityhub.me-south-1.api.aws", }, + endpoints.EndpointKey{ + Region: "mx-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "mx-central-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "securityhub.mx-central-1.api.aws", + }, endpoints.EndpointKey{ Region: "sa-east-1", }: endpoints.Endpoint{}, diff --git a/service/storagegateway/internal/endpoints/endpoints.go b/service/storagegateway/internal/endpoints/endpoints.go index ad3ea371f92..0dcd8be650e 100644 --- a/service/storagegateway/internal/endpoints/endpoints.go +++ b/service/storagegateway/internal/endpoints/endpoints.go @@ -244,6 +244,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "me-south-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "mx-central-1", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "sa-east-1", }: endpoints.Endpoint{}, diff --git a/service/timestreaminfluxdb/api_op_CreateDbCluster.go b/service/timestreaminfluxdb/api_op_CreateDbCluster.go new file mode 100644 index 00000000000..a044a219ceb --- /dev/null +++ b/service/timestreaminfluxdb/api_op_CreateDbCluster.go @@ -0,0 +1,263 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package timestreaminfluxdb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a new Timestream for InfluxDB cluster. 
+func (c *Client) CreateDbCluster(ctx context.Context, params *CreateDbClusterInput, optFns ...func(*Options)) (*CreateDbClusterOutput, error) { + if params == nil { + params = &CreateDbClusterInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateDbCluster", params, optFns, c.addOperationCreateDbClusterMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateDbClusterOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateDbClusterInput struct { + + // The amount of storage to allocate for your DB storage type in GiB (gibibytes). + // + // This member is required. + AllocatedStorage *int32 + + // The Timestream for InfluxDB DB instance type to run InfluxDB on. + // + // This member is required. + DbInstanceType types.DbInstanceType + + // Specifies the type of cluster to create. + // + // This member is required. + DeploymentType types.ClusterDeploymentType + + // The name that uniquely identifies the DB cluster when interacting with the + // Amazon Timestream for InfluxDB API and CLI commands. This name will also be a + // prefix included in the endpoint. DB cluster names must be unique per customer + // and per region. + // + // This member is required. + Name *string + + // The password of the initial admin user created in InfluxDB. This password will + // allow you to access the InfluxDB UI to perform various administrative tasks and + // also use the InfluxDB CLI to create an operator token. These attributes will be + // stored in a secret created in Amazon Web Services Secrets Manager in your + // account. + // + // This member is required. + Password *string + + // A list of VPC security group IDs to associate with the Timestream for InfluxDB + // cluster. + // + // This member is required. + VpcSecurityGroupIds []string + + // A list of VPC subnet IDs to associate with the DB cluster. Provide at least two + // VPC subnet IDs in different Availability Zones when deploying with a Multi-AZ + // standby. + // + // This member is required. + VpcSubnetIds []string + + // The name of the initial InfluxDB bucket. All InfluxDB data is stored in a + // bucket. A bucket combines the concept of a database and a retention period (the + // duration of time that each data point persists). A bucket belongs to an + // organization. + Bucket *string + + // The ID of the DB parameter group to assign to your DB cluster. DB parameter + // groups specify how the database is configured. For example, DB parameter groups + // can specify the limit for query concurrency. + DbParameterGroupIdentifier *string + + // The Timestream for InfluxDB DB storage type to read and write InfluxDB data. + // + // You can choose between three different types of provisioned Influx IOPS + // Included storage according to your workload requirements: + // + // - Influx I/O Included 3000 IOPS + // + // - Influx I/O Included 12000 IOPS + // + // - Influx I/O Included 16000 IOPS + DbStorageType types.DbStorageType + + // Specifies the behavior of failure recovery when the primary node of the cluster + // fails. + FailoverMode types.FailoverMode + + // Configuration for sending InfluxDB engine logs to a specified S3 bucket. + LogDeliveryConfiguration *types.LogDeliveryConfiguration + + // Specifies whether the network type of the Timestream for InfluxDB cluster is + // IPv4, which can communicate over IPv4 protocol only, or DUAL, which can + // communicate over both IPv4 and IPv6 protocols. 
+ NetworkType types.NetworkType + + // The name of the initial organization for the initial admin user in InfluxDB. An + // InfluxDB organization is a workspace for a group of users. + Organization *string + + // The port number on which InfluxDB accepts connections. + // + // Valid Values: 1024-65535 + // + // Default: 8086 + // + // Constraints: The value can't be 2375-2376, 7788-7799, 8090, or 51678-51680 + Port *int32 + + // Configures the Timestream for InfluxDB cluster with a public IP to facilitate + // access from outside the VPC. + PubliclyAccessible *bool + + // A list of key-value pairs to associate with the DB instance. + Tags map[string]string + + // The username of the initial admin user created in InfluxDB. Must start with a + // letter and can't end with a hyphen or contain two consecutive hyphens. For + // example, my-user1. This username will allow you to access the InfluxDB UI to + // perform various administrative tasks and also use the InfluxDB CLI to create an + // operator token. These attributes will be stored in a secret created in Amazon + // Web Services Secrets Manager in your account. + Username *string + + noSmithyDocumentSerde +} + +type CreateDbClusterOutput struct { + + // A service-generated unique identifier. + DbClusterId *string + + // The status of the DB cluster. + DbClusterStatus types.ClusterStatus + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateDbClusterMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpCreateDbCluster{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpCreateDbCluster{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateDbCluster"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpCreateDbClusterValidationMiddleware(stack); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opCreateDbCluster(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateDbCluster(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateDbCluster", + } +} diff --git a/service/timestreaminfluxdb/api_op_CreateDbInstance.go b/service/timestreaminfluxdb/api_op_CreateDbInstance.go index 2608fb95fbb..ffc9fbc93ec 100644 --- a/service/timestreaminfluxdb/api_op_CreateDbInstance.go +++ b/service/timestreaminfluxdb/api_op_CreateDbInstance.go @@ -161,6 +161,9 @@ type CreateDbInstanceOutput struct { // The Availability Zone in which the DB instance resides. AvailabilityZone *string + // Specifies the DbCluster to which this DbInstance belongs to. + DbClusterId *string + // The Timestream for InfluxDB instance type that InfluxDB runs on. DbInstanceType types.DbInstanceType @@ -183,6 +186,9 @@ type CreateDbInstanceOutput struct { // organization, bucket, username, and password. InfluxAuthParametersSecretArn *string + // Specifies the DbInstance's role in the cluster. + InstanceMode types.InstanceMode + // Configuration for sending InfluxDB engine logs to send to specified S3 bucket. LogDeliveryConfiguration *types.LogDeliveryConfiguration diff --git a/service/timestreaminfluxdb/api_op_DeleteDbCluster.go b/service/timestreaminfluxdb/api_op_DeleteDbCluster.go new file mode 100644 index 00000000000..f9451d796ce --- /dev/null +++ b/service/timestreaminfluxdb/api_op_DeleteDbCluster.go @@ -0,0 +1,157 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package timestreaminfluxdb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a Timestream for InfluxDB cluster. +func (c *Client) DeleteDbCluster(ctx context.Context, params *DeleteDbClusterInput, optFns ...func(*Options)) (*DeleteDbClusterOutput, error) { + if params == nil { + params = &DeleteDbClusterInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteDbCluster", params, optFns, c.addOperationDeleteDbClusterMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteDbClusterOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteDbClusterInput struct { + + // Service-generated unique identifier of the DB cluster. + // + // This member is required. + DbClusterId *string + + noSmithyDocumentSerde +} + +type DeleteDbClusterOutput struct { + + // The status of the DB cluster. + DbClusterStatus types.ClusterStatus + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteDbClusterMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpDeleteDbCluster{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDeleteDbCluster{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteDbCluster"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDeleteDbClusterValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteDbCluster(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteDbCluster(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteDbCluster", + } +} diff --git a/service/timestreaminfluxdb/api_op_DeleteDbInstance.go b/service/timestreaminfluxdb/api_op_DeleteDbInstance.go index d0b7ef1bfec..31f0b7e8d3b 100644 --- a/service/timestreaminfluxdb/api_op_DeleteDbInstance.go +++ b/service/timestreaminfluxdb/api_op_DeleteDbInstance.go @@ -66,6 +66,9 @@ type DeleteDbInstanceOutput struct { // The Availability Zone in which the DB instance resides. 
AvailabilityZone *string + // Specifies the DbCluster to which this DbInstance belongs to. + DbClusterId *string + // The Timestream for InfluxDB instance type that InfluxDB runs on. DbInstanceType types.DbInstanceType @@ -88,6 +91,9 @@ type DeleteDbInstanceOutput struct { // organization, bucket, username, and password. InfluxAuthParametersSecretArn *string + // Specifies the DbInstance's role in the cluster. + InstanceMode types.InstanceMode + // Configuration for sending InfluxDB engine logs to send to specified S3 bucket. LogDeliveryConfiguration *types.LogDeliveryConfiguration diff --git a/service/timestreaminfluxdb/api_op_GetDbCluster.go b/service/timestreaminfluxdb/api_op_GetDbCluster.go new file mode 100644 index 00000000000..5691838dc86 --- /dev/null +++ b/service/timestreaminfluxdb/api_op_GetDbCluster.go @@ -0,0 +1,225 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package timestreaminfluxdb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves information about a Timestream for InfluxDB cluster. +func (c *Client) GetDbCluster(ctx context.Context, params *GetDbClusterInput, optFns ...func(*Options)) (*GetDbClusterOutput, error) { + if params == nil { + params = &GetDbClusterInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetDbCluster", params, optFns, c.addOperationGetDbClusterMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetDbClusterOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetDbClusterInput struct { + + // Service-generated unique identifier of the DB cluster to retrieve. + // + // This member is required. + DbClusterId *string + + noSmithyDocumentSerde +} + +type GetDbClusterOutput struct { + + // The Amazon Resource Name (ARN) of the DB cluster. + // + // This member is required. + Arn *string + + // Service-generated unique identifier of the DB cluster to retrieve. + // + // This member is required. + Id *string + + // Customer-supplied name of the Timestream for InfluxDB cluster. + // + // This member is required. + Name *string + + // The amount of storage allocated for your DB storage type (in gibibytes). + AllocatedStorage *int32 + + // The Timestream for InfluxDB instance type that InfluxDB runs on. + DbInstanceType types.DbInstanceType + + // The ID of the DB parameter group assigned to your DB cluster. + DbParameterGroupIdentifier *string + + // The Timestream for InfluxDB DB storage type that InfluxDB stores data on. + DbStorageType types.DbStorageType + + // Deployment type of the DB cluster. + DeploymentType types.ClusterDeploymentType + + // The endpoint used to connect to the Timestream for InfluxDB cluster for write + // and read operations. + Endpoint *string + + // The configured failover mode for the DB cluster. + FailoverMode types.FailoverMode + + // The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager + // secret containing the initial InfluxDB authorization parameters. The secret + // value is a JSON formatted key-value pair holding InfluxDB authorization values: + // organization, bucket, username, and password. + InfluxAuthParametersSecretArn *string + + // Configuration for sending InfluxDB engine logs to send to specified S3 bucket. 
+ LogDeliveryConfiguration *types.LogDeliveryConfiguration + + // Specifies whether the network type of the Timestream for InfluxDB cluster is + // IPv4, which can communicate over IPv4 protocol only, or DUAL, which can + // communicate over both IPv4 and IPv6 protocols. + NetworkType types.NetworkType + + // The port number on which InfluxDB accepts connections. + Port *int32 + + // Indicates if the DB cluster has a public IP to facilitate access from outside + // the VPC. + PubliclyAccessible *bool + + // The endpoint used to connect to the Timestream for InfluxDB cluster for + // read-only operations. + ReaderEndpoint *string + + // The status of the DB cluster. + Status types.ClusterStatus + + // A list of VPC security group IDs associated with the DB cluster. + VpcSecurityGroupIds []string + + // A list of VPC subnet IDs associated with the DB cluster. + VpcSubnetIds []string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetDbClusterMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpGetDbCluster{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpGetDbCluster{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetDbCluster"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpGetDbClusterValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetDbCluster(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = 
addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetDbCluster(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetDbCluster", + } +} diff --git a/service/timestreaminfluxdb/api_op_GetDbInstance.go b/service/timestreaminfluxdb/api_op_GetDbInstance.go index 45b59e3ecbf..07bbbe29aec 100644 --- a/service/timestreaminfluxdb/api_op_GetDbInstance.go +++ b/service/timestreaminfluxdb/api_op_GetDbInstance.go @@ -66,6 +66,9 @@ type GetDbInstanceOutput struct { // The Availability Zone in which the DB instance resides. AvailabilityZone *string + // Specifies the DbCluster to which this DbInstance belongs to. + DbClusterId *string + // The Timestream for InfluxDB instance type that InfluxDB runs on. DbInstanceType types.DbInstanceType @@ -88,6 +91,9 @@ type GetDbInstanceOutput struct { // organization, bucket, username, and password. InfluxAuthParametersSecretArn *string + // Specifies the DbInstance's role in the cluster. + InstanceMode types.InstanceMode + // Configuration for sending InfluxDB engine logs to send to specified S3 bucket. LogDeliveryConfiguration *types.LogDeliveryConfiguration diff --git a/service/timestreaminfluxdb/api_op_ListDbClusters.go b/service/timestreaminfluxdb/api_op_ListDbClusters.go new file mode 100644 index 00000000000..6255af0e1bf --- /dev/null +++ b/service/timestreaminfluxdb/api_op_ListDbClusters.go @@ -0,0 +1,261 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package timestreaminfluxdb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of Timestream for InfluxDB DB clusters. +func (c *Client) ListDbClusters(ctx context.Context, params *ListDbClustersInput, optFns ...func(*Options)) (*ListDbClustersOutput, error) { + if params == nil { + params = &ListDbClustersInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListDbClusters", params, optFns, c.addOperationListDbClustersMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListDbClustersOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListDbClustersInput struct { + + // The maximum number of items to return in the output. If the total number of + // items available is more than the value specified, a nextToken is provided in the + // output. To resume pagination, provide the nextToken value as an argument of a + // subsequent API invocation. + MaxResults *int32 + + // The pagination token. To resume pagination, provide the nextToken value as an + // argument of a subsequent API invocation. + NextToken *string + + noSmithyDocumentSerde +} + +type ListDbClustersOutput struct { + + // A list of Timestream for InfluxDB cluster summaries. + // + // This member is required. + Items []types.DbClusterSummary + + // Token from a previous call of the operation. When this value is provided, the + // service returns results from where the previous response left off. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListDbClustersMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpListDbClusters{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpListDbClusters{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListDbClusters"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListDbClusters(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListDbClustersPaginatorOptions is the paginator options for ListDbClusters +type ListDbClustersPaginatorOptions struct { + // The maximum number of items to return in the output. If the total number of + // items available is more than the value specified, a nextToken is provided in the + // output. To resume pagination, provide the nextToken value as an argument of a + // subsequent API invocation. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListDbClustersPaginator is a paginator for ListDbClusters +type ListDbClustersPaginator struct { + options ListDbClustersPaginatorOptions + client ListDbClustersAPIClient + params *ListDbClustersInput + nextToken *string + firstPage bool +} + +// NewListDbClustersPaginator returns a new ListDbClustersPaginator +func NewListDbClustersPaginator(client ListDbClustersAPIClient, params *ListDbClustersInput, optFns ...func(*ListDbClustersPaginatorOptions)) *ListDbClustersPaginator { + if params == nil { + params = &ListDbClustersInput{} + } + + options := ListDbClustersPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListDbClustersPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListDbClustersPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListDbClusters page. +func (p *ListDbClustersPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListDbClustersOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListDbClusters(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListDbClustersAPIClient is a client that implements the ListDbClusters +// operation. +type ListDbClustersAPIClient interface { + ListDbClusters(context.Context, *ListDbClustersInput, ...func(*Options)) (*ListDbClustersOutput, error) +} + +var _ ListDbClustersAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListDbClusters(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListDbClusters", + } +} diff --git a/service/timestreaminfluxdb/api_op_ListDbInstancesForCluster.go b/service/timestreaminfluxdb/api_op_ListDbInstancesForCluster.go new file mode 100644 index 00000000000..ab0f09ec91b --- /dev/null +++ b/service/timestreaminfluxdb/api_op_ListDbInstancesForCluster.go @@ -0,0 +1,271 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package timestreaminfluxdb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of Timestream for InfluxDB clusters. 
+func (c *Client) ListDbInstancesForCluster(ctx context.Context, params *ListDbInstancesForClusterInput, optFns ...func(*Options)) (*ListDbInstancesForClusterOutput, error) { + if params == nil { + params = &ListDbInstancesForClusterInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListDbInstancesForCluster", params, optFns, c.addOperationListDbInstancesForClusterMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListDbInstancesForClusterOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListDbInstancesForClusterInput struct { + + // Service-generated unique identifier of the DB cluster. + // + // This member is required. + DbClusterId *string + + // The maximum number of items to return in the output. If the total number of + // items available is more than the value specified, a nextToken is provided in the + // output. To resume pagination, provide the nextToken value as an argument of a + // subsequent API invocation. + MaxResults *int32 + + // The pagination token. To resume pagination, provide the nextToken value as an + // argument of a subsequent API invocation. + NextToken *string + + noSmithyDocumentSerde +} + +type ListDbInstancesForClusterOutput struct { + + // A list of Timestream for InfluxDB instance summaries belonging to the cluster. + // + // This member is required. + Items []types.DbInstanceForClusterSummary + + // Token from a previous call of the operation. When this value is provided, the + // service returns results from where the previous response left off. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListDbInstancesForClusterMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpListDbInstancesForCluster{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpListDbInstancesForCluster{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListDbInstancesForCluster"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + 
return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpListDbInstancesForClusterValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListDbInstancesForCluster(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListDbInstancesForClusterPaginatorOptions is the paginator options for +// ListDbInstancesForCluster +type ListDbInstancesForClusterPaginatorOptions struct { + // The maximum number of items to return in the output. If the total number of + // items available is more than the value specified, a nextToken is provided in the + // output. To resume pagination, provide the nextToken value as an argument of a + // subsequent API invocation. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListDbInstancesForClusterPaginator is a paginator for ListDbInstancesForCluster +type ListDbInstancesForClusterPaginator struct { + options ListDbInstancesForClusterPaginatorOptions + client ListDbInstancesForClusterAPIClient + params *ListDbInstancesForClusterInput + nextToken *string + firstPage bool +} + +// NewListDbInstancesForClusterPaginator returns a new +// ListDbInstancesForClusterPaginator +func NewListDbInstancesForClusterPaginator(client ListDbInstancesForClusterAPIClient, params *ListDbInstancesForClusterInput, optFns ...func(*ListDbInstancesForClusterPaginatorOptions)) *ListDbInstancesForClusterPaginator { + if params == nil { + params = &ListDbInstancesForClusterInput{} + } + + options := ListDbInstancesForClusterPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListDbInstancesForClusterPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListDbInstancesForClusterPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListDbInstancesForCluster page. +func (p *ListDbInstancesForClusterPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListDbInstancesForClusterOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) 
+ result, err := p.client.ListDbInstancesForCluster(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListDbInstancesForClusterAPIClient is a client that implements the +// ListDbInstancesForCluster operation. +type ListDbInstancesForClusterAPIClient interface { + ListDbInstancesForCluster(context.Context, *ListDbInstancesForClusterInput, ...func(*Options)) (*ListDbInstancesForClusterOutput, error) +} + +var _ ListDbInstancesForClusterAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListDbInstancesForCluster(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListDbInstancesForCluster", + } +} diff --git a/service/timestreaminfluxdb/api_op_UpdateDbCluster.go b/service/timestreaminfluxdb/api_op_UpdateDbCluster.go new file mode 100644 index 00000000000..1da09f1e63e --- /dev/null +++ b/service/timestreaminfluxdb/api_op_UpdateDbCluster.go @@ -0,0 +1,172 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package timestreaminfluxdb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Updates a Timestream for InfluxDB cluster. +func (c *Client) UpdateDbCluster(ctx context.Context, params *UpdateDbClusterInput, optFns ...func(*Options)) (*UpdateDbClusterOutput, error) { + if params == nil { + params = &UpdateDbClusterInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateDbCluster", params, optFns, c.addOperationUpdateDbClusterMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateDbClusterOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateDbClusterInput struct { + + // Service-generated unique identifier of the DB cluster to update. + // + // This member is required. + DbClusterId *string + + // Update the DB cluster to use the specified DB instance Type. + DbInstanceType types.DbInstanceType + + // Update the DB cluster to use the specified DB parameter group. + DbParameterGroupIdentifier *string + + // Update the DB cluster's failover behavior. + FailoverMode types.FailoverMode + + // The log delivery configuration to apply to the DB cluster. + LogDeliveryConfiguration *types.LogDeliveryConfiguration + + // Update the DB cluster to use the specified port. + Port *int32 + + noSmithyDocumentSerde +} + +type UpdateDbClusterOutput struct { + + // The status of the DB cluster. + DbClusterStatus types.ClusterStatus + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateDbClusterMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateDbCluster{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateDbCluster{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateDbCluster"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpUpdateDbClusterValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateDbCluster(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateDbCluster(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateDbCluster", + } +} diff --git a/service/timestreaminfluxdb/api_op_UpdateDbInstance.go b/service/timestreaminfluxdb/api_op_UpdateDbInstance.go index df113835585..3febae22a96 100644 --- a/service/timestreaminfluxdb/api_op_UpdateDbInstance.go +++ b/service/timestreaminfluxdb/api_op_UpdateDbInstance.go @@ -99,6 +99,9 @@ type UpdateDbInstanceOutput struct { // The Availability Zone in which the DB instance resides. 
AvailabilityZone *string + // Specifies the DbCluster to which this DbInstance belongs. + DbClusterId *string + // The Timestream for InfluxDB instance type that InfluxDB runs on. DbInstanceType types.DbInstanceType @@ -121,6 +124,9 @@ type UpdateDbInstanceOutput struct { // organization, bucket, username, and password. InfluxAuthParametersSecretArn *string + // Specifies the DbInstance's role in the cluster. + InstanceMode types.InstanceMode + // Configuration for sending InfluxDB engine logs to send to specified S3 bucket. LogDeliveryConfiguration *types.LogDeliveryConfiguration diff --git a/service/timestreaminfluxdb/deserializers.go b/service/timestreaminfluxdb/deserializers.go index cd83c74e05e..6a473c2cdc8 100644 --- a/service/timestreaminfluxdb/deserializers.go +++ b/service/timestreaminfluxdb/deserializers.go @@ -30,14 +30,14 @@ func deserializeS3Expires(v string) (*time.Time, error) { return &t, nil } -type awsAwsjson10_deserializeOpCreateDbInstance struct { +type awsAwsjson10_deserializeOpCreateDbCluster struct { } -func (*awsAwsjson10_deserializeOpCreateDbInstance) ID() string { +func (*awsAwsjson10_deserializeOpCreateDbCluster) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson10_deserializeOpCreateDbInstance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson10_deserializeOpCreateDbCluster) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -55,9 +55,9 @@ func (m *awsAwsjson10_deserializeOpCreateDbInstance) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson10_deserializeOpErrorCreateDbInstance(response, &metadata) + return out, metadata, awsAwsjson10_deserializeOpErrorCreateDbCluster(response, &metadata) } - output := &CreateDbInstanceOutput{} + output := &CreateDbClusterOutput{} out.Result = output var buff [1024]byte @@ -77,7 +77,7 @@ func (m *awsAwsjson10_deserializeOpCreateDbInstance) HandleDeserialize(ctx conte return out, metadata, err } - err = awsAwsjson10_deserializeOpDocumentCreateDbInstanceOutput(&output, shape) + err = awsAwsjson10_deserializeOpDocumentCreateDbClusterOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -91,7 +91,7 @@ func (m *awsAwsjson10_deserializeOpCreateDbInstance) HandleDeserialize(ctx conte return out, metadata, err } -func awsAwsjson10_deserializeOpErrorCreateDbInstance(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson10_deserializeOpErrorCreateDbCluster(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -159,14 +159,14 @@ func awsAwsjson10_deserializeOpErrorCreateDbInstance(response *smithyhttp.Respon } } -type awsAwsjson10_deserializeOpCreateDbParameterGroup struct { +type awsAwsjson10_deserializeOpCreateDbInstance struct { } -func (*awsAwsjson10_deserializeOpCreateDbParameterGroup) ID() string { +func (*awsAwsjson10_deserializeOpCreateDbInstance) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson10_deserializeOpCreateDbParameterGroup) HandleDeserialize(ctx
context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson10_deserializeOpCreateDbInstance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -184,9 +184,9 @@ func (m *awsAwsjson10_deserializeOpCreateDbParameterGroup) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson10_deserializeOpErrorCreateDbParameterGroup(response, &metadata) + return out, metadata, awsAwsjson10_deserializeOpErrorCreateDbInstance(response, &metadata) } - output := &CreateDbParameterGroupOutput{} + output := &CreateDbInstanceOutput{} out.Result = output var buff [1024]byte @@ -206,7 +206,7 @@ func (m *awsAwsjson10_deserializeOpCreateDbParameterGroup) HandleDeserialize(ctx return out, metadata, err } - err = awsAwsjson10_deserializeOpDocumentCreateDbParameterGroupOutput(&output, shape) + err = awsAwsjson10_deserializeOpDocumentCreateDbInstanceOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -220,7 +220,7 @@ func (m *awsAwsjson10_deserializeOpCreateDbParameterGroup) HandleDeserialize(ctx return out, metadata, err } -func awsAwsjson10_deserializeOpErrorCreateDbParameterGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson10_deserializeOpErrorCreateDbInstance(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -288,14 +288,14 @@ func awsAwsjson10_deserializeOpErrorCreateDbParameterGroup(response *smithyhttp. 
} } -type awsAwsjson10_deserializeOpDeleteDbInstance struct { +type awsAwsjson10_deserializeOpCreateDbParameterGroup struct { } -func (*awsAwsjson10_deserializeOpDeleteDbInstance) ID() string { +func (*awsAwsjson10_deserializeOpCreateDbParameterGroup) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson10_deserializeOpDeleteDbInstance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson10_deserializeOpCreateDbParameterGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -313,9 +313,9 @@ func (m *awsAwsjson10_deserializeOpDeleteDbInstance) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson10_deserializeOpErrorDeleteDbInstance(response, &metadata) + return out, metadata, awsAwsjson10_deserializeOpErrorCreateDbParameterGroup(response, &metadata) } - output := &DeleteDbInstanceOutput{} + output := &CreateDbParameterGroupOutput{} out.Result = output var buff [1024]byte @@ -335,7 +335,7 @@ func (m *awsAwsjson10_deserializeOpDeleteDbInstance) HandleDeserialize(ctx conte return out, metadata, err } - err = awsAwsjson10_deserializeOpDocumentDeleteDbInstanceOutput(&output, shape) + err = awsAwsjson10_deserializeOpDocumentCreateDbParameterGroupOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -349,7 +349,7 @@ func (m *awsAwsjson10_deserializeOpDeleteDbInstance) HandleDeserialize(ctx conte return out, metadata, err } -func awsAwsjson10_deserializeOpErrorDeleteDbInstance(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson10_deserializeOpErrorCreateDbParameterGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -398,6 +398,9 @@ func awsAwsjson10_deserializeOpErrorDeleteDbInstance(response *smithyhttp.Respon case strings.EqualFold("ResourceNotFoundException", errorCode): return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + case strings.EqualFold("ServiceQuotaExceededException", errorCode): + return awsAwsjson10_deserializeErrorServiceQuotaExceededException(response, errorBody) + case strings.EqualFold("ThrottlingException", errorCode): return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) @@ -414,14 +417,14 @@ func awsAwsjson10_deserializeOpErrorDeleteDbInstance(response *smithyhttp.Respon } } -type awsAwsjson10_deserializeOpGetDbInstance struct { +type awsAwsjson10_deserializeOpDeleteDbCluster struct { } -func (*awsAwsjson10_deserializeOpGetDbInstance) ID() string { +func (*awsAwsjson10_deserializeOpDeleteDbCluster) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson10_deserializeOpGetDbInstance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson10_deserializeOpDeleteDbCluster) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = 
next.HandleDeserialize(ctx, in) @@ -439,9 +442,9 @@ func (m *awsAwsjson10_deserializeOpGetDbInstance) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson10_deserializeOpErrorGetDbInstance(response, &metadata) + return out, metadata, awsAwsjson10_deserializeOpErrorDeleteDbCluster(response, &metadata) } - output := &GetDbInstanceOutput{} + output := &DeleteDbClusterOutput{} out.Result = output var buff [1024]byte @@ -461,7 +464,7 @@ func (m *awsAwsjson10_deserializeOpGetDbInstance) HandleDeserialize(ctx context. return out, metadata, err } - err = awsAwsjson10_deserializeOpDocumentGetDbInstanceOutput(&output, shape) + err = awsAwsjson10_deserializeOpDocumentDeleteDbClusterOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -475,7 +478,7 @@ func (m *awsAwsjson10_deserializeOpGetDbInstance) HandleDeserialize(ctx context. return out, metadata, err } -func awsAwsjson10_deserializeOpErrorGetDbInstance(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson10_deserializeOpErrorDeleteDbCluster(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -515,6 +518,9 @@ func awsAwsjson10_deserializeOpErrorGetDbInstance(response *smithyhttp.Response, case strings.EqualFold("AccessDeniedException", errorCode): return awsAwsjson10_deserializeErrorAccessDeniedException(response, errorBody) + case strings.EqualFold("ConflictException", errorCode): + return awsAwsjson10_deserializeErrorConflictException(response, errorBody) + case strings.EqualFold("InternalServerException", errorCode): return awsAwsjson10_deserializeErrorInternalServerException(response, errorBody) @@ -537,14 +543,14 @@ func awsAwsjson10_deserializeOpErrorGetDbInstance(response *smithyhttp.Response, } } -type awsAwsjson10_deserializeOpGetDbParameterGroup struct { +type awsAwsjson10_deserializeOpDeleteDbInstance struct { } -func (*awsAwsjson10_deserializeOpGetDbParameterGroup) ID() string { +func (*awsAwsjson10_deserializeOpDeleteDbInstance) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson10_deserializeOpGetDbParameterGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson10_deserializeOpDeleteDbInstance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -562,9 +568,9 @@ func (m *awsAwsjson10_deserializeOpGetDbParameterGroup) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson10_deserializeOpErrorGetDbParameterGroup(response, &metadata) + return out, metadata, awsAwsjson10_deserializeOpErrorDeleteDbInstance(response, &metadata) } - output := &GetDbParameterGroupOutput{} + output := &DeleteDbInstanceOutput{} out.Result = output var buff [1024]byte @@ -584,7 +590,7 @@ func (m *awsAwsjson10_deserializeOpGetDbParameterGroup) HandleDeserialize(ctx co return out, metadata, err } - err = awsAwsjson10_deserializeOpDocumentGetDbParameterGroupOutput(&output, shape) + err = 
awsAwsjson10_deserializeOpDocumentDeleteDbInstanceOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -598,7 +604,7 @@ func (m *awsAwsjson10_deserializeOpGetDbParameterGroup) HandleDeserialize(ctx co return out, metadata, err } -func awsAwsjson10_deserializeOpErrorGetDbParameterGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson10_deserializeOpErrorDeleteDbInstance(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -638,6 +644,9 @@ func awsAwsjson10_deserializeOpErrorGetDbParameterGroup(response *smithyhttp.Res case strings.EqualFold("AccessDeniedException", errorCode): return awsAwsjson10_deserializeErrorAccessDeniedException(response, errorBody) + case strings.EqualFold("ConflictException", errorCode): + return awsAwsjson10_deserializeErrorConflictException(response, errorBody) + case strings.EqualFold("InternalServerException", errorCode): return awsAwsjson10_deserializeErrorInternalServerException(response, errorBody) @@ -660,14 +669,14 @@ func awsAwsjson10_deserializeOpErrorGetDbParameterGroup(response *smithyhttp.Res } } -type awsAwsjson10_deserializeOpListDbInstances struct { +type awsAwsjson10_deserializeOpGetDbCluster struct { } -func (*awsAwsjson10_deserializeOpListDbInstances) ID() string { +func (*awsAwsjson10_deserializeOpGetDbCluster) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson10_deserializeOpListDbInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson10_deserializeOpGetDbCluster) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -685,9 +694,9 @@ func (m *awsAwsjson10_deserializeOpListDbInstances) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson10_deserializeOpErrorListDbInstances(response, &metadata) + return out, metadata, awsAwsjson10_deserializeOpErrorGetDbCluster(response, &metadata) } - output := &ListDbInstancesOutput{} + output := &GetDbClusterOutput{} out.Result = output var buff [1024]byte @@ -707,7 +716,7 @@ func (m *awsAwsjson10_deserializeOpListDbInstances) HandleDeserialize(ctx contex return out, metadata, err } - err = awsAwsjson10_deserializeOpDocumentListDbInstancesOutput(&output, shape) + err = awsAwsjson10_deserializeOpDocumentGetDbClusterOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -721,7 +730,7 @@ func (m *awsAwsjson10_deserializeOpListDbInstances) HandleDeserialize(ctx contex return out, metadata, err } -func awsAwsjson10_deserializeOpErrorListDbInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson10_deserializeOpErrorGetDbCluster(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -783,14 +792,14 @@ func awsAwsjson10_deserializeOpErrorListDbInstances(response 
*smithyhttp.Respons } } -type awsAwsjson10_deserializeOpListDbParameterGroups struct { +type awsAwsjson10_deserializeOpGetDbInstance struct { } -func (*awsAwsjson10_deserializeOpListDbParameterGroups) ID() string { +func (*awsAwsjson10_deserializeOpGetDbInstance) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson10_deserializeOpListDbParameterGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson10_deserializeOpGetDbInstance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -808,9 +817,9 @@ func (m *awsAwsjson10_deserializeOpListDbParameterGroups) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson10_deserializeOpErrorListDbParameterGroups(response, &metadata) + return out, metadata, awsAwsjson10_deserializeOpErrorGetDbInstance(response, &metadata) } - output := &ListDbParameterGroupsOutput{} + output := &GetDbInstanceOutput{} out.Result = output var buff [1024]byte @@ -830,7 +839,7 @@ func (m *awsAwsjson10_deserializeOpListDbParameterGroups) HandleDeserialize(ctx return out, metadata, err } - err = awsAwsjson10_deserializeOpDocumentListDbParameterGroupsOutput(&output, shape) + err = awsAwsjson10_deserializeOpDocumentGetDbInstanceOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -844,7 +853,7 @@ func (m *awsAwsjson10_deserializeOpListDbParameterGroups) HandleDeserialize(ctx return out, metadata, err } -func awsAwsjson10_deserializeOpErrorListDbParameterGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson10_deserializeOpErrorGetDbInstance(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -906,14 +915,14 @@ func awsAwsjson10_deserializeOpErrorListDbParameterGroups(response *smithyhttp.R } } -type awsAwsjson10_deserializeOpListTagsForResource struct { +type awsAwsjson10_deserializeOpGetDbParameterGroup struct { } -func (*awsAwsjson10_deserializeOpListTagsForResource) ID() string { +func (*awsAwsjson10_deserializeOpGetDbParameterGroup) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson10_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson10_deserializeOpGetDbParameterGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -931,9 +940,9 @@ func (m *awsAwsjson10_deserializeOpListTagsForResource) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson10_deserializeOpErrorListTagsForResource(response, &metadata) + return out, metadata, awsAwsjson10_deserializeOpErrorGetDbParameterGroup(response, &metadata) } - output := &ListTagsForResourceOutput{} + output := &GetDbParameterGroupOutput{} out.Result = output var buff [1024]byte @@ -953,7 +962,7 @@ func (m 
*awsAwsjson10_deserializeOpListTagsForResource) HandleDeserialize(ctx co return out, metadata, err } - err = awsAwsjson10_deserializeOpDocumentListTagsForResourceOutput(&output, shape) + err = awsAwsjson10_deserializeOpDocumentGetDbParameterGroupOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -967,7 +976,7 @@ func (m *awsAwsjson10_deserializeOpListTagsForResource) HandleDeserialize(ctx co return out, metadata, err } -func awsAwsjson10_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson10_deserializeOpErrorGetDbParameterGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -1004,9 +1013,21 @@ func awsAwsjson10_deserializeOpErrorListTagsForResource(response *smithyhttp.Res errorMessage = bodyInfo.Message } switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson10_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson10_deserializeErrorInternalServerException(response, errorBody) + case strings.EqualFold("ResourceNotFoundException", errorCode): return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson10_deserializeErrorValidationException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -1017,14 +1038,14 @@ func awsAwsjson10_deserializeOpErrorListTagsForResource(response *smithyhttp.Res } } -type awsAwsjson10_deserializeOpTagResource struct { +type awsAwsjson10_deserializeOpListDbClusters struct { } -func (*awsAwsjson10_deserializeOpTagResource) ID() string { +func (*awsAwsjson10_deserializeOpListDbClusters) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson10_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson10_deserializeOpListDbClusters) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -1042,21 +1063,43 @@ func (m *awsAwsjson10_deserializeOpTagResource) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson10_deserializeOpErrorTagResource(response, &metadata) + return out, metadata, awsAwsjson10_deserializeOpErrorListDbClusters(response, &metadata) } - output := &TagResourceOutput{} + output := &ListDbClustersOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != 
nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentListDbClustersOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsAwsjson10_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson10_deserializeOpErrorListDbClusters(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -1093,11 +1136,20 @@ func awsAwsjson10_deserializeOpErrorTagResource(response *smithyhttp.Response, m errorMessage = bodyInfo.Message } switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson10_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson10_deserializeErrorInternalServerException(response, errorBody) + case strings.EqualFold("ResourceNotFoundException", errorCode): return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) - case strings.EqualFold("ServiceQuotaExceededException", errorCode): - return awsAwsjson10_deserializeErrorServiceQuotaExceededException(response, errorBody) + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson10_deserializeErrorValidationException(response, errorBody) default: genericError := &smithy.GenericAPIError{ @@ -1109,14 +1161,14 @@ func awsAwsjson10_deserializeOpErrorTagResource(response *smithyhttp.Response, m } } -type awsAwsjson10_deserializeOpUntagResource struct { +type awsAwsjson10_deserializeOpListDbInstances struct { } -func (*awsAwsjson10_deserializeOpUntagResource) ID() string { +func (*awsAwsjson10_deserializeOpListDbInstances) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson10_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson10_deserializeOpListDbInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -1134,21 +1186,43 @@ func (m *awsAwsjson10_deserializeOpUntagResource) HandleDeserialize(ctx context. 
} if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson10_deserializeOpErrorUntagResource(response, &metadata) + return out, metadata, awsAwsjson10_deserializeOpErrorListDbInstances(response, &metadata) } - output := &UntagResourceOutput{} + output := &ListDbInstancesOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentListDbInstancesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsAwsjson10_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson10_deserializeOpErrorListDbInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -1185,9 +1259,21 @@ func awsAwsjson10_deserializeOpErrorUntagResource(response *smithyhttp.Response, errorMessage = bodyInfo.Message } switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson10_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson10_deserializeErrorInternalServerException(response, errorBody) + case strings.EqualFold("ResourceNotFoundException", errorCode): return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson10_deserializeErrorValidationException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -1198,14 +1284,14 @@ func awsAwsjson10_deserializeOpErrorUntagResource(response *smithyhttp.Response, } } -type awsAwsjson10_deserializeOpUpdateDbInstance struct { +type awsAwsjson10_deserializeOpListDbInstancesForCluster struct { } -func (*awsAwsjson10_deserializeOpUpdateDbInstance) ID() string { +func (*awsAwsjson10_deserializeOpListDbInstancesForCluster) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson10_deserializeOpUpdateDbInstance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson10_deserializeOpListDbInstancesForCluster) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, 
err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -1223,9 +1309,9 @@ func (m *awsAwsjson10_deserializeOpUpdateDbInstance) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson10_deserializeOpErrorUpdateDbInstance(response, &metadata) + return out, metadata, awsAwsjson10_deserializeOpErrorListDbInstancesForCluster(response, &metadata) } - output := &UpdateDbInstanceOutput{} + output := &ListDbInstancesForClusterOutput{} out.Result = output var buff [1024]byte @@ -1245,7 +1331,7 @@ func (m *awsAwsjson10_deserializeOpUpdateDbInstance) HandleDeserialize(ctx conte return out, metadata, err } - err = awsAwsjson10_deserializeOpDocumentUpdateDbInstanceOutput(&output, shape) + err = awsAwsjson10_deserializeOpDocumentListDbInstancesForClusterOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1259,7 +1345,7 @@ func (m *awsAwsjson10_deserializeOpUpdateDbInstance) HandleDeserialize(ctx conte return out, metadata, err } -func awsAwsjson10_deserializeOpErrorUpdateDbInstance(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson10_deserializeOpErrorListDbInstancesForCluster(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -1299,9 +1385,6 @@ func awsAwsjson10_deserializeOpErrorUpdateDbInstance(response *smithyhttp.Respon case strings.EqualFold("AccessDeniedException", errorCode): return awsAwsjson10_deserializeErrorAccessDeniedException(response, errorBody) - case strings.EqualFold("ConflictException", errorCode): - return awsAwsjson10_deserializeErrorConflictException(response, errorBody) - case strings.EqualFold("InternalServerException", errorCode): return awsAwsjson10_deserializeErrorInternalServerException(response, errorBody) @@ -1324,11 +1407,40 @@ func awsAwsjson10_deserializeOpErrorUpdateDbInstance(response *smithyhttp.Respon } } -func awsAwsjson10_deserializeErrorAccessDeniedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { +type awsAwsjson10_deserializeOpListDbParameterGroups struct { +} + +func (*awsAwsjson10_deserializeOpListDbParameterGroups) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpListDbParameterGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorListDbParameterGroups(response, &metadata) + } + output := &ListDbParameterGroupsOutput{} + out.Result = output + var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) + body := io.TeeReader(response.Body, 
ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() var shape interface{} @@ -1339,12 +1451,10 @@ func awsAwsjson10_deserializeErrorAccessDeniedException(response *smithyhttp.Res Err: fmt.Errorf("failed to decode response body, %w", err), Snapshot: snapshot.Bytes(), } - return err + return out, metadata, err } - output := &types.AccessDeniedException{} - err := awsAwsjson10_deserializeDocumentAccessDeniedException(&output, shape) - + err = awsAwsjson10_deserializeOpDocumentListDbParameterGroupsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1352,34 +1462,31 @@ func awsAwsjson10_deserializeErrorAccessDeniedException(response *smithyhttp.Res Err: fmt.Errorf("failed to decode response body, %w", err), Snapshot: snapshot.Bytes(), } - return err + return out, metadata, err } - errorBody.Seek(0, io.SeekStart) - return output + return out, metadata, err } -func awsAwsjson10_deserializeErrorConflictException(response *smithyhttp.Response, errorBody *bytes.Reader) error { +func awsAwsjson10_deserializeOpErrorListDbParameterGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - output := &types.ConflictException{} - err := awsAwsjson10_deserializeDocumentConflictException(&output, shape) - + bodyInfo, err := getProtocolErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1391,49 +1498,72 @@ func awsAwsjson10_deserializeErrorConflictException(response *smithyhttp.Respons } errorBody.Seek(0, io.SeekStart) - return output -} + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson10_deserializeErrorAccessDeniedException(response, errorBody) -func awsAwsjson10_deserializeErrorInternalServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson10_deserializeErrorInternalServerException(response, errorBody) - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + case strings.EqualFold("ResourceNotFoundException", errorCode): + return 
awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson10_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, } - return err + return genericError + } +} - output := &types.InternalServerException{} - err := awsAwsjson10_deserializeDocumentInternalServerException(&output, shape) +type awsAwsjson10_deserializeOpListTagsForResource struct { +} + +func (*awsAwsjson10_deserializeOpListTagsForResource) ID() string { + return "OperationDeserializer" +} +func (m *awsAwsjson10_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err + return out, metadata, err } - errorBody.Seek(0, io.SeekStart) - return output -} + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorListTagsForResource(response, &metadata) + } + output := &ListTagsForResourceOutput{} + out.Result = output -func awsAwsjson10_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) + body := io.TeeReader(response.Body, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() var shape interface{} @@ -1444,12 +1574,10 @@ func awsAwsjson10_deserializeErrorResourceNotFoundException(response *smithyhttp Err: fmt.Errorf("failed to decode response body, %w", err), Snapshot: snapshot.Bytes(), } - return err + return out, metadata, err } - output := &types.ResourceNotFoundException{} - err := awsAwsjson10_deserializeDocumentResourceNotFoundException(&output, shape) - + err = awsAwsjson10_deserializeOpDocumentListTagsForResourceOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1457,22 +1585,32 @@ func awsAwsjson10_deserializeErrorResourceNotFoundException(response *smithyhttp Err: fmt.Errorf("failed to decode response body, %w", err), Snapshot: snapshot.Bytes(), } - return err + return out, metadata, err } - errorBody.Seek(0, io.SeekStart) - return output + return out, metadata, err } -func awsAwsjson10_deserializeErrorServiceQuotaExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error { +func awsAwsjson10_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != 
nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) err = &smithy.DeserializationError{ @@ -1482,67 +1620,86 @@ func awsAwsjson10_deserializeErrorServiceQuotaExceededException(response *smithy return err } - output := &types.ServiceQuotaExceededException{} - err := awsAwsjson10_deserializeDocumentServiceQuotaExceededException(&output, shape) + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, } - return err + return genericError + } +} - errorBody.Seek(0, io.SeekStart) - return output +type awsAwsjson10_deserializeOpTagResource struct { } -func awsAwsjson10_deserializeErrorThrottlingException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) +func (*awsAwsjson10_deserializeOpTagResource) ID() string { + return "OperationDeserializer" +} - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err +func (m *awsAwsjson10_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err } - output := &types.ThrottlingException{} - err := awsAwsjson10_deserializeDocumentThrottlingException(&output, shape) + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + if response.StatusCode < 200 || 
response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorTagResource(response, &metadata) + } + output := &TagResourceOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), } - return err } - errorBody.Seek(0, io.SeekStart) - return output + return out, metadata, err } -func awsAwsjson10_deserializeErrorValidationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { +func awsAwsjson10_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) err = &smithy.DeserializationError{ @@ -1552,24 +1709,1249 @@ func awsAwsjson10_deserializeErrorValidationException(response *smithyhttp.Respo return err } - output := &types.ValidationException{} - err := awsAwsjson10_deserializeDocumentValidationException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceQuotaExceededException", errorCode): + return awsAwsjson10_deserializeErrorServiceQuotaExceededException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpUntagResource struct { +} + +func (*awsAwsjson10_deserializeOpUntagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 
300 { + return out, metadata, awsAwsjson10_deserializeOpErrorUntagResource(response, &metadata) + } + output := &UntagResourceOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpUpdateDbCluster struct { +} + +func (*awsAwsjson10_deserializeOpUpdateDbCluster) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpUpdateDbCluster) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorUpdateDbCluster(response, &metadata) + } + output := &UpdateDbClusterOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentUpdateDbClusterOutput(&output, shape) + if err != 
nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorUpdateDbCluster(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } return err } - errorBody.Seek(0, io.SeekStart) - return output + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson10_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsAwsjson10_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson10_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson10_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpUpdateDbInstance struct { +} + +func (*awsAwsjson10_deserializeOpUpdateDbInstance) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpUpdateDbInstance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorUpdateDbInstance(response, &metadata) + } + 
output := &UpdateDbInstanceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentUpdateDbInstanceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorUpdateDbInstance(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson10_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsAwsjson10_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson10_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson10_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsAwsjson10_deserializeErrorAccessDeniedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, 
ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.AccessDeniedException{} + err := awsAwsjson10_deserializeDocumentAccessDeniedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson10_deserializeErrorConflictException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ConflictException{} + err := awsAwsjson10_deserializeDocumentConflictException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson10_deserializeErrorInternalServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InternalServerException{} + err := awsAwsjson10_deserializeDocumentInternalServerException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson10_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ResourceNotFoundException{} + err := awsAwsjson10_deserializeDocumentResourceNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err 
+ } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson10_deserializeErrorServiceQuotaExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ServiceQuotaExceededException{} + err := awsAwsjson10_deserializeDocumentServiceQuotaExceededException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson10_deserializeErrorThrottlingException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ThrottlingException{} + err := awsAwsjson10_deserializeDocumentThrottlingException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson10_deserializeErrorValidationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ValidationException{} + err := awsAwsjson10_deserializeDocumentValidationException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson10_deserializeDocumentAccessDeniedException(v **types.AccessDeniedException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AccessDeniedException + if *v == nil { + sv = &types.AccessDeniedException{} + } 
else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentConflictException(v **types.ConflictException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ConflictException + if *v == nil { + sv = &types.ConflictException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "resourceId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ResourceId = ptr.String(jtv) + } + + case "resourceType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ResourceType = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentDbClusterSummary(v **types.DbClusterSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DbClusterSummary + if *v == nil { + sv = &types.DbClusterSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "allocatedStorage": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected AllocatedStorage to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.AllocatedStorage = ptr.Int32(int32(i64)) + } + + case "arn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + } + sv.Arn = ptr.String(jtv) + } + + case "dbInstanceType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbInstanceType to be of type string, got %T instead", value) + } + sv.DbInstanceType = types.DbInstanceType(jtv) + } + + case "dbStorageType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbStorageType to be of type string, got %T instead", value) + } + sv.DbStorageType = types.DbStorageType(jtv) + } + + case "deploymentType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ClusterDeploymentType to be of type string, got %T instead", value) + } + sv.DeploymentType = types.ClusterDeploymentType(jtv) + } + + case "endpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Endpoint = ptr.String(jtv) + } + + case "id": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected 
DbClusterId to be of type string, got %T instead", value) + } + sv.Id = ptr.String(jtv) + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbClusterName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "networkType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NetworkType to be of type string, got %T instead", value) + } + sv.NetworkType = types.NetworkType(jtv) + } + + case "port": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Port to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Port = ptr.Int32(int32(i64)) + } + + case "readerEndpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ReaderEndpoint = ptr.String(jtv) + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ClusterStatus to be of type string, got %T instead", value) + } + sv.Status = types.ClusterStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentDbClusterSummaryList(v *[]types.DbClusterSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.DbClusterSummary + if *v == nil { + cv = []types.DbClusterSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.DbClusterSummary + destAddr := &col + if err := awsAwsjson10_deserializeDocumentDbClusterSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentDbInstanceForClusterSummary(v **types.DbInstanceForClusterSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DbInstanceForClusterSummary + if *v == nil { + sv = &types.DbInstanceForClusterSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "allocatedStorage": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected AllocatedStorage to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.AllocatedStorage = ptr.Int32(int32(i64)) + } + + case "arn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + } + sv.Arn = ptr.String(jtv) + } + + case "dbInstanceType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbInstanceType to be of type string, got %T instead", value) + } + sv.DbInstanceType = types.DbInstanceType(jtv) + } + + case "dbStorageType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbStorageType to be of type string, got %T instead", value) + } + sv.DbStorageType = types.DbStorageType(jtv) + } + + case "deploymentType": + if value != nil { + jtv, ok := 
value.(string) + if !ok { + return fmt.Errorf("expected DeploymentType to be of type string, got %T instead", value) + } + sv.DeploymentType = types.DeploymentType(jtv) + } + + case "endpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Endpoint = ptr.String(jtv) + } + + case "id": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbInstanceId to be of type string, got %T instead", value) + } + sv.Id = ptr.String(jtv) + } + + case "instanceMode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected InstanceMode to be of type string, got %T instead", value) + } + sv.InstanceMode = types.InstanceMode(jtv) + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbInstanceName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "networkType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NetworkType to be of type string, got %T instead", value) + } + sv.NetworkType = types.NetworkType(jtv) + } + + case "port": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Port to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Port = ptr.Int32(int32(i64)) + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Status to be of type string, got %T instead", value) + } + sv.Status = types.Status(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentDbInstanceForClusterSummaryList(v *[]types.DbInstanceForClusterSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.DbInstanceForClusterSummary + if *v == nil { + cv = []types.DbInstanceForClusterSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.DbInstanceForClusterSummary + destAddr := &col + if err := awsAwsjson10_deserializeDocumentDbInstanceForClusterSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentDbInstanceSummary(v **types.DbInstanceSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DbInstanceSummary + if *v == nil { + sv = &types.DbInstanceSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "allocatedStorage": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected AllocatedStorage to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.AllocatedStorage = ptr.Int32(int32(i64)) + } + + case "arn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + } + sv.Arn = ptr.String(jtv) + } + + case "dbInstanceType": + 
if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbInstanceType to be of type string, got %T instead", value) + } + sv.DbInstanceType = types.DbInstanceType(jtv) + } + + case "dbStorageType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbStorageType to be of type string, got %T instead", value) + } + sv.DbStorageType = types.DbStorageType(jtv) + } + + case "deploymentType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DeploymentType to be of type string, got %T instead", value) + } + sv.DeploymentType = types.DeploymentType(jtv) + } + + case "endpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Endpoint = ptr.String(jtv) + } + + case "id": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbInstanceId to be of type string, got %T instead", value) + } + sv.Id = ptr.String(jtv) + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbInstanceName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "networkType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NetworkType to be of type string, got %T instead", value) + } + sv.NetworkType = types.NetworkType(jtv) + } + + case "port": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Port to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Port = ptr.Int32(int32(i64)) + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Status to be of type string, got %T instead", value) + } + sv.Status = types.Status(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentDbInstanceSummaryList(v *[]types.DbInstanceSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.DbInstanceSummary + if *v == nil { + cv = []types.DbInstanceSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.DbInstanceSummary + destAddr := &col + if err := awsAwsjson10_deserializeDocumentDbInstanceSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil } -func awsAwsjson10_deserializeDocumentAccessDeniedException(v **types.AccessDeniedException, value interface{}) error { +func awsAwsjson10_deserializeDocumentDbParameterGroupSummary(v **types.DbParameterGroupSummary, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -1582,22 +2964,49 @@ func awsAwsjson10_deserializeDocumentAccessDeniedException(v **types.AccessDenie return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.AccessDeniedException + var sv *types.DbParameterGroupSummary if *v == nil { - sv = &types.AccessDeniedException{} + sv = &types.DbParameterGroupSummary{} } else { sv = *v } for key, value := range shape { switch key { - case "message", "Message": + case "arn": + if value != nil { + jtv, ok := value.(string) + if !ok { + 
return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + } + sv.Arn = ptr.String(jtv) + } + + case "description": if value != nil { jtv, ok := value.(string) if !ok { return fmt.Errorf("expected String to be of type string, got %T instead", value) } - sv.Message = ptr.String(jtv) + sv.Description = ptr.String(jtv) + } + + case "id": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbParameterGroupId to be of type string, got %T instead", value) + } + sv.Id = ptr.String(jtv) + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbParameterGroupName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) } default: @@ -1609,7 +3018,41 @@ func awsAwsjson10_deserializeDocumentAccessDeniedException(v **types.AccessDenie return nil } -func awsAwsjson10_deserializeDocumentConflictException(v **types.ConflictException, value interface{}) error { +func awsAwsjson10_deserializeDocumentDbParameterGroupSummaryList(v *[]types.DbParameterGroupSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.DbParameterGroupSummary + if *v == nil { + cv = []types.DbParameterGroupSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.DbParameterGroupSummary + destAddr := &col + if err := awsAwsjson10_deserializeDocumentDbParameterGroupSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentDuration(v **types.Duration, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -1622,279 +3065,408 @@ func awsAwsjson10_deserializeDocumentConflictException(v **types.ConflictExcepti return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.ConflictException + var sv *types.Duration if *v == nil { - sv = &types.ConflictException{} + sv = &types.Duration{} } else { sv = *v } for key, value := range shape { switch key { - case "message", "Message": + case "durationType": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected String to be of type string, got %T instead", value) + return fmt.Errorf("expected DurationType to be of type string, got %T instead", value) + } + sv.DurationType = types.DurationType(jtv) + } + + case "value": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Value = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentInfluxDBv2Parameters(v **types.InfluxDBv2Parameters, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InfluxDBv2Parameters + if *v == nil { + sv = &types.InfluxDBv2Parameters{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "fluxLogEnabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected 
Boolean to be of type *bool, got %T instead", value) + } + sv.FluxLogEnabled = ptr.Bool(jtv) + } + + case "httpIdleTimeout": + if err := awsAwsjson10_deserializeDocumentDuration(&sv.HttpIdleTimeout, value); err != nil { + return err + } + + case "httpReadHeaderTimeout": + if err := awsAwsjson10_deserializeDocumentDuration(&sv.HttpReadHeaderTimeout, value); err != nil { + return err + } + + case "httpReadTimeout": + if err := awsAwsjson10_deserializeDocumentDuration(&sv.HttpReadTimeout, value); err != nil { + return err + } + + case "httpWriteTimeout": + if err := awsAwsjson10_deserializeDocumentDuration(&sv.HttpWriteTimeout, value); err != nil { + return err + } + + case "influxqlMaxSelectBuckets": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.InfluxqlMaxSelectBuckets = ptr.Int64(i64) + } + + case "influxqlMaxSelectPoint": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.InfluxqlMaxSelectPoint = ptr.Int64(i64) + } + + case "influxqlMaxSelectSeries": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.InfluxqlMaxSelectSeries = ptr.Int64(i64) + } + + case "logLevel": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected LogLevel to be of type string, got %T instead", value) + } + sv.LogLevel = types.LogLevel(jtv) + } + + case "metricsDisabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.MetricsDisabled = ptr.Bool(jtv) + } + + case "noTasks": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.NoTasks = ptr.Bool(jtv) + } + + case "pprofDisabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.PprofDisabled = ptr.Bool(jtv) + } + + case "queryConcurrency": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err } - sv.Message = ptr.String(jtv) + sv.QueryConcurrency = ptr.Int32(int32(i64)) } - case "resourceId": + case "queryInitialMemoryBytes": if value != nil { - jtv, ok := value.(string) + jtv, ok := value.(json.Number) if !ok { - return fmt.Errorf("expected String to be of type string, got %T instead", value) + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) } - sv.ResourceId = ptr.String(jtv) + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.QueryInitialMemoryBytes = ptr.Int64(i64) } - case "resourceType": + case "queryMaxMemoryBytes": if value != nil { - jtv, ok := value.(string) + jtv, ok := value.(json.Number) if !ok { - return fmt.Errorf("expected String to be of type string, got %T instead", value) + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) } - sv.ResourceType = ptr.String(jtv) + i64, err := jtv.Int64() + if err != nil { + return err + } + 
sv.QueryMaxMemoryBytes = ptr.Int64(i64) } - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson10_deserializeDocumentDbInstanceSummary(v **types.DbInstanceSummary, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.DbInstanceSummary - if *v == nil { - sv = &types.DbInstanceSummary{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "allocatedStorage": + case "queryMemoryBytes": if value != nil { jtv, ok := value.(json.Number) if !ok { - return fmt.Errorf("expected AllocatedStorage to be json.Number, got %T instead", value) + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) } i64, err := jtv.Int64() if err != nil { return err } - sv.AllocatedStorage = ptr.Int32(int32(i64)) + sv.QueryMemoryBytes = ptr.Int64(i64) } - case "arn": + case "queryQueueSize": if value != nil { - jtv, ok := value.(string) + jtv, ok := value.(json.Number) if !ok { - return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) } - sv.Arn = ptr.String(jtv) + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.QueryQueueSize = ptr.Int32(int32(i64)) } - case "dbInstanceType": + case "sessionLength": if value != nil { - jtv, ok := value.(string) + jtv, ok := value.(json.Number) if !ok { - return fmt.Errorf("expected DbInstanceType to be of type string, got %T instead", value) + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) } - sv.DbInstanceType = types.DbInstanceType(jtv) + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.SessionLength = ptr.Int32(int32(i64)) } - case "dbStorageType": + case "sessionRenewDisabled": if value != nil { - jtv, ok := value.(string) + jtv, ok := value.(bool) if !ok { - return fmt.Errorf("expected DbStorageType to be of type string, got %T instead", value) + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) } - sv.DbStorageType = types.DbStorageType(jtv) + sv.SessionRenewDisabled = ptr.Bool(jtv) } - case "deploymentType": + case "storageCacheMaxMemorySize": if value != nil { - jtv, ok := value.(string) + jtv, ok := value.(json.Number) if !ok { - return fmt.Errorf("expected DeploymentType to be of type string, got %T instead", value) + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) } - sv.DeploymentType = types.DeploymentType(jtv) + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.StorageCacheMaxMemorySize = ptr.Int64(i64) } - case "endpoint": + case "storageCacheSnapshotMemorySize": if value != nil { - jtv, ok := value.(string) + jtv, ok := value.(json.Number) if !ok { - return fmt.Errorf("expected String to be of type string, got %T instead", value) + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) } - sv.Endpoint = ptr.String(jtv) + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.StorageCacheSnapshotMemorySize = ptr.Int64(i64) } - case "id": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected DbInstanceId to be of type string, got %T instead", value) - } - sv.Id = ptr.String(jtv) + case "storageCacheSnapshotWriteColdDuration": + if err := 
awsAwsjson10_deserializeDocumentDuration(&sv.StorageCacheSnapshotWriteColdDuration, value); err != nil { + return err } - case "name": + case "storageCompactFullWriteColdDuration": + if err := awsAwsjson10_deserializeDocumentDuration(&sv.StorageCompactFullWriteColdDuration, value); err != nil { + return err + } + + case "storageCompactThroughputBurst": if value != nil { - jtv, ok := value.(string) + jtv, ok := value.(json.Number) if !ok { - return fmt.Errorf("expected DbInstanceName to be of type string, got %T instead", value) + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) } - sv.Name = ptr.String(jtv) + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.StorageCompactThroughputBurst = ptr.Int64(i64) } - case "networkType": + case "storageMaxConcurrentCompactions": if value != nil { - jtv, ok := value.(string) + jtv, ok := value.(json.Number) if !ok { - return fmt.Errorf("expected NetworkType to be of type string, got %T instead", value) + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) } - sv.NetworkType = types.NetworkType(jtv) + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.StorageMaxConcurrentCompactions = ptr.Int32(int32(i64)) } - case "port": + case "storageMaxIndexLogFileSize": if value != nil { jtv, ok := value.(json.Number) if !ok { - return fmt.Errorf("expected Port to be json.Number, got %T instead", value) + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) } i64, err := jtv.Int64() if err != nil { return err } - sv.Port = ptr.Int32(int32(i64)) + sv.StorageMaxIndexLogFileSize = ptr.Int64(i64) } - case "status": + case "storageNoValidateFieldSize": if value != nil { - jtv, ok := value.(string) + jtv, ok := value.(bool) if !ok { - return fmt.Errorf("expected Status to be of type string, got %T instead", value) + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) } - sv.Status = types.Status(jtv) + sv.StorageNoValidateFieldSize = ptr.Bool(jtv) } - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson10_deserializeDocumentDbInstanceSummaryList(v *[]types.DbInstanceSummary, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.DbInstanceSummary - if *v == nil { - cv = []types.DbInstanceSummary{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.DbInstanceSummary - destAddr := &col - if err := awsAwsjson10_deserializeDocumentDbInstanceSummary(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson10_deserializeDocumentDbParameterGroupSummary(v **types.DbParameterGroupSummary, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } + case "storageRetentionCheckInterval": + if err := awsAwsjson10_deserializeDocumentDuration(&sv.StorageRetentionCheckInterval, value); err != nil { + return err + } - var sv *types.DbParameterGroupSummary - if *v == nil { - sv = &types.DbParameterGroupSummary{} - } else { - sv = *v - } + case "storageSeriesFileMaxConcurrentSnapshotCompactions": + if value != nil { + jtv, ok 
:= value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.StorageSeriesFileMaxConcurrentSnapshotCompactions = ptr.Int32(int32(i64)) + } - for key, value := range shape { - switch key { - case "arn": + case "storageSeriesIdSetCacheSize": if value != nil { - jtv, ok := value.(string) + jtv, ok := value.(json.Number) if !ok { - return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) } - sv.Arn = ptr.String(jtv) + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.StorageSeriesIdSetCacheSize = ptr.Int64(i64) } - case "description": + case "storageWalMaxConcurrentWrites": if value != nil { - jtv, ok := value.(string) + jtv, ok := value.(json.Number) if !ok { - return fmt.Errorf("expected String to be of type string, got %T instead", value) + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) } - sv.Description = ptr.String(jtv) + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.StorageWalMaxConcurrentWrites = ptr.Int32(int32(i64)) } - case "id": + case "storageWalMaxWriteDelay": + if err := awsAwsjson10_deserializeDocumentDuration(&sv.StorageWalMaxWriteDelay, value); err != nil { + return err + } + + case "tracingType": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected DbParameterGroupId to be of type string, got %T instead", value) + return fmt.Errorf("expected TracingType to be of type string, got %T instead", value) } - sv.Id = ptr.String(jtv) + sv.TracingType = types.TracingType(jtv) } - case "name": + case "uiDisabled": if value != nil { - jtv, ok := value.(string) + jtv, ok := value.(bool) if !ok { - return fmt.Errorf("expected DbParameterGroupName to be of type string, got %T instead", value) + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) } - sv.Name = ptr.String(jtv) + sv.UiDisabled = ptr.Bool(jtv) } default: @@ -1906,7 +3478,7 @@ func awsAwsjson10_deserializeDocumentDbParameterGroupSummary(v **types.DbParamet return nil } -func awsAwsjson10_deserializeDocumentDbParameterGroupSummaryList(v *[]types.DbParameterGroupSummary, value interface{}) error { +func awsAwsjson10_deserializeDocumentInternalServerException(v **types.InternalServerException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -1914,33 +3486,39 @@ func awsAwsjson10_deserializeDocumentDbParameterGroupSummaryList(v *[]types.DbPa return nil } - shape, ok := value.([]interface{}) + shape, ok := value.(map[string]interface{}) if !ok { return fmt.Errorf("unexpected JSON type %v", value) } - var cv []types.DbParameterGroupSummary + var sv *types.InternalServerException if *v == nil { - cv = []types.DbParameterGroupSummary{} + sv = &types.InternalServerException{} } else { - cv = *v + sv = *v } - for _, value := range shape { - var col types.DbParameterGroupSummary - destAddr := &col - if err := awsAwsjson10_deserializeDocumentDbParameterGroupSummary(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + default: + _, _ = key, value + + } } - *v = 
cv + *v = sv return nil } -func awsAwsjson10_deserializeDocumentDuration(v **types.Duration, value interface{}) error { +func awsAwsjson10_deserializeDocumentLogDeliveryConfiguration(v **types.LogDeliveryConfiguration, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -1953,35 +3531,18 @@ func awsAwsjson10_deserializeDocumentDuration(v **types.Duration, value interfac return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.Duration + var sv *types.LogDeliveryConfiguration if *v == nil { - sv = &types.Duration{} + sv = &types.LogDeliveryConfiguration{} } else { sv = *v } for key, value := range shape { switch key { - case "durationType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected DurationType to be of type string, got %T instead", value) - } - sv.DurationType = types.DurationType(jtv) - } - - case "value": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected Long to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.Value = ptr.Int64(i64) + case "s3Configuration": + if err := awsAwsjson10_deserializeDocumentS3Configuration(&sv.S3Configuration, value); err != nil { + return err } default: @@ -1993,7 +3554,7 @@ func awsAwsjson10_deserializeDocumentDuration(v **types.Duration, value interfac return nil } -func awsAwsjson10_deserializeDocumentInfluxDBv2Parameters(v **types.InfluxDBv2Parameters, value interface{}) error { +func awsAwsjson10_deserializeDocumentParameters(v *types.Parameters, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -2006,355 +3567,205 @@ func awsAwsjson10_deserializeDocumentInfluxDBv2Parameters(v **types.InfluxDBv2Pa return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.InfluxDBv2Parameters - if *v == nil { - sv = &types.InfluxDBv2Parameters{} - } else { - sv = *v - } - + var uv types.Parameters +loop: for key, value := range shape { + if value == nil { + continue + } switch key { - case "fluxLogEnabled": - if value != nil { - jtv, ok := value.(bool) - if !ok { - return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) - } - sv.FluxLogEnabled = ptr.Bool(jtv) - } - - case "httpIdleTimeout": - if err := awsAwsjson10_deserializeDocumentDuration(&sv.HttpIdleTimeout, value); err != nil { - return err - } - - case "httpReadHeaderTimeout": - if err := awsAwsjson10_deserializeDocumentDuration(&sv.HttpReadHeaderTimeout, value); err != nil { - return err - } - - case "httpReadTimeout": - if err := awsAwsjson10_deserializeDocumentDuration(&sv.HttpReadTimeout, value); err != nil { - return err - } - - case "httpWriteTimeout": - if err := awsAwsjson10_deserializeDocumentDuration(&sv.HttpWriteTimeout, value); err != nil { + case "InfluxDBv2": + var mv types.InfluxDBv2Parameters + destAddr := &mv + if err := awsAwsjson10_deserializeDocumentInfluxDBv2Parameters(&destAddr, value); err != nil { return err } + mv = *destAddr + uv = &types.ParametersMemberInfluxDBv2{Value: mv} + break loop - case "influxqlMaxSelectBuckets": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected Long to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.InfluxqlMaxSelectBuckets = ptr.Int64(i64) - } - - case "influxqlMaxSelectPoint": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected 
Long to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.InfluxqlMaxSelectPoint = ptr.Int64(i64) - } - - case "influxqlMaxSelectSeries": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected Long to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.InfluxqlMaxSelectSeries = ptr.Int64(i64) - } - - case "logLevel": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected LogLevel to be of type string, got %T instead", value) - } - sv.LogLevel = types.LogLevel(jtv) - } + default: + uv = &types.UnknownUnionMember{Tag: key} + break loop - case "metricsDisabled": - if value != nil { - jtv, ok := value.(bool) - if !ok { - return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) - } - sv.MetricsDisabled = ptr.Bool(jtv) - } + } + } + *v = uv + return nil +} - case "noTasks": - if value != nil { - jtv, ok := value.(bool) - if !ok { - return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) - } - sv.NoTasks = ptr.Bool(jtv) - } +func awsAwsjson10_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } - case "pprofDisabled": - if value != nil { - jtv, ok := value.(bool) - if !ok { - return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) - } - sv.PprofDisabled = ptr.Bool(jtv) - } + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } - case "queryConcurrency": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.QueryConcurrency = ptr.Int32(int32(i64)) - } + var sv *types.ResourceNotFoundException + if *v == nil { + sv = &types.ResourceNotFoundException{} + } else { + sv = *v + } - case "queryInitialMemoryBytes": + for key, value := range shape { + switch key { + case "message", "Message": if value != nil { - jtv, ok := value.(json.Number) + jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected Long to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err + return fmt.Errorf("expected String to be of type string, got %T instead", value) } - sv.QueryInitialMemoryBytes = ptr.Int64(i64) + sv.Message = ptr.String(jtv) } - case "queryMaxMemoryBytes": + case "resourceId": if value != nil { - jtv, ok := value.(json.Number) + jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected Long to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err + return fmt.Errorf("expected String to be of type string, got %T instead", value) } - sv.QueryMaxMemoryBytes = ptr.Int64(i64) + sv.ResourceId = ptr.String(jtv) } - case "queryMemoryBytes": + case "resourceType": if value != nil { - jtv, ok := value.(json.Number) + jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected Long to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err + return fmt.Errorf("expected String to be of type string, got %T instead", value) } - sv.QueryMemoryBytes = ptr.Int64(i64) + sv.ResourceType = ptr.String(jtv) } - case "queryQueueSize": - 
if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.QueryQueueSize = ptr.Int32(int32(i64)) - } + default: + _, _ = key, value - case "sessionLength": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.SessionLength = ptr.Int32(int32(i64)) - } + } + } + *v = sv + return nil +} - case "sessionRenewDisabled": - if value != nil { - jtv, ok := value.(bool) - if !ok { - return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) - } - sv.SessionRenewDisabled = ptr.Bool(jtv) - } +func awsAwsjson10_deserializeDocumentResponseTagMap(v *map[string]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } - case "storageCacheMaxMemorySize": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected Long to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.StorageCacheMaxMemorySize = ptr.Int64(i64) - } + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } - case "storageCacheSnapshotMemorySize": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected Long to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.StorageCacheSnapshotMemorySize = ptr.Int64(i64) - } + var mv map[string]string + if *v == nil { + mv = map[string]string{} + } else { + mv = *v + } - case "storageCacheSnapshotWriteColdDuration": - if err := awsAwsjson10_deserializeDocumentDuration(&sv.StorageCacheSnapshotWriteColdDuration, value); err != nil { - return err + for key, value := range shape { + var parsedVal string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagValue to be of type string, got %T instead", value) } + parsedVal = jtv + } + mv[key] = parsedVal - case "storageCompactFullWriteColdDuration": - if err := awsAwsjson10_deserializeDocumentDuration(&sv.StorageCompactFullWriteColdDuration, value); err != nil { - return err - } + } + *v = mv + return nil +} - case "storageCompactThroughputBurst": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected Long to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.StorageCompactThroughputBurst = ptr.Int64(i64) - } +func awsAwsjson10_deserializeDocumentS3Configuration(v **types.S3Configuration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } - case "storageMaxConcurrentCompactions": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.StorageMaxConcurrentCompactions = ptr.Int32(int32(i64)) - } + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } - case "storageMaxIndexLogFileSize": + var sv *types.S3Configuration + if *v == nil { + sv = &types.S3Configuration{} + } 
else { + sv = *v + } + + for key, value := range shape { + switch key { + case "bucketName": if value != nil { - jtv, ok := value.(json.Number) + jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected Long to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err + return fmt.Errorf("expected String to be of type string, got %T instead", value) } - sv.StorageMaxIndexLogFileSize = ptr.Int64(i64) + sv.BucketName = ptr.String(jtv) } - case "storageNoValidateFieldSize": + case "enabled": if value != nil { jtv, ok := value.(bool) if !ok { return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) } - sv.StorageNoValidateFieldSize = ptr.Bool(jtv) + sv.Enabled = ptr.Bool(jtv) } - case "storageRetentionCheckInterval": - if err := awsAwsjson10_deserializeDocumentDuration(&sv.StorageRetentionCheckInterval, value); err != nil { - return err - } + default: + _, _ = key, value - case "storageSeriesFileMaxConcurrentSnapshotCompactions": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.StorageSeriesFileMaxConcurrentSnapshotCompactions = ptr.Int32(int32(i64)) - } + } + } + *v = sv + return nil +} - case "storageSeriesIdSetCacheSize": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected Long to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.StorageSeriesIdSetCacheSize = ptr.Int64(i64) - } +func awsAwsjson10_deserializeDocumentServiceQuotaExceededException(v **types.ServiceQuotaExceededException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } - case "storageWalMaxConcurrentWrites": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.StorageWalMaxConcurrentWrites = ptr.Int32(int32(i64)) - } + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } - case "storageWalMaxWriteDelay": - if err := awsAwsjson10_deserializeDocumentDuration(&sv.StorageWalMaxWriteDelay, value); err != nil { - return err - } + var sv *types.ServiceQuotaExceededException + if *v == nil { + sv = &types.ServiceQuotaExceededException{} + } else { + sv = *v + } - case "tracingType": + for key, value := range shape { + switch key { + case "message", "Message": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected TracingType to be of type string, got %T instead", value) - } - sv.TracingType = types.TracingType(jtv) - } - - case "uiDisabled": - if value != nil { - jtv, ok := value.(bool) - if !ok { - return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + return fmt.Errorf("expected String to be of type string, got %T instead", value) } - sv.UiDisabled = ptr.Bool(jtv) + sv.Message = ptr.String(jtv) } default: @@ -2366,7 +3777,7 @@ func awsAwsjson10_deserializeDocumentInfluxDBv2Parameters(v **types.InfluxDBv2Pa return nil } -func awsAwsjson10_deserializeDocumentInternalServerException(v **types.InternalServerException, value interface{}) error { +func awsAwsjson10_deserializeDocumentThrottlingException(v **types.ThrottlingException, value 
interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -2379,9 +3790,9 @@ func awsAwsjson10_deserializeDocumentInternalServerException(v **types.InternalS return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.InternalServerException + var sv *types.ThrottlingException if *v == nil { - sv = &types.InternalServerException{} + sv = &types.ThrottlingException{} } else { sv = *v } @@ -2397,6 +3808,19 @@ func awsAwsjson10_deserializeDocumentInternalServerException(v **types.InternalS sv.Message = ptr.String(jtv) } + case "retryAfterSeconds": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.RetryAfterSeconds = ptr.Int32(int32(i64)) + } + default: _, _ = key, value @@ -2406,7 +3830,7 @@ func awsAwsjson10_deserializeDocumentInternalServerException(v **types.InternalS return nil } -func awsAwsjson10_deserializeDocumentLogDeliveryConfiguration(v **types.LogDeliveryConfiguration, value interface{}) error { +func awsAwsjson10_deserializeDocumentValidationException(v **types.ValidationException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -2419,18 +3843,31 @@ func awsAwsjson10_deserializeDocumentLogDeliveryConfiguration(v **types.LogDeliv return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.LogDeliveryConfiguration + var sv *types.ValidationException if *v == nil { - sv = &types.LogDeliveryConfiguration{} + sv = &types.ValidationException{} } else { sv = *v } for key, value := range shape { switch key { - case "s3Configuration": - if err := awsAwsjson10_deserializeDocumentS3Configuration(&sv.S3Configuration, value); err != nil { - return err + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "reason": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ValidationExceptionReason to be of type string, got %T instead", value) + } + sv.Reason = types.ValidationExceptionReason(jtv) } default: @@ -2442,7 +3879,7 @@ func awsAwsjson10_deserializeDocumentLogDeliveryConfiguration(v **types.LogDeliv return nil } -func awsAwsjson10_deserializeDocumentParameters(v *types.Parameters, value interface{}) error { +func awsAwsjson10_deserializeDocumentVpcSecurityGroupIdList(v *[]string, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -2450,39 +3887,71 @@ func awsAwsjson10_deserializeDocumentParameters(v *types.Parameters, value inter return nil } - shape, ok := value.(map[string]interface{}) + shape, ok := value.([]interface{}) if !ok { return fmt.Errorf("unexpected JSON type %v", value) } - var uv types.Parameters -loop: - for key, value := range shape { - if value == nil { - continue - } - switch key { - case "InfluxDBv2": - var mv types.InfluxDBv2Parameters - destAddr := &mv - if err := awsAwsjson10_deserializeDocumentInfluxDBv2Parameters(&destAddr, value); err != nil { - return err + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected VpcSecurityGroupId to be of type string, got %T instead", value) } - mv = *destAddr - uv = 
&types.ParametersMemberInfluxDBv2{Value: mv} - break loop + col = jtv + } + cv = append(cv, col) - default: - uv = &types.UnknownUnionMember{Tag: key} - break loop + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentVpcSubnetIdList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected VpcSubnetId to be of type string, got %T instead", value) + } + col = jtv } + cv = append(cv, col) + } - *v = uv + *v = cv return nil } -func awsAwsjson10_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error { +func awsAwsjson10_deserializeOpDocumentCreateDbClusterOutput(v **CreateDbClusterOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -2495,40 +3964,31 @@ func awsAwsjson10_deserializeDocumentResourceNotFoundException(v **types.Resourc return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.ResourceNotFoundException + var sv *CreateDbClusterOutput if *v == nil { - sv = &types.ResourceNotFoundException{} + sv = &CreateDbClusterOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "message", "Message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected String to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - case "resourceId": + case "dbClusterId": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected String to be of type string, got %T instead", value) + return fmt.Errorf("expected DbClusterId to be of type string, got %T instead", value) } - sv.ResourceId = ptr.String(jtv) + sv.DbClusterId = ptr.String(jtv) } - case "resourceType": + case "dbClusterStatus": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected String to be of type string, got %T instead", value) + return fmt.Errorf("expected ClusterStatus to be of type string, got %T instead", value) } - sv.ResourceType = ptr.String(jtv) + sv.DbClusterStatus = types.ClusterStatus(jtv) } default: @@ -2540,7 +4000,7 @@ func awsAwsjson10_deserializeDocumentResourceNotFoundException(v **types.Resourc return nil } -func awsAwsjson10_deserializeDocumentResponseTagMap(v *map[string]string, value interface{}) error { +func awsAwsjson10_deserializeOpDocumentCreateDbInstanceOutput(v **CreateDbInstanceOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -2553,160 +4013,198 @@ func awsAwsjson10_deserializeDocumentResponseTagMap(v *map[string]string, value return fmt.Errorf("unexpected JSON type %v", value) } - var mv map[string]string + var sv *CreateDbInstanceOutput if *v == nil { - mv = map[string]string{} + sv = &CreateDbInstanceOutput{} } else { - mv = *v + sv = *v } for key, value := range shape { - var parsedVal string - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected TagValue to be of type string, got %T instead", value) + switch key { + case "allocatedStorage": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected 
AllocatedStorage to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.AllocatedStorage = ptr.Int32(int32(i64)) } - parsedVal = jtv - } - mv[key] = parsedVal - } - *v = mv - return nil -} + case "arn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + } + sv.Arn = ptr.String(jtv) + } -func awsAwsjson10_deserializeDocumentS3Configuration(v **types.S3Configuration, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } + case "availabilityZone": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.AvailabilityZone = ptr.String(jtv) + } - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } + case "dbClusterId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbClusterId to be of type string, got %T instead", value) + } + sv.DbClusterId = ptr.String(jtv) + } + + case "dbInstanceType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbInstanceType to be of type string, got %T instead", value) + } + sv.DbInstanceType = types.DbInstanceType(jtv) + } + + case "dbParameterGroupIdentifier": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbParameterGroupIdentifier to be of type string, got %T instead", value) + } + sv.DbParameterGroupIdentifier = ptr.String(jtv) + } + + case "dbStorageType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbStorageType to be of type string, got %T instead", value) + } + sv.DbStorageType = types.DbStorageType(jtv) + } - var sv *types.S3Configuration - if *v == nil { - sv = &types.S3Configuration{} - } else { - sv = *v - } + case "deploymentType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DeploymentType to be of type string, got %T instead", value) + } + sv.DeploymentType = types.DeploymentType(jtv) + } - for key, value := range shape { - switch key { - case "bucketName": + case "endpoint": if value != nil { jtv, ok := value.(string) if !ok { return fmt.Errorf("expected String to be of type string, got %T instead", value) } - sv.BucketName = ptr.String(jtv) + sv.Endpoint = ptr.String(jtv) } - case "enabled": + case "id": if value != nil { - jtv, ok := value.(bool) + jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + return fmt.Errorf("expected DbInstanceId to be of type string, got %T instead", value) } - sv.Enabled = ptr.Bool(jtv) + sv.Id = ptr.String(jtv) } - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson10_deserializeDocumentServiceQuotaExceededException(v **types.ServiceQuotaExceededException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ServiceQuotaExceededException - if *v == nil { - sv = &types.ServiceQuotaExceededException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message", "Message": + case 
"influxAuthParametersSecretArn": if value != nil { jtv, ok := value.(string) if !ok { return fmt.Errorf("expected String to be of type string, got %T instead", value) } - sv.Message = ptr.String(jtv) + sv.InfluxAuthParametersSecretArn = ptr.String(jtv) } - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson10_deserializeDocumentThrottlingException(v **types.ThrottlingException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } + case "instanceMode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected InstanceMode to be of type string, got %T instead", value) + } + sv.InstanceMode = types.InstanceMode(jtv) + } - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } + case "logDeliveryConfiguration": + if err := awsAwsjson10_deserializeDocumentLogDeliveryConfiguration(&sv.LogDeliveryConfiguration, value); err != nil { + return err + } - var sv *types.ThrottlingException - if *v == nil { - sv = &types.ThrottlingException{} - } else { - sv = *v - } + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbInstanceName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } - for key, value := range shape { - switch key { - case "message", "Message": + case "networkType": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected String to be of type string, got %T instead", value) + return fmt.Errorf("expected NetworkType to be of type string, got %T instead", value) } - sv.Message = ptr.String(jtv) + sv.NetworkType = types.NetworkType(jtv) } - case "retryAfterSeconds": + case "port": if value != nil { jtv, ok := value.(json.Number) if !ok { - return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + return fmt.Errorf("expected Port to be json.Number, got %T instead", value) } i64, err := jtv.Int64() if err != nil { return err } - sv.RetryAfterSeconds = ptr.Int32(int32(i64)) + sv.Port = ptr.Int32(int32(i64)) + } + + case "publiclyAccessible": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.PubliclyAccessible = ptr.Bool(jtv) + } + + case "secondaryAvailabilityZone": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.SecondaryAvailabilityZone = ptr.String(jtv) + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Status to be of type string, got %T instead", value) + } + sv.Status = types.Status(jtv) + } + + case "vpcSecurityGroupIds": + if err := awsAwsjson10_deserializeDocumentVpcSecurityGroupIdList(&sv.VpcSecurityGroupIds, value); err != nil { + return err + } + + case "vpcSubnetIds": + if err := awsAwsjson10_deserializeDocumentVpcSubnetIdList(&sv.VpcSubnetIds, value); err != nil { + return err } default: @@ -2718,7 +4216,7 @@ func awsAwsjson10_deserializeDocumentThrottlingException(v **types.ThrottlingExc return nil } -func awsAwsjson10_deserializeDocumentValidationException(v **types.ValidationException, value interface{}) error { +func awsAwsjson10_deserializeOpDocumentCreateDbParameterGroupOutput(v **CreateDbParameterGroupOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of 
type %T", v) } @@ -2731,31 +4229,54 @@ func awsAwsjson10_deserializeDocumentValidationException(v **types.ValidationExc return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.ValidationException + var sv *CreateDbParameterGroupOutput if *v == nil { - sv = &types.ValidationException{} + sv = &CreateDbParameterGroupOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "message", "Message": + case "arn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + } + sv.Arn = ptr.String(jtv) + } + + case "description": if value != nil { jtv, ok := value.(string) if !ok { return fmt.Errorf("expected String to be of type string, got %T instead", value) } - sv.Message = ptr.String(jtv) + sv.Description = ptr.String(jtv) } - case "reason": + case "id": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected ValidationExceptionReason to be of type string, got %T instead", value) + return fmt.Errorf("expected DbParameterGroupId to be of type string, got %T instead", value) } - sv.Reason = types.ValidationExceptionReason(jtv) + sv.Id = ptr.String(jtv) + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbParameterGroupName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "parameters": + if err := awsAwsjson10_deserializeDocumentParameters(&sv.Parameters, value); err != nil { + return err } default: @@ -2767,7 +4288,7 @@ func awsAwsjson10_deserializeDocumentValidationException(v **types.ValidationExc return nil } -func awsAwsjson10_deserializeDocumentVpcSecurityGroupIdList(v *[]string, value interface{}) error { +func awsAwsjson10_deserializeOpDocumentDeleteDbClusterOutput(v **DeleteDbClusterOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -2775,71 +4296,39 @@ func awsAwsjson10_deserializeDocumentVpcSecurityGroupIdList(v *[]string, value i return nil } - shape, ok := value.([]interface{}) + shape, ok := value.(map[string]interface{}) if !ok { return fmt.Errorf("unexpected JSON type %v", value) } - var cv []string + var sv *DeleteDbClusterOutput if *v == nil { - cv = []string{} + sv = &DeleteDbClusterOutput{} } else { - cv = *v + sv = *v } - for _, value := range shape { - var col string - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected VpcSecurityGroupId to be of type string, got %T instead", value) + for key, value := range shape { + switch key { + case "dbClusterStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ClusterStatus to be of type string, got %T instead", value) + } + sv.DbClusterStatus = types.ClusterStatus(jtv) } - col = jtv - } - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsAwsjson10_deserializeDocumentVpcSubnetIdList(v *[]string, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - var cv []string - if *v == nil { - cv = []string{} - } else { - cv = *v - } + default: + _, _ = key, value - for _, value := range shape { - var col string - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected VpcSubnetId to be of type string, got %T instead", value) - } - col = jtv 
} - cv = append(cv, col) - } - *v = cv + *v = sv return nil } -func awsAwsjson10_deserializeOpDocumentCreateDbInstanceOutput(v **CreateDbInstanceOutput, value interface{}) error { +func awsAwsjson10_deserializeOpDocumentDeleteDbInstanceOutput(v **DeleteDbInstanceOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -2852,9 +4341,9 @@ func awsAwsjson10_deserializeOpDocumentCreateDbInstanceOutput(v **CreateDbInstan return fmt.Errorf("unexpected JSON type %v", value) } - var sv *CreateDbInstanceOutput + var sv *DeleteDbInstanceOutput if *v == nil { - sv = &CreateDbInstanceOutput{} + sv = &DeleteDbInstanceOutput{} } else { sv = *v } @@ -2892,6 +4381,15 @@ func awsAwsjson10_deserializeOpDocumentCreateDbInstanceOutput(v **CreateDbInstan sv.AvailabilityZone = ptr.String(jtv) } + case "dbClusterId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbClusterId to be of type string, got %T instead", value) + } + sv.DbClusterId = ptr.String(jtv) + } + case "dbInstanceType": if value != nil { jtv, ok := value.(string) @@ -2955,6 +4453,15 @@ func awsAwsjson10_deserializeOpDocumentCreateDbInstanceOutput(v **CreateDbInstan sv.InfluxAuthParametersSecretArn = ptr.String(jtv) } + case "instanceMode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected InstanceMode to be of type string, got %T instead", value) + } + sv.InstanceMode = types.InstanceMode(jtv) + } + case "logDeliveryConfiguration": if err := awsAwsjson10_deserializeDocumentLogDeliveryConfiguration(&sv.LogDeliveryConfiguration, value); err != nil { return err @@ -3037,79 +4544,7 @@ func awsAwsjson10_deserializeOpDocumentCreateDbInstanceOutput(v **CreateDbInstan return nil } -func awsAwsjson10_deserializeOpDocumentCreateDbParameterGroupOutput(v **CreateDbParameterGroupOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *CreateDbParameterGroupOutput - if *v == nil { - sv = &CreateDbParameterGroupOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "arn": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Arn to be of type string, got %T instead", value) - } - sv.Arn = ptr.String(jtv) - } - - case "description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected String to be of type string, got %T instead", value) - } - sv.Description = ptr.String(jtv) - } - - case "id": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected DbParameterGroupId to be of type string, got %T instead", value) - } - sv.Id = ptr.String(jtv) - } - - case "name": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected DbParameterGroupName to be of type string, got %T instead", value) - } - sv.Name = ptr.String(jtv) - } - - case "parameters": - if err := awsAwsjson10_deserializeDocumentParameters(&sv.Parameters, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsAwsjson10_deserializeOpDocumentDeleteDbInstanceOutput(v **DeleteDbInstanceOutput, value interface{}) error { +func awsAwsjson10_deserializeOpDocumentGetDbClusterOutput(v **GetDbClusterOutput, value interface{}) error { if v == nil 
{ return fmt.Errorf("unexpected nil of type %T", v) } @@ -3122,9 +4557,9 @@ func awsAwsjson10_deserializeOpDocumentDeleteDbInstanceOutput(v **DeleteDbInstan return fmt.Errorf("unexpected JSON type %v", value) } - var sv *DeleteDbInstanceOutput + var sv *GetDbClusterOutput if *v == nil { - sv = &DeleteDbInstanceOutput{} + sv = &GetDbClusterOutput{} } else { sv = *v } @@ -3153,15 +4588,6 @@ func awsAwsjson10_deserializeOpDocumentDeleteDbInstanceOutput(v **DeleteDbInstan sv.Arn = ptr.String(jtv) } - case "availabilityZone": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected String to be of type string, got %T instead", value) - } - sv.AvailabilityZone = ptr.String(jtv) - } - case "dbInstanceType": if value != nil { jtv, ok := value.(string) @@ -3193,9 +4619,9 @@ func awsAwsjson10_deserializeOpDocumentDeleteDbInstanceOutput(v **DeleteDbInstan if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected DeploymentType to be of type string, got %T instead", value) + return fmt.Errorf("expected ClusterDeploymentType to be of type string, got %T instead", value) } - sv.DeploymentType = types.DeploymentType(jtv) + sv.DeploymentType = types.ClusterDeploymentType(jtv) } case "endpoint": @@ -3207,11 +4633,20 @@ func awsAwsjson10_deserializeOpDocumentDeleteDbInstanceOutput(v **DeleteDbInstan sv.Endpoint = ptr.String(jtv) } + case "failoverMode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected FailoverMode to be of type string, got %T instead", value) + } + sv.FailoverMode = types.FailoverMode(jtv) + } + case "id": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected DbInstanceId to be of type string, got %T instead", value) + return fmt.Errorf("expected DbClusterId to be of type string, got %T instead", value) } sv.Id = ptr.String(jtv) } @@ -3234,7 +4669,7 @@ func awsAwsjson10_deserializeOpDocumentDeleteDbInstanceOutput(v **DeleteDbInstan if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected DbInstanceName to be of type string, got %T instead", value) + return fmt.Errorf("expected DbClusterName to be of type string, got %T instead", value) } sv.Name = ptr.String(jtv) } @@ -3270,22 +4705,22 @@ func awsAwsjson10_deserializeOpDocumentDeleteDbInstanceOutput(v **DeleteDbInstan sv.PubliclyAccessible = ptr.Bool(jtv) } - case "secondaryAvailabilityZone": + case "readerEndpoint": if value != nil { jtv, ok := value.(string) if !ok { return fmt.Errorf("expected String to be of type string, got %T instead", value) } - sv.SecondaryAvailabilityZone = ptr.String(jtv) + sv.ReaderEndpoint = ptr.String(jtv) } case "status": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected Status to be of type string, got %T instead", value) + return fmt.Errorf("expected ClusterStatus to be of type string, got %T instead", value) } - sv.Status = types.Status(jtv) + sv.Status = types.ClusterStatus(jtv) } case "vpcSecurityGroupIds": @@ -3360,6 +4795,15 @@ func awsAwsjson10_deserializeOpDocumentGetDbInstanceOutput(v **GetDbInstanceOutp sv.AvailabilityZone = ptr.String(jtv) } + case "dbClusterId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbClusterId to be of type string, got %T instead", value) + } + sv.DbClusterId = ptr.String(jtv) + } + case "dbInstanceType": if value != nil { jtv, ok := value.(string) @@ -3423,6 +4867,15 @@ func awsAwsjson10_deserializeOpDocumentGetDbInstanceOutput(v 
**GetDbInstanceOutp sv.InfluxAuthParametersSecretArn = ptr.String(jtv) } + case "instanceMode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected InstanceMode to be of type string, got %T instead", value) + } + sv.InstanceMode = types.InstanceMode(jtv) + } + case "logDeliveryConfiguration": if err := awsAwsjson10_deserializeDocumentLogDeliveryConfiguration(&sv.LogDeliveryConfiguration, value); err != nil { return err @@ -3577,6 +5030,96 @@ func awsAwsjson10_deserializeOpDocumentGetDbParameterGroupOutput(v **GetDbParame return nil } +func awsAwsjson10_deserializeOpDocumentListDbClustersOutput(v **ListDbClustersOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListDbClustersOutput + if *v == nil { + sv = &ListDbClustersOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "items": + if err := awsAwsjson10_deserializeDocumentDbClusterSummaryList(&sv.Items, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentListDbInstancesForClusterOutput(v **ListDbInstancesForClusterOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListDbInstancesForClusterOutput + if *v == nil { + sv = &ListDbInstancesForClusterOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "items": + if err := awsAwsjson10_deserializeDocumentDbInstanceForClusterSummaryList(&sv.Items, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson10_deserializeOpDocumentListDbInstancesOutput(v **ListDbInstancesOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -3703,6 +5246,46 @@ func awsAwsjson10_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsFor return nil } +func awsAwsjson10_deserializeOpDocumentUpdateDbClusterOutput(v **UpdateDbClusterOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateDbClusterOutput + if *v == nil { + sv = &UpdateDbClusterOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "dbClusterStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ClusterStatus to be of type string, got %T instead", value) + } + sv.DbClusterStatus = types.ClusterStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func 
awsAwsjson10_deserializeOpDocumentUpdateDbInstanceOutput(v **UpdateDbInstanceOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -3756,6 +5339,15 @@ func awsAwsjson10_deserializeOpDocumentUpdateDbInstanceOutput(v **UpdateDbInstan sv.AvailabilityZone = ptr.String(jtv) } + case "dbClusterId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DbClusterId to be of type string, got %T instead", value) + } + sv.DbClusterId = ptr.String(jtv) + } + case "dbInstanceType": if value != nil { jtv, ok := value.(string) @@ -3819,6 +5411,15 @@ func awsAwsjson10_deserializeOpDocumentUpdateDbInstanceOutput(v **UpdateDbInstan sv.InfluxAuthParametersSecretArn = ptr.String(jtv) } + case "instanceMode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected InstanceMode to be of type string, got %T instead", value) + } + sv.InstanceMode = types.InstanceMode(jtv) + } + case "logDeliveryConfiguration": if err := awsAwsjson10_deserializeDocumentLogDeliveryConfiguration(&sv.LogDeliveryConfiguration, value); err != nil { return err diff --git a/service/timestreaminfluxdb/generated.json b/service/timestreaminfluxdb/generated.json index 2aa4f0ace36..dc997908fe6 100644 --- a/service/timestreaminfluxdb/generated.json +++ b/service/timestreaminfluxdb/generated.json @@ -8,16 +8,22 @@ "files": [ "api_client.go", "api_client_test.go", + "api_op_CreateDbCluster.go", "api_op_CreateDbInstance.go", "api_op_CreateDbParameterGroup.go", + "api_op_DeleteDbCluster.go", "api_op_DeleteDbInstance.go", + "api_op_GetDbCluster.go", "api_op_GetDbInstance.go", "api_op_GetDbParameterGroup.go", + "api_op_ListDbClusters.go", "api_op_ListDbInstances.go", + "api_op_ListDbInstancesForCluster.go", "api_op_ListDbParameterGroups.go", "api_op_ListTagsForResource.go", "api_op_TagResource.go", "api_op_UntagResource.go", + "api_op_UpdateDbCluster.go", "api_op_UpdateDbInstance.go", "auth.go", "deserializers.go", diff --git a/service/timestreaminfluxdb/serializers.go b/service/timestreaminfluxdb/serializers.go index cca2057fc91..a97f95f0985 100644 --- a/service/timestreaminfluxdb/serializers.go +++ b/service/timestreaminfluxdb/serializers.go @@ -16,6 +16,67 @@ import ( "path" ) +type awsAwsjson10_serializeOpCreateDbCluster struct { +} + +func (*awsAwsjson10_serializeOpCreateDbCluster) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpCreateDbCluster) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateDbClusterInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + 
request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.CreateDbCluster") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentCreateDbClusterInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + type awsAwsjson10_serializeOpCreateDbInstance struct { } @@ -138,6 +199,67 @@ func (m *awsAwsjson10_serializeOpCreateDbParameterGroup) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } +type awsAwsjson10_serializeOpDeleteDbCluster struct { +} + +func (*awsAwsjson10_serializeOpDeleteDbCluster) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpDeleteDbCluster) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteDbClusterInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.DeleteDbCluster") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentDeleteDbClusterInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + type 
awsAwsjson10_serializeOpDeleteDbInstance struct { } @@ -181,7 +303,251 @@ func (m *awsAwsjson10_serializeOpDeleteDbInstance) HandleSerialize(ctx context.C httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.DeleteDbInstance") jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson10_serializeOpDocumentDeleteDbInstanceInput(input, jsonEncoder.Value); err != nil { + if err := awsAwsjson10_serializeOpDocumentDeleteDbInstanceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpGetDbCluster struct { +} + +func (*awsAwsjson10_serializeOpGetDbCluster) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpGetDbCluster) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetDbClusterInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.GetDbCluster") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentGetDbClusterInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpGetDbInstance struct { +} + +func (*awsAwsjson10_serializeOpGetDbInstance) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpGetDbInstance) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + 
out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetDbInstanceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.GetDbInstance") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentGetDbInstanceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpGetDbParameterGroup struct { +} + +func (*awsAwsjson10_serializeOpGetDbParameterGroup) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpGetDbParameterGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetDbParameterGroupInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + 
httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.GetDbParameterGroup") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentGetDbParameterGroupInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpListDbClusters struct { +} + +func (*awsAwsjson10_serializeOpListDbClusters) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpListDbClusters) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListDbClustersInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.ListDbClusters") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentListDbClustersInput(input, jsonEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -199,14 +565,14 @@ func (m *awsAwsjson10_serializeOpDeleteDbInstance) HandleSerialize(ctx context.C return next.HandleSerialize(ctx, in) } -type awsAwsjson10_serializeOpGetDbInstance struct { +type awsAwsjson10_serializeOpListDbInstances struct { } -func (*awsAwsjson10_serializeOpGetDbInstance) ID() string { +func (*awsAwsjson10_serializeOpListDbInstances) ID() string { return "OperationSerializer" } -func (m *awsAwsjson10_serializeOpGetDbInstance) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsAwsjson10_serializeOpListDbInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -218,7 +584,7 @@ func 
(m *awsAwsjson10_serializeOpGetDbInstance) HandleSerialize(ctx context.Cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetDbInstanceInput) + input, ok := in.Parameters.(*ListDbInstancesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -239,10 +605,10 @@ func (m *awsAwsjson10_serializeOpGetDbInstance) HandleSerialize(ctx context.Cont return out, metadata, &smithy.SerializationError{Err: err} } httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.GetDbInstance") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.ListDbInstances") jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson10_serializeOpDocumentGetDbInstanceInput(input, jsonEncoder.Value); err != nil { + if err := awsAwsjson10_serializeOpDocumentListDbInstancesInput(input, jsonEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -260,14 +626,14 @@ func (m *awsAwsjson10_serializeOpGetDbInstance) HandleSerialize(ctx context.Cont return next.HandleSerialize(ctx, in) } -type awsAwsjson10_serializeOpGetDbParameterGroup struct { +type awsAwsjson10_serializeOpListDbInstancesForCluster struct { } -func (*awsAwsjson10_serializeOpGetDbParameterGroup) ID() string { +func (*awsAwsjson10_serializeOpListDbInstancesForCluster) ID() string { return "OperationSerializer" } -func (m *awsAwsjson10_serializeOpGetDbParameterGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsAwsjson10_serializeOpListDbInstancesForCluster) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -279,7 +645,7 @@ func (m *awsAwsjson10_serializeOpGetDbParameterGroup) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetDbParameterGroupInput) + input, ok := in.Parameters.(*ListDbInstancesForClusterInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -300,10 +666,10 @@ func (m *awsAwsjson10_serializeOpGetDbParameterGroup) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: err} } httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.GetDbParameterGroup") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.ListDbInstancesForCluster") jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson10_serializeOpDocumentGetDbParameterGroupInput(input, jsonEncoder.Value); err != nil { + if err := awsAwsjson10_serializeOpDocumentListDbInstancesForClusterInput(input, jsonEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -321,14 +687,14 @@ func (m *awsAwsjson10_serializeOpGetDbParameterGroup) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } -type awsAwsjson10_serializeOpListDbInstances struct { +type 
awsAwsjson10_serializeOpListDbParameterGroups struct { } -func (*awsAwsjson10_serializeOpListDbInstances) ID() string { +func (*awsAwsjson10_serializeOpListDbParameterGroups) ID() string { return "OperationSerializer" } -func (m *awsAwsjson10_serializeOpListDbInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsAwsjson10_serializeOpListDbParameterGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -340,7 +706,7 @@ func (m *awsAwsjson10_serializeOpListDbInstances) HandleSerialize(ctx context.Co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ListDbInstancesInput) + input, ok := in.Parameters.(*ListDbParameterGroupsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -361,10 +727,10 @@ func (m *awsAwsjson10_serializeOpListDbInstances) HandleSerialize(ctx context.Co return out, metadata, &smithy.SerializationError{Err: err} } httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.ListDbInstances") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.ListDbParameterGroups") jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson10_serializeOpDocumentListDbInstancesInput(input, jsonEncoder.Value); err != nil { + if err := awsAwsjson10_serializeOpDocumentListDbParameterGroupsInput(input, jsonEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -382,14 +748,14 @@ func (m *awsAwsjson10_serializeOpListDbInstances) HandleSerialize(ctx context.Co return next.HandleSerialize(ctx, in) } -type awsAwsjson10_serializeOpListDbParameterGroups struct { +type awsAwsjson10_serializeOpListTagsForResource struct { } -func (*awsAwsjson10_serializeOpListDbParameterGroups) ID() string { +func (*awsAwsjson10_serializeOpListTagsForResource) ID() string { return "OperationSerializer" } -func (m *awsAwsjson10_serializeOpListDbParameterGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsAwsjson10_serializeOpListTagsForResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -401,7 +767,7 @@ func (m *awsAwsjson10_serializeOpListDbParameterGroups) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ListDbParameterGroupsInput) + input, ok := in.Parameters.(*ListTagsForResourceInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -422,10 +788,10 @@ func (m *awsAwsjson10_serializeOpListDbParameterGroups) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: err} } httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") - 
httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.ListDbParameterGroups") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.ListTagsForResource") jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson10_serializeOpDocumentListDbParameterGroupsInput(input, jsonEncoder.Value); err != nil { + if err := awsAwsjson10_serializeOpDocumentListTagsForResourceInput(input, jsonEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -443,14 +809,14 @@ func (m *awsAwsjson10_serializeOpListDbParameterGroups) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsAwsjson10_serializeOpListTagsForResource struct { +type awsAwsjson10_serializeOpTagResource struct { } -func (*awsAwsjson10_serializeOpListTagsForResource) ID() string { +func (*awsAwsjson10_serializeOpTagResource) ID() string { return "OperationSerializer" } -func (m *awsAwsjson10_serializeOpListTagsForResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsAwsjson10_serializeOpTagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -462,7 +828,7 @@ func (m *awsAwsjson10_serializeOpListTagsForResource) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ListTagsForResourceInput) + input, ok := in.Parameters.(*TagResourceInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -483,10 +849,10 @@ func (m *awsAwsjson10_serializeOpListTagsForResource) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: err} } httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.ListTagsForResource") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.TagResource") jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson10_serializeOpDocumentListTagsForResourceInput(input, jsonEncoder.Value); err != nil { + if err := awsAwsjson10_serializeOpDocumentTagResourceInput(input, jsonEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -504,14 +870,14 @@ func (m *awsAwsjson10_serializeOpListTagsForResource) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } -type awsAwsjson10_serializeOpTagResource struct { +type awsAwsjson10_serializeOpUntagResource struct { } -func (*awsAwsjson10_serializeOpTagResource) ID() string { +func (*awsAwsjson10_serializeOpUntagResource) ID() string { return "OperationSerializer" } -func (m *awsAwsjson10_serializeOpTagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsAwsjson10_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -523,7 +889,7 @@ func (m *awsAwsjson10_serializeOpTagResource) HandleSerialize(ctx context.Contex return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*TagResourceInput) + input, ok := in.Parameters.(*UntagResourceInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -544,10 +910,10 @@ func (m *awsAwsjson10_serializeOpTagResource) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: err} } httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.TagResource") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.UntagResource") jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson10_serializeOpDocumentTagResourceInput(input, jsonEncoder.Value); err != nil { + if err := awsAwsjson10_serializeOpDocumentUntagResourceInput(input, jsonEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -565,14 +931,14 @@ func (m *awsAwsjson10_serializeOpTagResource) HandleSerialize(ctx context.Contex return next.HandleSerialize(ctx, in) } -type awsAwsjson10_serializeOpUntagResource struct { +type awsAwsjson10_serializeOpUpdateDbCluster struct { } -func (*awsAwsjson10_serializeOpUntagResource) ID() string { +func (*awsAwsjson10_serializeOpUpdateDbCluster) ID() string { return "OperationSerializer" } -func (m *awsAwsjson10_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsAwsjson10_serializeOpUpdateDbCluster) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -584,7 +950,7 @@ func (m *awsAwsjson10_serializeOpUntagResource) HandleSerialize(ctx context.Cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*UntagResourceInput) + input, ok := in.Parameters.(*UpdateDbClusterInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -605,10 +971,10 @@ func (m *awsAwsjson10_serializeOpUntagResource) HandleSerialize(ctx context.Cont return out, metadata, &smithy.SerializationError{Err: err} } httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") - httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.UntagResource") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonTimestreamInfluxDB.UpdateDbCluster") jsonEncoder := smithyjson.NewEncoder() - if err := awsAwsjson10_serializeOpDocumentUntagResourceInput(input, jsonEncoder.Value); err != nil { + if err := awsAwsjson10_serializeOpDocumentUpdateDbClusterInput(input, jsonEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -989,6 +1355,111 @@ func awsAwsjson10_serializeDocumentVpcSubnetIdList(v []string, value smithyjson. 
return nil } +func awsAwsjson10_serializeOpDocumentCreateDbClusterInput(v *CreateDbClusterInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AllocatedStorage != nil { + ok := object.Key("allocatedStorage") + ok.Integer(*v.AllocatedStorage) + } + + if v.Bucket != nil { + ok := object.Key("bucket") + ok.String(*v.Bucket) + } + + if len(v.DbInstanceType) > 0 { + ok := object.Key("dbInstanceType") + ok.String(string(v.DbInstanceType)) + } + + if v.DbParameterGroupIdentifier != nil { + ok := object.Key("dbParameterGroupIdentifier") + ok.String(*v.DbParameterGroupIdentifier) + } + + if len(v.DbStorageType) > 0 { + ok := object.Key("dbStorageType") + ok.String(string(v.DbStorageType)) + } + + if len(v.DeploymentType) > 0 { + ok := object.Key("deploymentType") + ok.String(string(v.DeploymentType)) + } + + if len(v.FailoverMode) > 0 { + ok := object.Key("failoverMode") + ok.String(string(v.FailoverMode)) + } + + if v.LogDeliveryConfiguration != nil { + ok := object.Key("logDeliveryConfiguration") + if err := awsAwsjson10_serializeDocumentLogDeliveryConfiguration(v.LogDeliveryConfiguration, ok); err != nil { + return err + } + } + + if v.Name != nil { + ok := object.Key("name") + ok.String(*v.Name) + } + + if len(v.NetworkType) > 0 { + ok := object.Key("networkType") + ok.String(string(v.NetworkType)) + } + + if v.Organization != nil { + ok := object.Key("organization") + ok.String(*v.Organization) + } + + if v.Password != nil { + ok := object.Key("password") + ok.String(*v.Password) + } + + if v.Port != nil { + ok := object.Key("port") + ok.Integer(*v.Port) + } + + if v.PubliclyAccessible != nil { + ok := object.Key("publiclyAccessible") + ok.Boolean(*v.PubliclyAccessible) + } + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsAwsjson10_serializeDocumentRequestTagMap(v.Tags, ok); err != nil { + return err + } + } + + if v.Username != nil { + ok := object.Key("username") + ok.String(*v.Username) + } + + if v.VpcSecurityGroupIds != nil { + ok := object.Key("vpcSecurityGroupIds") + if err := awsAwsjson10_serializeDocumentVpcSecurityGroupIdList(v.VpcSecurityGroupIds, ok); err != nil { + return err + } + } + + if v.VpcSubnetIds != nil { + ok := object.Key("vpcSubnetIds") + if err := awsAwsjson10_serializeDocumentVpcSubnetIdList(v.VpcSubnetIds, ok); err != nil { + return err + } + } + + return nil +} + func awsAwsjson10_serializeOpDocumentCreateDbInstanceInput(v *CreateDbInstanceInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -1120,6 +1591,18 @@ func awsAwsjson10_serializeOpDocumentCreateDbParameterGroupInput(v *CreateDbPara return nil } +func awsAwsjson10_serializeOpDocumentDeleteDbClusterInput(v *DeleteDbClusterInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DbClusterId != nil { + ok := object.Key("dbClusterId") + ok.String(*v.DbClusterId) + } + + return nil +} + func awsAwsjson10_serializeOpDocumentDeleteDbInstanceInput(v *DeleteDbInstanceInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -1132,6 +1615,18 @@ func awsAwsjson10_serializeOpDocumentDeleteDbInstanceInput(v *DeleteDbInstanceIn return nil } +func awsAwsjson10_serializeOpDocumentGetDbClusterInput(v *GetDbClusterInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DbClusterId != nil { + ok := object.Key("dbClusterId") + ok.String(*v.DbClusterId) + } + + return nil +} + func 
awsAwsjson10_serializeOpDocumentGetDbInstanceInput(v *GetDbInstanceInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -1156,6 +1651,45 @@ func awsAwsjson10_serializeOpDocumentGetDbParameterGroupInput(v *GetDbParameterG return nil } +func awsAwsjson10_serializeOpDocumentListDbClustersInput(v *ListDbClustersInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentListDbInstancesForClusterInput(v *ListDbInstancesForClusterInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DbClusterId != nil { + ok := object.Key("dbClusterId") + ok.String(*v.DbClusterId) + } + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + return nil +} + func awsAwsjson10_serializeOpDocumentListDbInstancesInput(v *ListDbInstancesInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -1240,6 +1774,45 @@ func awsAwsjson10_serializeOpDocumentUntagResourceInput(v *UntagResourceInput, v return nil } +func awsAwsjson10_serializeOpDocumentUpdateDbClusterInput(v *UpdateDbClusterInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DbClusterId != nil { + ok := object.Key("dbClusterId") + ok.String(*v.DbClusterId) + } + + if len(v.DbInstanceType) > 0 { + ok := object.Key("dbInstanceType") + ok.String(string(v.DbInstanceType)) + } + + if v.DbParameterGroupIdentifier != nil { + ok := object.Key("dbParameterGroupIdentifier") + ok.String(*v.DbParameterGroupIdentifier) + } + + if len(v.FailoverMode) > 0 { + ok := object.Key("failoverMode") + ok.String(string(v.FailoverMode)) + } + + if v.LogDeliveryConfiguration != nil { + ok := object.Key("logDeliveryConfiguration") + if err := awsAwsjson10_serializeDocumentLogDeliveryConfiguration(v.LogDeliveryConfiguration, ok); err != nil { + return err + } + } + + if v.Port != nil { + ok := object.Key("port") + ok.Integer(*v.Port) + } + + return nil +} + func awsAwsjson10_serializeOpDocumentUpdateDbInstanceInput(v *UpdateDbInstanceInput, value smithyjson.Value) error { object := value.Object() defer object.Close() diff --git a/service/timestreaminfluxdb/snapshot/api_op_CreateDbCluster.go.snap b/service/timestreaminfluxdb/snapshot/api_op_CreateDbCluster.go.snap new file mode 100644 index 00000000000..5c4ecdcb84d --- /dev/null +++ b/service/timestreaminfluxdb/snapshot/api_op_CreateDbCluster.go.snap @@ -0,0 +1,41 @@ +CreateDbCluster + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + spanRetryLoop + Retry + RetryMetricsHeader + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + 
ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/timestreaminfluxdb/snapshot/api_op_DeleteDbCluster.go.snap b/service/timestreaminfluxdb/snapshot/api_op_DeleteDbCluster.go.snap new file mode 100644 index 00000000000..4b83ee442d2 --- /dev/null +++ b/service/timestreaminfluxdb/snapshot/api_op_DeleteDbCluster.go.snap @@ -0,0 +1,41 @@ +DeleteDbCluster + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + spanRetryLoop + Retry + RetryMetricsHeader + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/timestreaminfluxdb/snapshot/api_op_GetDbCluster.go.snap b/service/timestreaminfluxdb/snapshot/api_op_GetDbCluster.go.snap new file mode 100644 index 00000000000..875fc3223be --- /dev/null +++ b/service/timestreaminfluxdb/snapshot/api_op_GetDbCluster.go.snap @@ -0,0 +1,41 @@ +GetDbCluster + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + spanRetryLoop + Retry + RetryMetricsHeader + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/timestreaminfluxdb/snapshot/api_op_ListDbClusters.go.snap b/service/timestreaminfluxdb/snapshot/api_op_ListDbClusters.go.snap new file mode 100644 index 00000000000..48a6bb3bc8d --- /dev/null +++ b/service/timestreaminfluxdb/snapshot/api_op_ListDbClusters.go.snap @@ -0,0 +1,40 @@ +ListDbClusters + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + spanRetryLoop + Retry + RetryMetricsHeader + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + 
AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/timestreaminfluxdb/snapshot/api_op_ListDbInstancesForCluster.go.snap b/service/timestreaminfluxdb/snapshot/api_op_ListDbInstancesForCluster.go.snap new file mode 100644 index 00000000000..66065cbd817 --- /dev/null +++ b/service/timestreaminfluxdb/snapshot/api_op_ListDbInstancesForCluster.go.snap @@ -0,0 +1,41 @@ +ListDbInstancesForCluster + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + spanRetryLoop + Retry + RetryMetricsHeader + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/timestreaminfluxdb/snapshot/api_op_UpdateDbCluster.go.snap b/service/timestreaminfluxdb/snapshot/api_op_UpdateDbCluster.go.snap new file mode 100644 index 00000000000..06a62b14f51 --- /dev/null +++ b/service/timestreaminfluxdb/snapshot/api_op_UpdateDbCluster.go.snap @@ -0,0 +1,41 @@ +UpdateDbCluster + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + spanRetryLoop + Retry + RetryMetricsHeader + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/timestreaminfluxdb/snapshot_test.go b/service/timestreaminfluxdb/snapshot_test.go index 260a81c67b0..d0910553c84 100644 --- a/service/timestreaminfluxdb/snapshot_test.go +++ b/service/timestreaminfluxdb/snapshot_test.go @@ -62,6 +62,18 @@ func testSnapshot(stack *middleware.Stack, operation string) error { } return snapshotOK{} } +func TestCheckSnapshot_CreateDbCluster(t *testing.T) { + svc := New(Options{}) + _, err := svc.CreateDbCluster(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "CreateDbCluster") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_CreateDbInstance(t *testing.T) { svc := New(Options{}) _, err := svc.CreateDbInstance(context.Background(), nil, func(o *Options) { @@ -86,6 +98,18 @@ func TestCheckSnapshot_CreateDbParameterGroup(t *testing.T) { } } +func TestCheckSnapshot_DeleteDbCluster(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteDbCluster(context.Background(), nil, 
func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DeleteDbCluster") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_DeleteDbInstance(t *testing.T) { svc := New(Options{}) _, err := svc.DeleteDbInstance(context.Background(), nil, func(o *Options) { @@ -98,6 +122,18 @@ func TestCheckSnapshot_DeleteDbInstance(t *testing.T) { } } +func TestCheckSnapshot_GetDbCluster(t *testing.T) { + svc := New(Options{}) + _, err := svc.GetDbCluster(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "GetDbCluster") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_GetDbInstance(t *testing.T) { svc := New(Options{}) _, err := svc.GetDbInstance(context.Background(), nil, func(o *Options) { @@ -122,6 +158,18 @@ func TestCheckSnapshot_GetDbParameterGroup(t *testing.T) { } } +func TestCheckSnapshot_ListDbClusters(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListDbClusters(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ListDbClusters") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_ListDbInstances(t *testing.T) { svc := New(Options{}) _, err := svc.ListDbInstances(context.Background(), nil, func(o *Options) { @@ -134,6 +182,18 @@ func TestCheckSnapshot_ListDbInstances(t *testing.T) { } } +func TestCheckSnapshot_ListDbInstancesForCluster(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListDbInstancesForCluster(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ListDbInstancesForCluster") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_ListDbParameterGroups(t *testing.T) { svc := New(Options{}) _, err := svc.ListDbParameterGroups(context.Background(), nil, func(o *Options) { @@ -182,6 +242,18 @@ func TestCheckSnapshot_UntagResource(t *testing.T) { } } +func TestCheckSnapshot_UpdateDbCluster(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateDbCluster(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "UpdateDbCluster") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_UpdateDbInstance(t *testing.T) { svc := New(Options{}) _, err := svc.UpdateDbInstance(context.Background(), nil, func(o *Options) { @@ -193,6 +265,18 @@ func TestCheckSnapshot_UpdateDbInstance(t *testing.T) { t.Fatal(err) } } +func TestUpdateSnapshot_CreateDbCluster(t *testing.T) { + svc := New(Options{}) + _, err := svc.CreateDbCluster(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "CreateDbCluster") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_CreateDbInstance(t *testing.T) { svc := New(Options{}) _, err := svc.CreateDbInstance(context.Background(), nil, func(o *Options) { @@ -217,6 +301,18 @@ func TestUpdateSnapshot_CreateDbParameterGroup(t *testing.T) { } } +func 
TestUpdateSnapshot_DeleteDbCluster(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteDbCluster(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DeleteDbCluster") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_DeleteDbInstance(t *testing.T) { svc := New(Options{}) _, err := svc.DeleteDbInstance(context.Background(), nil, func(o *Options) { @@ -229,6 +325,18 @@ func TestUpdateSnapshot_DeleteDbInstance(t *testing.T) { } } +func TestUpdateSnapshot_GetDbCluster(t *testing.T) { + svc := New(Options{}) + _, err := svc.GetDbCluster(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "GetDbCluster") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_GetDbInstance(t *testing.T) { svc := New(Options{}) _, err := svc.GetDbInstance(context.Background(), nil, func(o *Options) { @@ -253,6 +361,18 @@ func TestUpdateSnapshot_GetDbParameterGroup(t *testing.T) { } } +func TestUpdateSnapshot_ListDbClusters(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListDbClusters(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "ListDbClusters") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_ListDbInstances(t *testing.T) { svc := New(Options{}) _, err := svc.ListDbInstances(context.Background(), nil, func(o *Options) { @@ -265,6 +385,18 @@ func TestUpdateSnapshot_ListDbInstances(t *testing.T) { } } +func TestUpdateSnapshot_ListDbInstancesForCluster(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListDbInstancesForCluster(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "ListDbInstancesForCluster") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_ListDbParameterGroups(t *testing.T) { svc := New(Options{}) _, err := svc.ListDbParameterGroups(context.Background(), nil, func(o *Options) { @@ -313,6 +445,18 @@ func TestUpdateSnapshot_UntagResource(t *testing.T) { } } +func TestUpdateSnapshot_UpdateDbCluster(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateDbCluster(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "UpdateDbCluster") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_UpdateDbInstance(t *testing.T) { svc := New(Options{}) _, err := svc.UpdateDbInstance(context.Background(), nil, func(o *Options) { diff --git a/service/timestreaminfluxdb/types/enums.go b/service/timestreaminfluxdb/types/enums.go index 687f5e1ff07..6b814b33366 100644 --- a/service/timestreaminfluxdb/types/enums.go +++ b/service/timestreaminfluxdb/types/enums.go @@ -2,6 +2,50 @@ package types +type ClusterDeploymentType string + +// Enum values for ClusterDeploymentType +const ( + ClusterDeploymentTypeMultiNodeReadReplicas ClusterDeploymentType = "MULTI_NODE_READ_REPLICAS" +) + +// Values returns all known values for ClusterDeploymentType. 
Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ClusterDeploymentType) Values() []ClusterDeploymentType { + return []ClusterDeploymentType{ + "MULTI_NODE_READ_REPLICAS", + } +} + +type ClusterStatus string + +// Enum values for ClusterStatus +const ( + ClusterStatusCreating ClusterStatus = "CREATING" + ClusterStatusUpdating ClusterStatus = "UPDATING" + ClusterStatusDeleting ClusterStatus = "DELETING" + ClusterStatusAvailable ClusterStatus = "AVAILABLE" + ClusterStatusFailed ClusterStatus = "FAILED" + ClusterStatusDeleted ClusterStatus = "DELETED" +) + +// Values returns all known values for ClusterStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ClusterStatus) Values() []ClusterStatus { + return []ClusterStatus{ + "CREATING", + "UPDATING", + "DELETING", + "AVAILABLE", + "FAILED", + "DELETED", + } +} + type DbInstanceType string // Enum values for DbInstanceType @@ -96,6 +140,46 @@ func (DurationType) Values() []DurationType { } } +type FailoverMode string + +// Enum values for FailoverMode +const ( + FailoverModeAutomatic FailoverMode = "AUTOMATIC" + FailoverModeNoFailover FailoverMode = "NO_FAILOVER" +) + +// Values returns all known values for FailoverMode. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (FailoverMode) Values() []FailoverMode { + return []FailoverMode{ + "AUTOMATIC", + "NO_FAILOVER", + } +} + +type InstanceMode string + +// Enum values for InstanceMode +const ( + InstanceModePrimary InstanceMode = "PRIMARY" + InstanceModeStandby InstanceMode = "STANDBY" + InstanceModeReplica InstanceMode = "REPLICA" +) + +// Values returns all known values for InstanceMode. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (InstanceMode) Values() []InstanceMode { + return []InstanceMode{ + "PRIMARY", + "STANDBY", + "REPLICA", + } +} + type LogLevel string // Enum values for LogLevel diff --git a/service/timestreaminfluxdb/types/types.go b/service/timestreaminfluxdb/types/types.go index e3609d9e579..06c41d81b2e 100644 --- a/service/timestreaminfluxdb/types/types.go +++ b/service/timestreaminfluxdb/types/types.go @@ -6,6 +6,109 @@ import ( smithydocument "github.com/aws/smithy-go/document" ) +// Describes a summary of a Timestream for InfluxDB cluster. +type DbClusterSummary struct { + + // The Amazon Resource Name (ARN) of the DB cluster. + // + // This member is required. + Arn *string + + // Service-generated unique identifier of the DB cluster to retrieve. + // + // This member is required. + Id *string + + // Customer supplied name of the Timestream for InfluxDB cluster. + // + // This member is required. + Name *string + + // The amount of storage allocated for your DB storage type (in gibibytes). + AllocatedStorage *int32 + + // The Timestream for InfluxDB DB instance type that InfluxDB runs on. + DbInstanceType DbInstanceType + + // The Timestream for InfluxDB DB storage type that InfluxDB stores data on. 
+ DbStorageType DbStorageType + + // Deployment type of the DB cluster + DeploymentType ClusterDeploymentType + + // The endpoint used to connect to the Timestream for InfluxDB cluster for write + // and read operations. + Endpoint *string + + // Specifies whether the network type of the Timestream for InfluxDB Cluster is + // IPv4, which can communicate over IPv4 protocol only, or DUAL, which can + // communicate over both IPv4 and IPv6 protocols. + NetworkType NetworkType + + // The port number on which InfluxDB accepts connections. + Port *int32 + + // The endpoint used to connect to the Timestream for InfluxDB cluster for + // read-only operations. + ReaderEndpoint *string + + // The status of the DB cluster. + Status ClusterStatus + + noSmithyDocumentSerde +} + +// Contains a summary of a DB instance belonging to a DB cluster. +type DbInstanceForClusterSummary struct { + + // The Amazon Resource Name (ARN) of the DB instance. + // + // This member is required. + Arn *string + + // The service-generated unique identifier of the DB instance. + // + // This member is required. + Id *string + + // A service-generated name for the DB instance based on the customer-supplied + // name for the DB cluster. + // + // This member is required. + Name *string + + // The amount of storage allocated for your DB storage type in GiB (gibibytes). + AllocatedStorage *int32 + + // The Timestream for InfluxDB instance type to run InfluxDB on. + DbInstanceType DbInstanceType + + // The storage type for your DB instance. + DbStorageType DbStorageType + + // Specifies the deployment type if applicable. + DeploymentType DeploymentType + + // The endpoint used to connect to InfluxDB. The default InfluxDB port is 8086. + Endpoint *string + + // Specifies the DB instance's role in the cluster. + InstanceMode InstanceMode + + // Specifies whether the network type of the Timestream for InfluxDB instance is + // IPv4, which can communicate over IPv4 protocol only, or DUAL, which can + // communicate over both IPv4 and IPv6 protocols. + NetworkType NetworkType + + // The port number on which InfluxDB accepts connections. + Port *int32 + + // The status of the DB instance. + Status Status + + noSmithyDocumentSerde +} + // Contains a summary of a DB instance. 
type DbInstanceSummary struct { diff --git a/service/timestreaminfluxdb/validators.go b/service/timestreaminfluxdb/validators.go index 5e046958339..98a3b166cdc 100644 --- a/service/timestreaminfluxdb/validators.go +++ b/service/timestreaminfluxdb/validators.go @@ -10,6 +10,26 @@ import ( "github.com/aws/smithy-go/middleware" ) +type validateOpCreateDbCluster struct { +} + +func (*validateOpCreateDbCluster) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateDbCluster) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateDbClusterInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateDbClusterInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpCreateDbInstance struct { } @@ -50,6 +70,26 @@ func (m *validateOpCreateDbParameterGroup) HandleInitialize(ctx context.Context, return next.HandleInitialize(ctx, in) } +type validateOpDeleteDbCluster struct { +} + +func (*validateOpDeleteDbCluster) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteDbCluster) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteDbClusterInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteDbClusterInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpDeleteDbInstance struct { } @@ -70,6 +110,26 @@ func (m *validateOpDeleteDbInstance) HandleInitialize(ctx context.Context, in mi return next.HandleInitialize(ctx, in) } +type validateOpGetDbCluster struct { +} + +func (*validateOpGetDbCluster) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetDbCluster) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetDbClusterInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetDbClusterInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpGetDbInstance struct { } @@ -110,6 +170,26 @@ func (m *validateOpGetDbParameterGroup) HandleInitialize(ctx context.Context, in return next.HandleInitialize(ctx, in) } +type validateOpListDbInstancesForCluster struct { +} + +func (*validateOpListDbInstancesForCluster) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListDbInstancesForCluster) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListDbInstancesForClusterInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListDbInstancesForClusterInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type 
validateOpListTagsForResource struct { } @@ -170,6 +250,26 @@ func (m *validateOpUntagResource) HandleInitialize(ctx context.Context, in middl return next.HandleInitialize(ctx, in) } +type validateOpUpdateDbCluster struct { +} + +func (*validateOpUpdateDbCluster) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateDbCluster) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateDbClusterInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateDbClusterInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpUpdateDbInstance struct { } @@ -190,6 +290,10 @@ func (m *validateOpUpdateDbInstance) HandleInitialize(ctx context.Context, in mi return next.HandleInitialize(ctx, in) } +func addOpCreateDbClusterValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateDbCluster{}, middleware.After) +} + func addOpCreateDbInstanceValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateDbInstance{}, middleware.After) } @@ -198,10 +302,18 @@ func addOpCreateDbParameterGroupValidationMiddleware(stack *middleware.Stack) er return stack.Initialize.Add(&validateOpCreateDbParameterGroup{}, middleware.After) } +func addOpDeleteDbClusterValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteDbCluster{}, middleware.After) +} + func addOpDeleteDbInstanceValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDeleteDbInstance{}, middleware.After) } +func addOpGetDbClusterValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetDbCluster{}, middleware.After) +} + func addOpGetDbInstanceValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpGetDbInstance{}, middleware.After) } @@ -210,6 +322,10 @@ func addOpGetDbParameterGroupValidationMiddleware(stack *middleware.Stack) error return stack.Initialize.Add(&validateOpGetDbParameterGroup{}, middleware.After) } +func addOpListDbInstancesForClusterValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListDbInstancesForCluster{}, middleware.After) +} + func addOpListTagsForResourceValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpListTagsForResource{}, middleware.After) } @@ -222,6 +338,10 @@ func addOpUntagResourceValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpUntagResource{}, middleware.After) } +func addOpUpdateDbClusterValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateDbCluster{}, middleware.After) +} + func addOpUpdateDbInstanceValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpUpdateDbInstance{}, middleware.After) } @@ -352,6 +472,44 @@ func validateS3Configuration(v *types.S3Configuration) error { } } +func validateOpCreateDbClusterInput(v *CreateDbClusterInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateDbClusterInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if 
v.Password == nil { + invalidParams.Add(smithy.NewErrParamRequired("Password")) + } + if len(v.DbInstanceType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("DbInstanceType")) + } + if v.AllocatedStorage == nil { + invalidParams.Add(smithy.NewErrParamRequired("AllocatedStorage")) + } + if v.VpcSubnetIds == nil { + invalidParams.Add(smithy.NewErrParamRequired("VpcSubnetIds")) + } + if v.VpcSecurityGroupIds == nil { + invalidParams.Add(smithy.NewErrParamRequired("VpcSecurityGroupIds")) + } + if len(v.DeploymentType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("DeploymentType")) + } + if v.LogDeliveryConfiguration != nil { + if err := validateLogDeliveryConfiguration(v.LogDeliveryConfiguration); err != nil { + invalidParams.AddNested("LogDeliveryConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpCreateDbInstanceInput(v *CreateDbInstanceInput) error { if v == nil { return nil @@ -407,6 +565,21 @@ func validateOpCreateDbParameterGroupInput(v *CreateDbParameterGroupInput) error } } +func validateOpDeleteDbClusterInput(v *DeleteDbClusterInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteDbClusterInput"} + if v.DbClusterId == nil { + invalidParams.Add(smithy.NewErrParamRequired("DbClusterId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpDeleteDbInstanceInput(v *DeleteDbInstanceInput) error { if v == nil { return nil @@ -422,6 +595,21 @@ func validateOpDeleteDbInstanceInput(v *DeleteDbInstanceInput) error { } } +func validateOpGetDbClusterInput(v *GetDbClusterInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetDbClusterInput"} + if v.DbClusterId == nil { + invalidParams.Add(smithy.NewErrParamRequired("DbClusterId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpGetDbInstanceInput(v *GetDbInstanceInput) error { if v == nil { return nil @@ -452,6 +640,21 @@ func validateOpGetDbParameterGroupInput(v *GetDbParameterGroupInput) error { } } +func validateOpListDbInstancesForClusterInput(v *ListDbInstancesForClusterInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListDbInstancesForClusterInput"} + if v.DbClusterId == nil { + invalidParams.Add(smithy.NewErrParamRequired("DbClusterId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpListTagsForResourceInput(v *ListTagsForResourceInput) error { if v == nil { return nil @@ -503,6 +706,26 @@ func validateOpUntagResourceInput(v *UntagResourceInput) error { } } +func validateOpUpdateDbClusterInput(v *UpdateDbClusterInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateDbClusterInput"} + if v.DbClusterId == nil { + invalidParams.Add(smithy.NewErrParamRequired("DbClusterId")) + } + if v.LogDeliveryConfiguration != nil { + if err := validateLogDeliveryConfiguration(v.LogDeliveryConfiguration); err != nil { + invalidParams.AddNested("LogDeliveryConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpUpdateDbInstanceInput(v *UpdateDbInstanceInput) error { if v == nil { return nil
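Not part of the generated diff above: a minimal usage sketch of the new DbCluster surface added by this change, assuming the usual SDK v2 client construction (config.LoadDefaultConfig plus timestreaminfluxdb.NewFromConfig). The field names and required members follow the CreateDbClusterInput serializer and validateOpCreateDbClusterInput shown in this change; the cluster name, password, instance type, subnet IDs, and security group IDs are placeholder values, and the output shape is not inspected because it does not appear in these hunks.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb"
	"github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb/types"
)

func main() {
	ctx := context.Background()

	// Standard SDK v2 setup: load the default configuration and build a
	// Timestream for InfluxDB client from it.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := timestreaminfluxdb.NewFromConfig(cfg)

	// Name, Password, DbInstanceType, AllocatedStorage, VpcSubnetIds,
	// VpcSecurityGroupIds, and DeploymentType are the members required by
	// validateOpCreateDbClusterInput in this change. All concrete values
	// below are illustrative placeholders.
	_, err = client.CreateDbCluster(ctx, &timestreaminfluxdb.CreateDbClusterInput{
		Name:                aws.String("example-cluster"),
		Password:            aws.String("example-password"),
		DbInstanceType:      types.DbInstanceType("db.influx.medium"), // illustrative instance type string
		AllocatedStorage:    aws.Int32(100),
		VpcSubnetIds:        []string{"subnet-0example"},
		VpcSecurityGroupIds: []string{"sg-0example"},
		DeploymentType:      types.ClusterDeploymentTypeMultiNodeReadReplicas,
		FailoverMode:        types.FailoverModeAutomatic,
	})
	if err != nil {
		log.Fatal(err)
	}
}

GetDbCluster, DeleteDbCluster, UpdateDbCluster, ListDbClusters, and ListDbInstancesForCluster follow the same call pattern; per the validators in this change, the cluster-scoped operations require only DbClusterId, and the two List operations accept optional maxResults and nextToken paging parameters.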