diff --git a/.changelog/33731.txt b/.changelog/33731.txt new file mode 100644 index 000000000000..67f032e3959d --- /dev/null +++ b/.changelog/33731.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_dms_event_subscription: `source_ids` and `source_type` are Required +``` \ No newline at end of file diff --git a/.changelog/34832.txt b/.changelog/34832.txt new file mode 100644 index 000000000000..0794acc706f7 --- /dev/null +++ b/.changelog/34832.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_finspace_kx_scaling_group +``` diff --git a/.changelog/34833.txt b/.changelog/34833.txt new file mode 100644 index 000000000000..e1e350824ea5 --- /dev/null +++ b/.changelog/34833.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_finspace_kx_volume +``` diff --git a/.changelog/34916.txt b/.changelog/34916.txt new file mode 100644 index 000000000000..9c96cfacdd47 --- /dev/null +++ b/.changelog/34916.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +aws_polly_voices +``` diff --git a/.ci/providerlint/go.mod b/.ci/providerlint/go.mod index a4d456a37154..8d6db90f68ba 100644 --- a/.ci/providerlint/go.mod +++ b/.ci/providerlint/go.mod @@ -3,7 +3,7 @@ module github.com/hashicorp/terraform-provider-aws/ci/providerlint go 1.20 require ( - github.com/aws/aws-sdk-go v1.49.1 + github.com/aws/aws-sdk-go v1.49.2 github.com/bflad/tfproviderlint v0.29.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0 golang.org/x/tools v0.13.0 diff --git a/.ci/providerlint/go.sum b/.ci/providerlint/go.sum index 7375ababe782..8755e6de2fb8 100644 --- a/.ci/providerlint/go.sum +++ b/.ci/providerlint/go.sum @@ -8,8 +8,8 @@ github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod 
h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= -github.com/aws/aws-sdk-go v1.49.1 h1:Dsamcd8d/nNb3A+bZ0ucfGl0vGZsW5wlRW0vhoYGoeQ= -github.com/aws/aws-sdk-go v1.49.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.49.2 h1:+4BEcm1nPCoDbVd+gg8cdxpa1qJfrvnddy12vpEVWjw= +github.com/aws/aws-sdk-go v1.49.2/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/bflad/gopaniccheck v0.1.0 h1:tJftp+bv42ouERmUMWLoUn/5bi/iQZjHPznM00cP/bU= github.com/bflad/gopaniccheck v0.1.0/go.mod h1:ZCj2vSr7EqVeDaqVsWN4n2MwdROx1YL+LFo47TSWtsA= github.com/bflad/tfproviderlint v0.29.0 h1:zxKYAAM6IZ4ace1a3LX+uzMRIMP8L+iOtEc+FP2Yoow= diff --git a/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index b3d8f8c2c94d..41a2711656a8 100644 --- a/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -12007,6 +12007,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -34775,6 +34778,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "pipes": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "polly": service{ Endpoints: serviceEndpoints{ endpointKey{ diff --git a/.ci/providerlint/vendor/modules.txt b/.ci/providerlint/vendor/modules.txt index d681c1293264..3b328cf28be4 100644 --- a/.ci/providerlint/vendor/modules.txt +++ b/.ci/providerlint/vendor/modules.txt @@ -24,7 +24,7 @@ github.com/agext/levenshtein # github.com/apparentlymart/go-textseg/v15 v15.0.0 ## explicit; go 1.16 github.com/apparentlymart/go-textseg/v15/textseg -# github.com/aws/aws-sdk-go v1.49.1 +# github.com/aws/aws-sdk-go v1.49.2 ## 
explicit; go 1.19 github.com/aws/aws-sdk-go/aws/awserr github.com/aws/aws-sdk-go/aws/endpoints diff --git a/CHANGELOG.md b/CHANGELOG.md index 52e141270803..f41aab552508 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,8 +2,11 @@ FEATURES: +* **New Data Source:** `aws_polly_voices` ([#34916](https://github.com/hashicorp/terraform-provider-aws/issues/34916)) * **New Data Source:** `aws_ssoadmin_application_assignments` ([#34796](https://github.com/hashicorp/terraform-provider-aws/issues/34796)) * **New Data Source:** `aws_ssoadmin_principal_application_assignments` ([#34815](https://github.com/hashicorp/terraform-provider-aws/issues/34815)) +* **New Resource:** `aws_finspace_kx_scaling_group` ([#34832](https://github.com/hashicorp/terraform-provider-aws/issues/34832)) +* **New Resource:** `aws_ssoadmin_trusted_token_issuer` ([#34839](https://github.com/hashicorp/terraform-provider-aws/issues/34839)) ENHANCEMENTS: @@ -30,6 +33,7 @@ BUG FIXES: * resource/aws_cloudwatch_log_group: Fix `invalid new value for .skip_destroy: was cty.False, but now null` errors ([#30354](https://github.com/hashicorp/terraform-provider-aws/issues/30354)) * resource/aws_cloudwatch_log_group: Remove default value (`STANDARD`) for `log_group_class` argument and mark as Computed. 
This fixes `InvalidParameterException: Only Standard log class is supported` errors in AWS Regions other than AWS Commercial ([#34812](https://github.com/hashicorp/terraform-provider-aws/issues/34812)) * resource/aws_db_instance: Fix error where Terraform loses track of resource if Blue/Green Deployment is applied outside of Terraform ([#34728](https://github.com/hashicorp/terraform-provider-aws/issues/34728)) +* resource/aws_dms_event_subscription: `source_ids` and `source_type` are Required ([#33731](https://github.com/hashicorp/terraform-provider-aws/issues/33731)) * resource/aws_ecr_pull_through_cache_rule: Fix plan time validation for `ecr_repository_prefix` ([#34716](https://github.com/hashicorp/terraform-provider-aws/issues/34716)) * resource/aws_lb: Correct in-place update of `security_groups` for Network Load Balancers when the new value is Computed ([#33205](https://github.com/hashicorp/terraform-provider-aws/issues/33205)) * resource/aws_lb: Fix `InvalidConfigurationRequest: Load balancer attribute key 'dns_record.client_routing_policy' is not supported on load balancers with type 'network'` errors on resource Create in AWS GovCloud (US) ([#34135](https://github.com/hashicorp/terraform-provider-aws/issues/34135)) diff --git a/go.mod b/go.mod index 3821b9ff612b..553689a5b389 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.20 require ( github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c github.com/YakDriver/regexache v0.23.0 - github.com/aws/aws-sdk-go v1.49.1 + github.com/aws/aws-sdk-go v1.49.2 github.com/aws/aws-sdk-go-v2 v1.24.0 github.com/aws/aws-sdk-go-v2/config v1.26.1 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 @@ -41,7 +41,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/ecr v1.24.5 github.com/aws/aws-sdk-go-v2/service/eks v1.35.5 github.com/aws/aws-sdk-go-v2/service/emr v1.35.5 - github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.5 + github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.6 
github.com/aws/aws-sdk-go-v2/service/evidently v1.16.5 github.com/aws/aws-sdk-go-v2/service/finspace v1.20.0 github.com/aws/aws-sdk-go-v2/service/fis v1.21.5 @@ -65,7 +65,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/oam v1.7.5 github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.9.5 github.com/aws/aws-sdk-go-v2/service/osis v1.6.5 - github.com/aws/aws-sdk-go-v2/service/pipes v1.9.5 + github.com/aws/aws-sdk-go-v2/service/pipes v1.9.6 github.com/aws/aws-sdk-go-v2/service/polly v1.36.5 github.com/aws/aws-sdk-go-v2/service/pricing v1.24.5 github.com/aws/aws-sdk-go-v2/service/qldb v1.19.5 diff --git a/go.sum b/go.sum index 002e8c2e279f..1e073c94d703 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmms github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.49.1 h1:Dsamcd8d/nNb3A+bZ0ucfGl0vGZsW5wlRW0vhoYGoeQ= -github.com/aws/aws-sdk-go v1.49.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.49.2 h1:+4BEcm1nPCoDbVd+gg8cdxpa1qJfrvnddy12vpEVWjw= +github.com/aws/aws-sdk-go v1.49.2/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.24.0 h1:890+mqQ+hTpNuw0gGP6/4akolQkSToDJgHfQE7AwGuk= github.com/aws/aws-sdk-go-v2 v1.24.0/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs= @@ -108,8 +108,8 @@ github.com/aws/aws-sdk-go-v2/service/eks v1.35.5 h1:LEYyWSnfdSSysPr5JWUkNwOD0MvX github.com/aws/aws-sdk-go-v2/service/eks v1.35.5/go.mod h1:L1uv3UgQlAkdM9v0gpec7nnfUiQkCnGMjBE7MJArfWQ= github.com/aws/aws-sdk-go-v2/service/emr v1.35.5 
h1:dZtEDpqYVg3i5oT8lSXxEsg6dInewHA3qNuyzHTvWck= github.com/aws/aws-sdk-go-v2/service/emr v1.35.5/go.mod h1:Drh6y2qLaw/wnDKTIcdqM2m358MIRXsZ2Bj2tjhVLq0= -github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.5 h1:IsLomor7ErBzqMCtI71gqTw0ENKbZxVhHMwSnDImbTw= -github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.5/go.mod h1:G2r5cqojvwkdJJx6NDxszEfHC8f02TF15dE/3bg8P9A= +github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.6 h1:O2ppygCppB40GS7lDJUX4dGEgEdsKkX62oIAGgre/rY= +github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.6/go.mod h1:G2r5cqojvwkdJJx6NDxszEfHC8f02TF15dE/3bg8P9A= github.com/aws/aws-sdk-go-v2/service/evidently v1.16.5 h1:qMMMld3RbqxSZ5KEokAu+w4MGV9YlSvisJbk4iMO4m0= github.com/aws/aws-sdk-go-v2/service/evidently v1.16.5/go.mod h1:ydI4dfZIWil2hOsneE1QWDOxY/CdC37oT96S4JOrD24= github.com/aws/aws-sdk-go-v2/service/finspace v1.20.0 h1:n3TWZAn4gV2/GiJMnuNuSEkgyXHkKPEkenU5ZmmFS1o= @@ -168,8 +168,8 @@ github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.9.5 h1:V+zBQiUAATdw github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.9.5/go.mod h1:Hky91JAG7y6hJrIoZ6IyJlB99+AFOPUIfqVQcZ+fbhY= github.com/aws/aws-sdk-go-v2/service/osis v1.6.5 h1:u0FL7wY1ni4WQkpfUiBslPmwKOltziQkGg5njTpPH6M= github.com/aws/aws-sdk-go-v2/service/osis v1.6.5/go.mod h1:wRTpbH8h5d4SJmdsy9LNEuZNHrNtUCZMl+U1slAW4Ng= -github.com/aws/aws-sdk-go-v2/service/pipes v1.9.5 h1:BKJlKvRxWQCjd7UyZPLlvkvBDOf7UziF5spBSkMq3J4= -github.com/aws/aws-sdk-go-v2/service/pipes v1.9.5/go.mod h1:N3pAD/7GiKZAOBFFsF9BqWdSg33HM8ibXoAyPQXgcNI= +github.com/aws/aws-sdk-go-v2/service/pipes v1.9.6 h1:cDjJ1OsUDDHP0DERFe+kon0awE0vMt+6xjd9zuOaOv8= +github.com/aws/aws-sdk-go-v2/service/pipes v1.9.6/go.mod h1:N3pAD/7GiKZAOBFFsF9BqWdSg33HM8ibXoAyPQXgcNI= github.com/aws/aws-sdk-go-v2/service/polly v1.36.5 h1:/BHypWAWPEuwfnlb4hJz5R1uedDGNtorZgEHYtW/wI4= github.com/aws/aws-sdk-go-v2/service/polly v1.36.5/go.mod h1:mmQzyk89+rKEfieMV8gHoFoVmrPiyKjqORj2Uk5+O04= github.com/aws/aws-sdk-go-v2/service/pricing 
v1.24.5 h1:yJniPHxzGy0jtJNkXYTqI8ps587kl1Jf8Luz5K8Jxjs= diff --git a/internal/errs/diag.go b/internal/errs/diag.go index 7faf6b596c5b..4b3666632d34 100644 --- a/internal/errs/diag.go +++ b/internal/errs/diag.go @@ -68,13 +68,6 @@ func NewWarningDiagnostic(summary, detail string) diag.Diagnostic { } } -func FromAttributeError(path cty.Path, err error) diag.Diagnostic { - return withPath( - NewErrorDiagnostic(err.Error(), ""), - path, - ) -} - func withPath(d diag.Diagnostic, path cty.Path) diag.Diagnostic { d.AttributePath = path return d diff --git a/internal/framework/flex/list.go b/internal/framework/flex/list.go index 7199673e6ebb..5555cf51686a 100644 --- a/internal/framework/flex/list.go +++ b/internal/framework/flex/list.go @@ -10,9 +10,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-framework/types/basetypes" - "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" - fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" - "github.com/hashicorp/terraform-provider-aws/internal/slices" ) func ExpandFrameworkStringList(ctx context.Context, v basetypes.ListValuable) []*string { @@ -86,52 +83,3 @@ func FlattenFrameworkStringValueListLegacy[T ~string](_ context.Context, vs []T) return types.ListValueMust(types.StringType, elems) } - -type FrameworkElementExpanderFunc[T any, U any] func(context.Context, T) U - -func ExpandFrameworkListNestedBlock[T any, U any](ctx context.Context, tfList types.List, f FrameworkElementExpanderFunc[T, U]) []U { - if tfList.IsNull() || tfList.IsUnknown() { - return nil - } - - var data []T - - _ = fwdiag.Must(0, tfList.ElementsAs(ctx, &data, false)) - - return slices.ApplyToAll(data, func(t T) U { - return f(ctx, t) - }) -} - -func ExpandFrameworkListNestedBlockPtr[T any, U any](ctx context.Context, tfList types.List, f FrameworkElementExpanderFunc[T, *U]) *U { - if tfList.IsNull() || 
tfList.IsUnknown() { - return nil - } - - var data []T - - _ = fwdiag.Must(0, tfList.ElementsAs(ctx, &data, false)) - - if len(data) == 0 { - return nil - } - - return f(ctx, data[0]) -} - -type FrameworkElementFlattenerFunc[T any, U any] func(context.Context, U) T - -func FlattenFrameworkListNestedBlock[T any, U any](ctx context.Context, apiObjects []U, f FrameworkElementFlattenerFunc[T, U]) types.List { - attributeTypes := fwtypes.AttributeTypesMust[T](ctx) - elementType := types.ObjectType{AttrTypes: attributeTypes} - - if len(apiObjects) == 0 { - return types.ListNull(elementType) - } - - data := slices.ApplyToAll(apiObjects, func(apiObject U) T { - return f(ctx, apiObject) - }) - - return fwdiag.Must(types.ListValueFrom(ctx, elementType, data)) -} diff --git a/internal/service/dms/consts.go b/internal/service/dms/consts.go index c33690fd7700..a3fa4d625826 100644 --- a/internal/service/dms/consts.go +++ b/internal/service/dms/consts.go @@ -174,3 +174,10 @@ func networkType_Values() []string { networkTypeIPv4, } } + +const ( + eventSubscriptionStatusActive = "active" + eventSubscriptionStatusCreating = "creating" + eventSubscriptionStatusDeleting = "deleting" + eventSubscriptionStatusModifying = "modifying" +) diff --git a/internal/service/dms/event_subscription.go b/internal/service/dms/event_subscription.go index 6e8226ba0ba2..e9226398b07d 100644 --- a/internal/service/dms/event_subscription.go +++ b/internal/service/dms/event_subscription.go @@ -21,6 +21,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -33,6 +34,7 @@ func ResourceEventSubscription() *schema.Resource { ReadWithoutTimeout: resourceEventSubscriptionRead, 
UpdateWithoutTimeout: resourceEventSubscriptionUpdate, DeleteWithoutTimeout: resourceEventSubscriptionDelete, + Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(10 * time.Minute), Delete: schema.DefaultTimeout(10 * time.Minute), @@ -56,7 +58,6 @@ func ResourceEventSubscription() *schema.Resource { "event_categories": { Type: schema.TypeSet, Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, Required: true, }, "name": { @@ -73,14 +74,12 @@ func ResourceEventSubscription() *schema.Resource { "source_ids": { Type: schema.TypeSet, Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Required: true, ForceNew: true, - Optional: true, }, "source_type": { Type: schema.TypeString, - Optional: true, - // The API suppors modification but doing so loses all source_ids + Required: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ "replication-instance", @@ -99,42 +98,27 @@ func resourceEventSubscriptionCreate(ctx context.Context, d *schema.ResourceData var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DMSConn(ctx) - request := &dms.CreateEventSubscriptionInput{ + name := d.Get("name").(string) + input := &dms.CreateEventSubscriptionInput{ Enabled: aws.Bool(d.Get("enabled").(bool)), + EventCategories: flex.ExpandStringSet(d.Get("event_categories").(*schema.Set)), SnsTopicArn: aws.String(d.Get("sns_topic_arn").(string)), - SubscriptionName: aws.String(d.Get("name").(string)), + SourceIds: flex.ExpandStringSet(d.Get("source_ids").(*schema.Set)), SourceType: aws.String(d.Get("source_type").(string)), + SubscriptionName: aws.String(name), Tags: getTagsIn(ctx), } - if v, ok := d.GetOk("event_categories"); ok { - request.EventCategories = flex.ExpandStringSet(v.(*schema.Set)) - } - - if v, ok := d.GetOk("source_ids"); ok { - request.SourceIds = flex.ExpandStringSet(v.(*schema.Set)) - } - - _, err := conn.CreateEventSubscriptionWithContext(ctx, request) + _, err := 
conn.CreateEventSubscriptionWithContext(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating DMS Event Subscription (%s): %s", d.Get("name").(string), err) + return sdkdiag.AppendErrorf(diags, "creating DMS Event Subscription (%s): %s", name, err) } - d.SetId(d.Get("name").(string)) - - stateConf := &retry.StateChangeConf{ - Pending: []string{"creating", "modifying"}, - Target: []string{"active"}, - Refresh: resourceEventSubscriptionStateRefreshFunc(ctx, conn, d.Id()), - Timeout: d.Timeout(schema.TimeoutCreate), - MinTimeout: 10 * time.Second, - Delay: 10 * time.Second, - } + d.SetId(name) - _, err = stateConf.WaitForStateContext(ctx) - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for DMS Event Subscription (%s) creation: %s", d.Id(), err) + if _, err := waitEventSubscriptionCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for DMS Event Subscription (%s) create: %s", d.Id(), err) } return append(diags, resourceEventSubscriptionRead(ctx, d, meta)...) 
@@ -144,30 +128,18 @@ func resourceEventSubscriptionRead(ctx context.Context, d *schema.ResourceData, var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DMSConn(ctx) - request := &dms.DescribeEventSubscriptionsInput{ - SubscriptionName: aws.String(d.Id()), - } - - response, err := conn.DescribeEventSubscriptionsWithContext(ctx, request) + subscription, err := FindEventSubscriptionByName(ctx, conn, d.Id()) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { - log.Printf("[WARN] DMS event subscription (%s) not found, removing from state", d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] DMS Event Subscription (%s) not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "reading DMS event subscription: %s", err) - } - - if response == nil || len(response.EventSubscriptionsList) == 0 || response.EventSubscriptionsList[0] == nil { - log.Printf("[WARN] DMS event subscription (%s) not found, removing from state", d.Id()) - d.SetId("") - return diags + return sdkdiag.AppendErrorf(diags, "reading DMS Event Subscription (%s): %s", d.Id(), err) } - subscription := response.EventSubscriptionsList[0] - arn := arn.ARN{ Partition: meta.(*conns.AWSClient).Partition, Service: "dms", @@ -176,13 +148,12 @@ func resourceEventSubscriptionRead(ctx context.Context, d *schema.ResourceData, Resource: fmt.Sprintf("es:%s", d.Id()), }.String() d.Set("arn", arn) - d.Set("enabled", subscription.Enabled) + d.Set("event_categories", aws.StringValueSlice(subscription.EventCategoriesList)) + d.Set("name", d.Id()) d.Set("sns_topic_arn", subscription.SnsTopicArn) + d.Set("source_ids", aws.StringValueSlice(subscription.SourceIdsList)) d.Set("source_type", subscription.SourceType) - d.Set("name", d.Id()) - d.Set("event_categories", flex.FlattenStringList(subscription.EventCategoriesList)) - d.Set("source_ids", flex.FlattenStringList(subscription.SourceIdsList)) return diags } @@ 
-191,36 +162,23 @@ func resourceEventSubscriptionUpdate(ctx context.Context, d *schema.ResourceData var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DMSConn(ctx) - if d.HasChanges("enabled", "event_categories", "sns_topic_arn", "source_type") { - request := &dms.ModifyEventSubscriptionInput{ + if d.HasChangesExcept("tags", "tags_all") { + input := &dms.ModifyEventSubscriptionInput{ Enabled: aws.Bool(d.Get("enabled").(bool)), + EventCategories: flex.ExpandStringSet(d.Get("event_categories").(*schema.Set)), SnsTopicArn: aws.String(d.Get("sns_topic_arn").(string)), - SubscriptionName: aws.String(d.Get("name").(string)), SourceType: aws.String(d.Get("source_type").(string)), + SubscriptionName: aws.String(d.Id()), } - if v, ok := d.GetOk("event_categories"); ok { - request.EventCategories = flex.ExpandStringSet(v.(*schema.Set)) - } - - _, err := conn.ModifyEventSubscriptionWithContext(ctx, request) + _, err := conn.ModifyEventSubscriptionWithContext(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating DMS Event Subscription (%s): %s", d.Id(), err) - } - - stateConf := &retry.StateChangeConf{ - Pending: []string{"modifying"}, - Target: []string{"active"}, - Refresh: resourceEventSubscriptionStateRefreshFunc(ctx, conn, d.Id()), - Timeout: d.Timeout(schema.TimeoutUpdate), - MinTimeout: 10 * time.Second, - Delay: 10 * time.Second, + return sdkdiag.AppendErrorf(diags, "modifying DMS Event Subscription (%s): %s", d.Id(), err) } - _, err = stateConf.WaitForStateContext(ctx) - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for DMS Event Subscription (%s) modification: %s", d.Id(), err) + if _, err := waitEventSubscriptionUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for DMS Event Subscription (%s) update: %s", d.Id(), err) } } @@ -231,11 +189,10 @@ func resourceEventSubscriptionDelete(ctx context.Context, d *schema.ResourceData var diags diag.Diagnostics 
conn := meta.(*conns.AWSClient).DMSConn(ctx) - request := &dms.DeleteEventSubscriptionInput{ + log.Printf("[DEBUG] Deleting DMS Event Subscription: %s", d.Id()) + _, err := conn.DeleteEventSubscriptionWithContext(ctx, &dms.DeleteEventSubscriptionInput{ SubscriptionName: aws.String(d.Id()), - } - - _, err := conn.DeleteEventSubscriptionWithContext(ctx, request) + }) if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { return diags @@ -245,30 +202,67 @@ func resourceEventSubscriptionDelete(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "deleting DMS Event Subscription (%s): %s", d.Id(), err) } - stateConf := &retry.StateChangeConf{ - Pending: []string{"deleting"}, - Target: []string{}, - Refresh: resourceEventSubscriptionStateRefreshFunc(ctx, conn, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - MinTimeout: 10 * time.Second, - Delay: 10 * time.Second, + if _, err := waitEventSubscriptionDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for DMS Event Subscription (%s) delete: %s", d.Id(), err) } - _, err = stateConf.WaitForStateContext(ctx) + return diags +} + +func FindEventSubscriptionByName(ctx context.Context, conn *dms.DatabaseMigrationService, name string) (*dms.EventSubscription, error) { + input := &dms.DescribeEventSubscriptionsInput{ + SubscriptionName: aws.String(name), + } + + return findEventSubscription(ctx, conn, input) +} + +func findEventSubscription(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeEventSubscriptionsInput) (*dms.EventSubscription, error) { + output, err := findEventSubscriptions(ctx, conn, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for DMS Event Subscription (%s) deletion: %s", d.Id(), err) + return nil, err } - return diags + return tfresource.AssertSinglePtrResult(output) } -func resourceEventSubscriptionStateRefreshFunc(ctx context.Context, conn 
*dms.DatabaseMigrationService, name string) retry.StateRefreshFunc { +func findEventSubscriptions(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeEventSubscriptionsInput) ([]*dms.EventSubscription, error) { + var output []*dms.EventSubscription + + err := conn.DescribeEventSubscriptionsPagesWithContext(ctx, input, func(page *dms.DescribeEventSubscriptionsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, v := range page.EventSubscriptionsList { + if v != nil { + output = append(output, v) + } + } + + return !lastPage + }) + + if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + return output, nil +} + +func statusEventSubscription(ctx context.Context, conn *dms.DatabaseMigrationService, name string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - v, err := conn.DescribeEventSubscriptionsWithContext(ctx, &dms.DescribeEventSubscriptionsInput{ - SubscriptionName: aws.String(name), - }) + output, err := FindEventSubscriptionByName(ctx, conn, name) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + if tfresource.NotFound(err) { return nil, "", nil } @@ -276,10 +270,63 @@ func resourceEventSubscriptionStateRefreshFunc(ctx context.Context, conn *dms.Da return nil, "", err } - if v == nil || len(v.EventSubscriptionsList) == 0 || v.EventSubscriptionsList[0] == nil { - return nil, "", nil - } + return output, aws.StringValue(output.Status), nil + } +} - return v, aws.StringValue(v.EventSubscriptionsList[0].Status), nil +func waitEventSubscriptionCreated(ctx context.Context, conn *dms.DatabaseMigrationService, name string, timeout time.Duration) (*dms.EventSubscription, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{eventSubscriptionStatusCreating, eventSubscriptionStatusModifying}, + Target: 
[]string{eventSubscriptionStatusActive}, + Refresh: statusEventSubscription(ctx, conn, name), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 10 * time.Second, } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*dms.EventSubscription); ok { + return output, err + } + + return nil, err +} + +func waitEventSubscriptionUpdated(ctx context.Context, conn *dms.DatabaseMigrationService, name string, timeout time.Duration) (*dms.EventSubscription, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{eventSubscriptionStatusModifying}, + Target: []string{eventSubscriptionStatusActive}, + Refresh: statusEventSubscription(ctx, conn, name), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 10 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*dms.EventSubscription); ok { + return output, err + } + + return nil, err +} + +func waitEventSubscriptionDeleted(ctx context.Context, conn *dms.DatabaseMigrationService, name string, timeout time.Duration) (*dms.EventSubscription, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{eventSubscriptionStatusDeleting}, + Target: []string{}, + Refresh: statusEventSubscription(ctx, conn, name), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 10 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*dms.EventSubscription); ok { + return output, err + } + + return nil, err } diff --git a/internal/service/dms/event_subscription_test.go b/internal/service/dms/event_subscription_test.go index e222bd4068a2..45f3d0c15c67 100644 --- a/internal/service/dms/event_subscription_test.go +++ b/internal/service/dms/event_subscription_test.go @@ -8,15 +8,14 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - 
"github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdms "github.com/hashicorp/terraform-provider-aws/internal/service/dms" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) func TestAccDMSEventSubscription_basic(t *testing.T) { @@ -215,11 +214,9 @@ func testAccCheckEventSubscriptionDestroy(ctx context.Context) resource.TestChec conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) - resp, err := conn.DescribeEventSubscriptionsWithContext(ctx, &dms.DescribeEventSubscriptionsInput{ - SubscriptionName: aws.String(rs.Primary.ID), - }) + _, err := tfdms.FindEventSubscriptionByName(ctx, conn, rs.Primary.ID) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + if tfresource.NotFound(err) { continue } @@ -227,78 +224,38 @@ func testAccCheckEventSubscriptionDestroy(ctx context.Context) resource.TestChec return err } - if resp != nil && len(resp.EventSubscriptionsList) > 0 { - return fmt.Errorf("DMS event subscription still exists: %s", rs.Primary.ID) - } + return fmt.Errorf("DMS Event Subscription %s still exists", rs.Primary.ID) } return nil } } -func testAccCheckEventSubscriptionExists(ctx context.Context, n string, eventSubscription *dms.EventSubscription) resource.TestCheckFunc { +func testAccCheckEventSubscriptionExists(ctx context.Context, n string, v *dms.EventSubscription) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) - resp, err := 
conn.DescribeEventSubscriptionsWithContext(ctx, &dms.DescribeEventSubscriptionsInput{ - SubscriptionName: aws.String(rs.Primary.ID), - }) - if err != nil { - return fmt.Errorf("DMS event subscription error: %v", err) - } + output, err := tfdms.FindEventSubscriptionByName(ctx, conn, rs.Primary.ID) - if resp == nil || len(resp.EventSubscriptionsList) == 0 || resp.EventSubscriptionsList[0] == nil { - return fmt.Errorf("DMS event subscription not found") + if err != nil { + return err } - *eventSubscription = *resp.EventSubscriptionsList[0] + *v = *output return nil } } -func testAccEventSubscriptionConfigBase(rName string) string { - return fmt.Sprintf(` -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - +func testAccEventSubscriptionConfig_base(rName string) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 2), fmt.Sprintf(` data "aws_partition" "current" {} -resource "aws_vpc" "test" { - cidr_block = "10.1.0.0/16" - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test" { - count = 2 - - availability_zone = data.aws_availability_zones.available.names[count.index] - cidr_block = "10.1.${count.index}.0/24" - vpc_id = aws_vpc.test.id - - tags = { - Name = aws_vpc.test.tags["Name"] - } -} - resource "aws_dms_replication_subnet_group" "test" { replication_subnet_group_description = %[1]q replication_subnet_group_id = %[1]q @@ -315,13 +272,11 @@ resource "aws_dms_replication_instance" "test" { resource "aws_sns_topic" "test" { name = %[1]q } -`, rName) +`, rName)) } func testAccEventSubscriptionConfig_enabled(rName string, enabled bool) string { - return acctest.ConfigCompose( - testAccEventSubscriptionConfigBase(rName), - fmt.Sprintf(` + return acctest.ConfigCompose(testAccEventSubscriptionConfig_base(rName), fmt.Sprintf(` resource "aws_dms_event_subscription" "test" { name = %[1]q enabled = %[2]t @@ -334,9 +289,7 @@ resource 
"aws_dms_event_subscription" "test" { } func testAccEventSubscriptionConfig_categories2(rName string, eventCategory1 string, eventCategory2 string) string { - return acctest.ConfigCompose( - testAccEventSubscriptionConfigBase(rName), - fmt.Sprintf(` + return acctest.ConfigCompose(testAccEventSubscriptionConfig_base(rName), fmt.Sprintf(` resource "aws_dms_event_subscription" "test" { name = %[1]q enabled = false @@ -349,9 +302,7 @@ resource "aws_dms_event_subscription" "test" { } func testAccEventSubscriptionConfig_tags1(rName, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose( - testAccEventSubscriptionConfigBase(rName), - fmt.Sprintf(` + return acctest.ConfigCompose(testAccEventSubscriptionConfig_base(rName), fmt.Sprintf(` resource "aws_dms_event_subscription" "test" { name = %[1]q enabled = true @@ -368,9 +319,7 @@ resource "aws_dms_event_subscription" "test" { } func testAccEventSubscriptionConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose( - testAccEventSubscriptionConfigBase(rName), - fmt.Sprintf(` + return acctest.ConfigCompose(testAccEventSubscriptionConfig_base(rName), fmt.Sprintf(` resource "aws_dms_event_subscription" "test" { name = %[1]q enabled = true diff --git a/internal/service/dynamodb/table_item.go b/internal/service/dynamodb/table_item.go index fae285cb8eee..344807ef2d9b 100644 --- a/internal/service/dynamodb/table_item.go +++ b/internal/service/dynamodb/table_item.go @@ -8,7 +8,6 @@ import ( "fmt" "log" "reflect" - "regexp" "strings" "github.com/aws/aws-sdk-go/aws" @@ -270,35 +269,6 @@ func FindTableItem(ctx context.Context, conn *dynamodb.DynamoDB, tableName strin return out, nil } -func BuildExpressionAttributeNames(attrs map[string]*dynamodb.AttributeValue) map[string]*string { - names := map[string]*string{} - - for key := range attrs { - names["#a_"+cleanKeyName(key)] = aws.String(key) - } - - log.Printf("[DEBUG] ExpressionAttributeNames: %+v", names) - return names 
-} - -func cleanKeyName(key string) string { - reg, err := regexp.Compile("[A-Za-z^]+") // suspect regexp - if err != nil { - log.Printf("[ERROR] clean keyname errored %v", err) - } - return reg.ReplaceAllString(key, "") -} - -func BuildProjectionExpression(attrs map[string]*dynamodb.AttributeValue) *string { - keys := []string{} - - for key := range attrs { - keys = append(keys, cleanKeyName(key)) - } - log.Printf("[DEBUG] ProjectionExpressions: %+v", strings.Join(keys, ", #a_")) - return aws.String("#a_" + strings.Join(keys, ", #a_")) -} - func buildTableItemID(tableName string, hashKey string, rangeKey string, attrs map[string]*dynamodb.AttributeValue) string { id := []string{tableName, hashKey} diff --git a/internal/service/ec2/status.go b/internal/service/ec2/status.go index be8ece11bb42..67f40990dc78 100644 --- a/internal/service/ec2/status.go +++ b/internal/service/ec2/status.go @@ -960,38 +960,6 @@ func StatusVolumeModificationState(ctx context.Context, conn *ec2.EC2, id string } } -func StatusVPCState(ctx context.Context, conn *ec2.EC2, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindVPCByID(ctx, conn, id) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.State), nil - } -} - -func StatusVPCAttributeValue(ctx context.Context, conn *ec2.EC2, id string, attribute string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - attributeValue, err := FindVPCAttribute(ctx, conn, id, attribute) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return attributeValue, strconv.FormatBool(attributeValue), nil - } -} - func StatusVPCCIDRBlockAssociationState(ctx context.Context, conn *ec2.EC2, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, _, err := FindVPCCIDRBlockAssociationByID(ctx, conn, id) 
diff --git a/internal/service/ec2/wait.go b/internal/service/ec2/wait.go index bea1573b898b..8319a984981a 100644 --- a/internal/service/ec2/wait.go +++ b/internal/service/ec2/wait.go @@ -1667,41 +1667,6 @@ const ( vpcDeletedTimeout = 5 * time.Minute ) -func WaitVPCCreated(ctx context.Context, conn *ec2.EC2, id string) (*ec2.Vpc, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{ec2.VpcStatePending}, - Target: []string{ec2.VpcStateAvailable}, - Refresh: StatusVPCState(ctx, conn, id), - Timeout: vpcCreatedTimeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*ec2.Vpc); ok { - return output, err - } - - return nil, err -} - -func WaitVPCAttributeUpdated(ctx context.Context, conn *ec2.EC2, vpcID string, attribute string, expectedValue bool) (*ec2.Vpc, error) { - stateConf := &retry.StateChangeConf{ - Target: []string{strconv.FormatBool(expectedValue)}, - Refresh: StatusVPCAttributeValue(ctx, conn, vpcID, attribute), - Timeout: ec2PropagationTimeout, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*ec2.Vpc); ok { - return output, err - } - - return nil, err -} - func WaitVPCCIDRBlockAssociationCreated(ctx context.Context, conn *ec2.EC2, id string, timeout time.Duration) (*ec2.VpcCidrBlockState, error) { stateConf := &retry.StateChangeConf{ Pending: []string{ec2.VpcCidrBlockStateCodeAssociating, ec2.VpcCidrBlockStateCodeDisassociated, ec2.VpcCidrBlockStateCodeFailing}, diff --git a/internal/service/elasticache/parameter_group.go b/internal/service/elasticache/parameter_group.go index a8423f2c68a7..9fe188ece781 100644 --- a/internal/service/elasticache/parameter_group.go +++ b/internal/service/elasticache/parameter_group.go @@ -409,20 +409,6 @@ func FlattenParameters(list []*elasticache.Parameter) []map[string]interface{} { return result } -// Takes the result of flatmap.Expand for an array of parameters 
and -// returns Parameter API compatible objects -func ExpandParameters(configured []interface{}) []*elasticache.ParameterNameValue { - parameters := make([]*elasticache.ParameterNameValue, len(configured)) - - // Loop over our configured parameters and create - // an array of aws-sdk-go compatible objects - for i, pRaw := range configured { - parameters[i] = expandParameter(pRaw.(map[string]interface{})) - } - - return parameters -} - func expandParameter(param map[string]interface{}) *elasticache.ParameterNameValue { return &elasticache.ParameterNameValue{ ParameterName: aws.String(param["name"].(string)), diff --git a/internal/service/elasticache/parameter_group_test.go b/internal/service/elasticache/parameter_group_test.go index b4b0e93762c1..4e6cf8069ee6 100644 --- a/internal/service/elasticache/parameter_group_test.go +++ b/internal/service/elasticache/parameter_group_test.go @@ -620,31 +620,6 @@ func TestFlattenParameters(t *testing.T) { } } -func TestExpandParameters(t *testing.T) { - t.Parallel() - - expanded := []interface{}{ - map[string]interface{}{ - "name": "activerehashing", - "value": "yes", - "apply_method": "immediate", - }, - } - parameters := tfelasticache.ExpandParameters(expanded) - - expected := &elasticache.ParameterNameValue{ - ParameterName: aws.String("activerehashing"), - ParameterValue: aws.String("yes"), - } - - if !reflect.DeepEqual(parameters[0], expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - parameters[0], - expected) - } -} - func TestParameterChanges(t *testing.T) { t.Parallel() diff --git a/internal/service/finspace/kx_scaling_group.go b/internal/service/finspace/kx_scaling_group.go new file mode 100644 index 000000000000..852fed8de75b --- /dev/null +++ b/internal/service/finspace/kx_scaling_group.go @@ -0,0 +1,292 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package finspace + +import ( + "context" + "errors" + "log" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/finspace" + "github.com/aws/aws-sdk-go-v2/service/finspace/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @SDKResource("aws_finspace_kx_scaling_group", name="Kx Scaling Group") +// @Tags(identifierAttribute="arn") +func ResourceKxScalingGroup() *schema.Resource { + return &schema.Resource{ + CreateWithoutTimeout: resourceKxScalingGroupCreate, + ReadWithoutTimeout: resourceKxScalingGroupRead, + UpdateWithoutTimeout: resourceKxScalingGroupUpdate, + DeleteWithoutTimeout: resourceKxScalingGroupDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(45 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "availability_zone_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "environment_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + 
ValidateFunc: validation.StringLenBetween(1, 32), + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "host_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 32), + }, + "created_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "last_modified_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "clusters": { + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "status_reason": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + }, + CustomizeDiff: verify.SetTagsDiff, + } +} + +const ( + ResNameKxScalingGroup = "Kx Scaling Group" + kxScalingGroupIDPartCount = 2 +) + +func resourceKxScalingGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + + environmentId := d.Get("environment_id").(string) + scalingGroupName := d.Get("name").(string) + idParts := []string{ + environmentId, + scalingGroupName, + } + rID, err := flex.FlattenResourceId(idParts, kxScalingGroupIDPartCount, false) + if err != nil { + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxScalingGroup, d.Get("name").(string), err) + } + d.SetId(rID) + + in := &finspace.CreateKxScalingGroupInput{ + EnvironmentId: aws.String(environmentId), + ScalingGroupName: aws.String(scalingGroupName), + HostType: aws.String(d.Get("host_type").(string)), + AvailabilityZoneId: aws.String(d.Get("availability_zone_id").(string)), + Tags: getTagsIn(ctx), + } + + out, err := conn.CreateKxScalingGroup(ctx, in) + if err != nil { + return create.AppendDiagError(diags, 
names.FinSpace, create.ErrActionCreating, ResNameKxScalingGroup, d.Get("name").(string), err) + } + + if out == nil { + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxScalingGroup, d.Get("name").(string), errors.New("empty output")) + } + + if _, err := waitKxScalingGroupCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxScalingGroup, d.Id(), err) + } + + return append(diags, resourceKxScalingGroupRead(ctx, d, meta)...) +} + +func resourceKxScalingGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + + out, err := FindKxScalingGroupById(ctx, conn, d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] FinSpace KxScalingGroup (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } + + if err != nil { + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionReading, ResNameKxScalingGroup, d.Id(), err) + } + d.Set("arn", out.ScalingGroupArn) + d.Set("status", out.Status) + d.Set("status_reason", out.StatusReason) + d.Set("created_timestamp", out.CreatedTimestamp.String()) + d.Set("last_modified_timestamp", out.LastModifiedTimestamp.String()) + d.Set("name", out.ScalingGroupName) + d.Set("availability_zone_id", out.AvailabilityZoneId) + d.Set("host_type", out.HostType) + d.Set("clusters", out.Clusters) + + parts, err := flex.ExpandResourceId(d.Id(), kxUserIDPartCount, false) + if err != nil { + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxScalingGroup, d.Id(), err) + } + d.Set("environment_id", parts[0]) + + return diags +} + +func resourceKxScalingGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + // Tags 
only. + return append(diags, resourceKxScalingGroupRead(ctx, d, meta)...) +} + +func resourceKxScalingGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + + log.Printf("[INFO] Deleting FinSpace KxScalingGroup %s", d.Id()) + _, err := conn.DeleteKxScalingGroup(ctx, &finspace.DeleteKxScalingGroupInput{ + ScalingGroupName: aws.String(d.Get("name").(string)), + EnvironmentId: aws.String(d.Get("environment_id").(string)), + }) + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return diags + } + + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionDeleting, ResNameKxScalingGroup, d.Id(), err) + } + + _, err = waitKxScalingGroupDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)) + if err != nil && !tfresource.NotFound(err) { + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxScalingGroup, d.Id(), err) + } + + return diags +} + +func FindKxScalingGroupById(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxScalingGroupOutput, error) { + parts, err := flex.ExpandResourceId(id, kxScalingGroupIDPartCount, false) + if err != nil { + return nil, err + } + in := &finspace.GetKxScalingGroupInput{ + EnvironmentId: aws.String(parts[0]), + ScalingGroupName: aws.String(parts[1]), + } + + out, err := conn.GetKxScalingGroup(ctx, in) + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } + } + + return nil, err + } + + if out == nil || out.ScalingGroupName == nil { + return nil, tfresource.NewEmptyResultError(in) + } + return out, nil +} + +func waitKxScalingGroupCreated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxScalingGroupOutput, error) { + stateConf := 
&retry.StateChangeConf{ + Pending: enum.Slice(types.KxScalingGroupStatusCreating), + Target: enum.Slice(types.KxScalingGroupStatusActive), + Refresh: statusKxScalingGroup(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxScalingGroupOutput); ok { + return out, err + } + + return nil, err +} + +func waitKxScalingGroupDeleted(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxScalingGroupOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxScalingGroupStatusDeleting), + Target: enum.Slice(types.KxScalingGroupStatusDeleted), + Refresh: statusKxScalingGroup(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxScalingGroupOutput); ok { + return out, err + } + + return nil, err +} + +func statusKxScalingGroup(ctx context.Context, conn *finspace.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + out, err := FindKxScalingGroupById(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return out, string(out.Status), nil + } +} diff --git a/internal/service/finspace/kx_scaling_group_test.go b/internal/service/finspace/kx_scaling_group_test.go new file mode 100644 index 000000000000..b3ee14078939 --- /dev/null +++ b/internal/service/finspace/kx_scaling_group_test.go @@ -0,0 +1,356 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package finspace_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/finspace" + "github.com/aws/aws-sdk-go-v2/service/finspace/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tffinspace "github.com/hashicorp/terraform-provider-aws/internal/service/finspace" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccFinSpaceKxScalingGroup_basic(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var scalingGroup finspace.GetKxScalingGroupOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_scaling_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxScalingGroupDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxScalingGroupConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxScalingGroupExists(ctx, resourceName, &scalingGroup), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxScalingGroupStatusActive)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccFinSpaceKxScalingGroup_disappears(t *testing.T) { + if testing.Short() { + 
t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var scalingGroup finspace.GetKxScalingGroupOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_scaling_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxScalingGroupDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxScalingGroupConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxScalingGroupExists(ctx, resourceName, &scalingGroup), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tffinspace.ResourceKxScalingGroup(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccFinSpaceKxScalingGroup_tags(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var scalingGroup finspace.GetKxScalingGroupOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_scaling_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxScalingGroupDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxScalingGroupConfig_tags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxScalingGroupExists(ctx, resourceName, &scalingGroup), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + 
resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccKxScalingGroupConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxScalingGroupExists(ctx, resourceName, &scalingGroup), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccKxScalingGroupConfig_tags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxScalingGroupExists(ctx, resourceName, &scalingGroup), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + +func testAccCheckKxScalingGroupDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_finspace_kx_scaling_group" { + continue + } + + _, err := tffinspace.FindKxScalingGroupById(ctx, conn, rs.Primary.ID) + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return nil + } + return err + } + + return create.Error(names.FinSpace, create.ErrActionCheckingDestroyed, tffinspace.ResNameKxScalingGroup, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccCheckKxScalingGroupExists(ctx context.Context, name string, scalingGroup *finspace.GetKxScalingGroupOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok 
{ + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxScalingGroup, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxScalingGroup, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + resp, err := tffinspace.FindKxScalingGroupById(ctx, conn, rs.Primary.ID) + if err != nil { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxScalingGroup, rs.Primary.ID, err) + } + + *scalingGroup = *resp + + return nil + } +} + +func testAccKxScalingGroupConfigBase(rName string) string { + return fmt.Sprintf(` +data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} + +output "account_id" { + value = data.aws_caller_identity.current.account_id +} + +resource "aws_kms_key" "test" { + deletion_window_in_days = 7 +} + +resource "aws_finspace_kx_environment" "test" { + name = %[1]q + kms_key_id = aws_kms_key.test.arn +} + +data "aws_iam_policy_document" "key_policy" { + statement { + actions = [ + "kms:Decrypt", + "kms:GenerateDataKey" + ] + + resources = [ + aws_kms_key.test.arn, + ] + + principals { + type = "Service" + identifiers = ["finspace.amazonaws.com"] + } + + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = ["${aws_finspace_kx_environment.test.arn}/*"] + } + + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] + } + } + + statement { + actions = [ + "kms:*", + ] + + resources = [ + "*", + ] + + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } + } +} + +resource "aws_kms_key_policy" "test" { + key_id = aws_kms_key.test.id + policy = data.aws_iam_policy_document.key_policy.json +} + +resource "aws_vpc" "test" { + 
cidr_block = "172.31.0.0/16" + enable_dns_hostnames = true +} + +resource "aws_subnet" "test" { + vpc_id = aws_vpc.test.id + cidr_block = "172.31.32.0/20" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] +} + +resource "aws_security_group" "test" { + name = %[1]q + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} + +data "aws_route_tables" "rts" { + vpc_id = aws_vpc.test.id +} + +resource "aws_route" "r" { + route_table_id = tolist(data.aws_route_tables.rts.ids)[0] + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id +} +`, rName) +} + +func testAccKxScalingGroupConfig_basic(rName string) string { + return acctest.ConfigCompose( + testAccKxScalingGroupConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_scaling_group" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + host_type = "kx.sg.4xlarge" +} +`, rName)) +} + +func testAccKxScalingGroupConfig_tags1(rName, key1, value1 string) string { + return acctest.ConfigCompose( + testAccKxScalingGroupConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_scaling_group" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + host_type = "kx.sg.4xlarge" + + tags = { + %[2]q = %[3]q + } +} +`, rName, key1, value1)) +} + +func testAccKxScalingGroupConfig_tags2(rName, key1, value1, key2, value2 string) string { + return acctest.ConfigCompose( + testAccKxScalingGroupConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_scaling_group" "test" { + name = %[1]q + environment_id = 
aws_finspace_kx_environment.test.id + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + host_type = "kx.sg.4xlarge" + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, key1, value1, key2, value2)) +} diff --git a/internal/service/finspace/kx_volume.go b/internal/service/finspace/kx_volume.go new file mode 100644 index 000000000000..0446eb223cea --- /dev/null +++ b/internal/service/finspace/kx_volume.go @@ -0,0 +1,500 @@ +// // Copyright (c) HashiCorp, Inc. +// // SPDX-License-Identifier: MPL-2.0 +package finspace + +import ( + "context" + "errors" + "log" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/finspace" + "github.com/aws/aws-sdk-go-v2/service/finspace/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @SDKResource("aws_finspace_kx_volume", name="Kx Volume") +// @Tags(identifierAttribute="arn") +func ResourceKxVolume() *schema.Resource { + return &schema.Resource{ + CreateWithoutTimeout: resourceKxVolumeCreate, + ReadWithoutTimeout: resourceKxVolumeRead, + UpdateWithoutTimeout: resourceKxVolumeUpdate, + DeleteWithoutTimeout: resourceKxVolumeDelete, + + Importer: &schema.ResourceImporter{ + StateContext: 
schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(45 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "attached_clusters": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "cluster_status": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxClusterStatus](), + }, + "cluster_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxClusterType](), + }, + }, + }, + Computed: true, + }, + "availability_zones": { + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Required: true, + ForceNew: true, + }, + "az_mode": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxAzMode](), + }, + "created_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 1000), + }, + "environment_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 32), + }, + "last_modified_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "nas1_configuration": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(1200, 33600), + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, 
+ ValidateDiagFunc: enum.Validate[types.KxNAS1Type](), + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "status_reason": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxVolumeType](), + }, + }, + CustomizeDiff: verify.SetTagsDiff, + } +} + +const ( + ResNameKxVolume = "Kx Volume" + kxVolumeIDPartCount = 2 +) + +func resourceKxVolumeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + + environmentId := d.Get("environment_id").(string) + volumeName := d.Get("name").(string) + idParts := []string{ + environmentId, + volumeName, + } + rID, err := flex.FlattenResourceId(idParts, kxVolumeIDPartCount, false) + if err != nil { + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxVolume, d.Get("name").(string), err) + } + d.SetId(rID) + + in := &finspace.CreateKxVolumeInput{ + ClientToken: aws.String(id.UniqueId()), + AvailabilityZoneIds: flex.ExpandStringValueList(d.Get("availability_zones").([]interface{})), + EnvironmentId: aws.String(environmentId), + VolumeType: types.KxVolumeType(d.Get("type").(string)), + VolumeName: aws.String(volumeName), + AzMode: types.KxAzMode(d.Get("az_mode").(string)), + Tags: getTagsIn(ctx), + } + + if v, ok := d.GetOk("description"); ok { + in.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("nas1_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + in.Nas1Configuration = expandNas1Configuration(v.([]interface{})) + } + + out, err := 
conn.CreateKxVolume(ctx, in) + if err != nil { + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), err) + } + + if out == nil || out.VolumeName == nil { + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), errors.New("empty output")) + } + + if _, err := waitKxVolumeCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxVolume, d.Id(), err) + } + + // The CreateKxVolume API currently fails to tag the Volume when the + // Tags field is set. Until the API is fixed, tag after creation instead. + if err := createTags(ctx, conn, aws.ToString(out.VolumeArn), getTagsIn(ctx)); err != nil { + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Id(), err) + } + + return append(diags, resourceKxVolumeRead(ctx, d, meta)...) 
+} + +func resourceKxVolumeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + + out, err := FindKxVolumeByID(ctx, conn, d.Id()) + + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] FinSpace KxVolume (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } + + if err != nil { + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionReading, ResNameKxVolume, d.Id(), err) + } + + d.Set("arn", out.VolumeArn) + d.Set("name", out.VolumeName) + d.Set("description", out.Description) + d.Set("type", out.VolumeType) + d.Set("status", out.Status) + d.Set("status_reason", out.StatusReason) + d.Set("az_mode", out.AzMode) + d.Set("description", out.Description) + d.Set("created_timestamp", out.CreatedTimestamp.String()) + d.Set("last_modified_timestamp", out.LastModifiedTimestamp.String()) + d.Set("availability_zones", aws.StringSlice(out.AvailabilityZoneIds)) + + if err := d.Set("nas1_configuration", flattenNas1Configuration(out.Nas1Configuration)); err != nil { + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err) + } + + if err := d.Set("attached_clusters", flattenAttachedClusters(out.AttachedClusters)); err != nil { + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err) + } + + parts, err := flex.ExpandResourceId(d.Id(), kxVolumeIDPartCount, false) + if err != nil { + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err) + } + d.Set("environment_id", parts[0]) + + return diags +} + +func resourceKxVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + + updateVolume := false + + in := 
&finspace.UpdateKxVolumeInput{ + EnvironmentId: aws.String(d.Get("environment_id").(string)), + VolumeName: aws.String(d.Get("name").(string)), + } + + if v, ok := d.GetOk("description"); ok && d.HasChanges("description") { + in.Description = aws.String(v.(string)) + updateVolume = true + } + + if v, ok := d.GetOk("nas1_configuration"); ok && len(v.([]interface{})) > 0 && d.HasChanges("nas1_configuration") { + in.Nas1Configuration = expandNas1Configuration(v.([]interface{})) + updateVolume = true + } + + if !updateVolume { + return diags + } + + log.Printf("[DEBUG] Updating FinSpace KxVolume (%s): %#v", d.Id(), in) + + if _, err := conn.UpdateKxVolume(ctx, in); err != nil { + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionUpdating, ResNameKxVolume, d.Id(), err) + } + if _, err := waitKxVolumeUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionUpdating, ResNameKxVolume, d.Id(), err) + } + + return append(diags, resourceKxVolumeRead(ctx, d, meta)...) 
+} + +func resourceKxVolumeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + + log.Printf("[INFO] Deleting FinSpace Kx Volume: %s", d.Id()) + _, err := conn.DeleteKxVolume(ctx, &finspace.DeleteKxVolumeInput{ + VolumeName: aws.String(d.Get("name").(string)), + EnvironmentId: aws.String(d.Get("environment_id").(string)), + }) + + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return diags + } + + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionDeleting, ResNameKxVolume, d.Id(), err) + } + + _, err = waitKxVolumeDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)) + if err != nil && !tfresource.NotFound(err) { + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxVolume, d.Id(), err) + } + + return diags +} + +func waitKxVolumeCreated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxVolumeOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxVolumeStatusCreating), + Target: enum.Slice(types.KxVolumeStatusActive), + Refresh: statusKxVolume(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxVolumeOutput); ok { + return out, err + } + + return nil, err +} + +func waitKxVolumeUpdated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxVolumeOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxVolumeStatusCreating, types.KxVolumeStatusUpdating), + Target: enum.Slice(types.KxVolumeStatusActive), + Refresh: statusKxVolume(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := 
stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxVolumeOutput); ok { + return out, err + } + + return nil, err +} + +func waitKxVolumeDeleted(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxVolumeOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxVolumeStatusDeleting), + Target: enum.Slice(types.KxVolumeStatusDeleted), + Refresh: statusKxVolume(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxVolumeOutput); ok { + return out, err + } + + return nil, err +} + +func statusKxVolume(ctx context.Context, conn *finspace.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + out, err := FindKxVolumeByID(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return out, string(out.Status), nil + } +} + +func FindKxVolumeByID(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxVolumeOutput, error) { + parts, err := flex.ExpandResourceId(id, kxVolumeIDPartCount, false) + if err != nil { + return nil, err + } + + in := &finspace.GetKxVolumeInput{ + EnvironmentId: aws.String(parts[0]), + VolumeName: aws.String(parts[1]), + } + + out, err := conn.GetKxVolume(ctx, in) + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } + } + + return nil, err + } + + if out == nil || out.VolumeArn == nil { + return nil, tfresource.NewEmptyResultError(in) + } + + return out, nil +} + +func expandNas1Configuration(tfList []interface{}) *types.KxNAS1Configuration { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]interface{}) + + a := &types.KxNAS1Configuration{} + + if v, ok := tfMap["size"].(int); ok && v != 0 { + a.Size 
= aws.Int32(int32(v)) + } + + if v, ok := tfMap["type"].(string); ok && v != "" { + a.Type = types.KxNAS1Type(v) + } + return a +} + +func flattenNas1Configuration(apiObject *types.KxNAS1Configuration) []interface{} { + if apiObject == nil { + return nil + } + + m := map[string]interface{}{} + + if v := apiObject.Size; v != nil { + m["size"] = aws.ToInt32(v) + } + + if v := apiObject.Type; v != "" { + m["type"] = v + } + + return []interface{}{m} +} + +func flattenCluster(apiObject *types.KxAttachedCluster) map[string]interface{} { + if apiObject == nil { + return nil + } + + m := map[string]interface{}{} + + if v := apiObject.ClusterName; aws.ToString(v) != "" { + m["cluster_name"] = aws.ToString(v) + } + + if v := apiObject.ClusterStatus; v != "" { + m["cluster_status"] = string(v) + } + + if v := apiObject.ClusterType; v != "" { + m["cluster_type"] = string(v) + } + + return m +} + +func flattenAttachedClusters(apiObjects []types.KxAttachedCluster) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var l []interface{} + + for _, apiObject := range apiObjects { + l = append(l, flattenCluster(&apiObject)) + } + + return l +} diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go new file mode 100644 index 000000000000..fa80b8b039a5 --- /dev/null +++ b/internal/service/finspace/kx_volume_test.go @@ -0,0 +1,266 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package finspace_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/finspace" + "github.com/aws/aws-sdk-go-v2/service/finspace/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tffinspace "github.com/hashicorp/terraform-provider-aws/internal/service/finspace" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccFinSpaceKxVolume_basic(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var volume finspace.GetKxVolumeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_volume.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxVolumeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxVolumeConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxVolumeExists(ctx, resourceName, &volume), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxVolumeStatusActive)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccFinSpaceKxVolume_disappears(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx 
:= acctest.Context(t) + var volume finspace.GetKxVolumeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_volume.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxVolumeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxVolumeConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxVolumeExists(ctx, resourceName, &volume), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tffinspace.ResourceKxVolume(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckKxVolumeDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_finspace_kx_volume" { + continue + } + + _, err := tffinspace.FindKxVolumeByID(ctx, conn, rs.Primary.ID) + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return nil + } + return err + } + + return create.Error(names.FinSpace, create.ErrActionCheckingDestroyed, tffinspace.ResNameKxVolume, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccCheckKxVolumeExists(ctx context.Context, name string, volume *finspace.GetKxVolumeOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, 
name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + resp, err := tffinspace.FindKxVolumeByID(ctx, conn, rs.Primary.ID) + if err != nil { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, rs.Primary.ID, err) + } + + *volume = *resp + + return nil + } +} + +func testAccKxVolumeConfigBase(rName string) string { + return fmt.Sprintf(` +data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} + +resource "aws_kms_key" "test" { + deletion_window_in_days = 7 +} + +resource "aws_finspace_kx_environment" "test" { + name = %[1]q + kms_key_id = aws_kms_key.test.arn +} + +data "aws_iam_policy_document" "key_policy" { + statement { + actions = [ + "kms:Decrypt", + "kms:GenerateDataKey" + ] + + resources = [ + aws_kms_key.test.arn, + ] + + principals { + type = "Service" + identifiers = ["finspace.amazonaws.com"] + } + + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = ["${aws_finspace_kx_environment.test.arn}/*"] + } + + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] + } + } + + statement { + actions = [ + "kms:*", + ] + + resources = [ + "*", + ] + + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } + } +} + +resource "aws_kms_key_policy" "test" { + key_id = aws_kms_key.test.id + policy = data.aws_iam_policy_document.key_policy.json +} + +resource "aws_vpc" "test" { + cidr_block = "172.31.0.0/16" + enable_dns_hostnames = true +} + +resource "aws_subnet" "test" { + vpc_id = aws_vpc.test.id + cidr_block = "172.31.32.0/20" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] +} + +resource "aws_security_group" "test" { + name = %[1]q + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + 
cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} + +data "aws_route_tables" "rts" { + vpc_id = aws_vpc.test.id +} + +resource "aws_route" "r" { + route_table_id = tolist(data.aws_route_tables.rts.ids)[0] + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id +} +`, rName) +} + +func testAccKxVolumeConfig_basic(rName string) string { + return acctest.ConfigCompose( + testAccKxVolumeConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_volume" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type = "SSD_250" + size = 1200 + } +} +`, rName)) +} diff --git a/internal/service/finspace/service_package_gen.go b/internal/service/finspace/service_package_gen.go index b34d275706c8..233a0b93e689 100644 --- a/internal/service/finspace/service_package_gen.go +++ b/internal/service/finspace/service_package_gen.go @@ -44,6 +44,14 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka IdentifierAttribute: "arn", }, }, + { + Factory: ResourceKxDataview, + TypeName: "aws_finspace_kx_dataview", + Name: "Kx Dataview", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: "arn", + }, + }, { Factory: ResourceKxEnvironment, TypeName: "aws_finspace_kx_environment", @@ -52,6 +60,14 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka IdentifierAttribute: "arn", }, }, + { + Factory: ResourceKxScalingGroup, + TypeName: "aws_finspace_kx_scaling_group", + Name: "Kx Scaling Group", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: "arn", + }, + }, { Factory: ResourceKxUser, TypeName: "aws_finspace_kx_user", @@ -61,9 +77,9 @@ func (p *servicePackage) 
SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceKxDataview, - TypeName: "aws_finspace_kx_dataview", - Name: "Kx Dataview", + Factory: ResourceKxVolume, + TypeName: "aws_finspace_kx_volume", + Name: "Kx Volume", Tags: &types.ServicePackageResourceTags{ IdentifierAttribute: "arn", }, diff --git a/internal/service/iam/arn.go b/internal/service/iam/arn.go deleted file mode 100644 index 1abf14baf02b..000000000000 --- a/internal/service/iam/arn.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package iam - -import ( - "fmt" - "strings" - - "github.com/aws/aws-sdk-go/aws/arn" -) - -const ( - ARNSeparator = "/" - ARNService = "iam" - - InstanceProfileResourcePrefix = "instance-profile" -) - -// InstanceProfileARNToName converts Amazon Resource Name (ARN) to Name. -func InstanceProfileARNToName(inputARN string) (string, error) { - parsedARN, err := arn.Parse(inputARN) - - if err != nil { - return "", fmt.Errorf("parsing ARN (%s): %w", inputARN, err) - } - - if actual, expected := parsedARN.Service, ARNService; actual != expected { - return "", fmt.Errorf("expected service %s in ARN (%s), got: %s", expected, inputARN, actual) - } - - resourceParts := strings.Split(parsedARN.Resource, ARNSeparator) - - if actual, expected := len(resourceParts), 2; actual < expected { - return "", fmt.Errorf("expected at least %d resource parts in ARN (%s), got: %d", expected, inputARN, actual) - } - - if actual, expected := resourceParts[0], InstanceProfileResourcePrefix; actual != expected { - return "", fmt.Errorf("expected resource prefix %s in ARN (%s), got: %s", expected, inputARN, actual) - } - - return resourceParts[len(resourceParts)-1], nil -} diff --git a/internal/service/iam/arn_test.go b/internal/service/iam/arn_test.go deleted file mode 100644 index ba07c02b7c21..000000000000 --- a/internal/service/iam/arn_test.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package iam_test - -import ( - "regexp" - "testing" - - "github.com/YakDriver/regexache" - tfiam "github.com/hashicorp/terraform-provider-aws/internal/service/iam" -) - -func TestInstanceProfileARNToName(t *testing.T) { - t.Parallel() - - testCases := []struct { - TestName string - InputARN string - ExpectedError *regexp.Regexp - ExpectedName string - }{ - { - TestName: "empty ARN", - InputARN: "", - ExpectedError: regexache.MustCompile(`parsing ARN`), - }, - { - TestName: "unparsable ARN", - InputARN: "test", - ExpectedError: regexache.MustCompile(`parsing ARN`), - }, - { - TestName: "invalid ARN service", - InputARN: "arn:aws:ec2:us-east-1:123456789012:instance/i-12345678", //lintignore:AWSAT003,AWSAT005 - ExpectedError: regexache.MustCompile(`expected service iam`), - }, - { - TestName: "invalid ARN resource parts", - InputARN: "arn:aws:iam:us-east-1:123456789012:name", //lintignore:AWSAT003,AWSAT005 - ExpectedError: regexache.MustCompile(`expected at least 2 resource parts`), - }, - { - TestName: "invalid ARN resource prefix", - InputARN: "arn:aws:iam:us-east-1:123456789012:role/name", //lintignore:AWSAT003,AWSAT005 - ExpectedError: regexache.MustCompile(`expected resource prefix instance-profile`), - }, - { - TestName: "valid ARN", - InputARN: "arn:aws:iam:us-east-1:123456789012:instance-profile/name", //lintignore:AWSAT003,AWSAT005 - ExpectedName: "name", - }, - { - TestName: "valid ARN with multiple parts", - InputARN: "arn:aws:iam:us-east-1:123456789012:instance-profile/path/name", //lintignore:AWSAT003,AWSAT005 - ExpectedName: "name", - }, - } - - for _, testCase := range testCases { - testCase := testCase - t.Run(testCase.TestName, func(t *testing.T) { - t.Parallel() - - got, err := tfiam.InstanceProfileARNToName(testCase.InputARN) - - if err == nil && testCase.ExpectedError != nil { - t.Fatalf("expected error %s, got no error", testCase.ExpectedError.String()) - } - - if err != nil && testCase.ExpectedError == nil 
{ - t.Fatalf("got unexpected error: %s", err) - } - - if err != nil && !testCase.ExpectedError.MatchString(err.Error()) { - t.Fatalf("expected error %s, got: %s", testCase.ExpectedError.String(), err) - } - - if got != testCase.ExpectedName { - t.Errorf("got %s, expected %s", got, testCase.ExpectedName) - } - }) - } -} diff --git a/internal/service/kinesis/flex.go b/internal/service/kinesis/flex.go deleted file mode 100644 index c6a83295f5b8..000000000000 --- a/internal/service/kinesis/flex.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package kinesis - -import ( - "github.com/aws/aws-sdk-go/service/kinesis" -) - -func FlattenShardLevelMetrics(list []*kinesis.EnhancedMetrics) []string { - if len(list) == 0 { - return []string{} - } - strs := make([]string, 0, len(list[0].ShardLevelMetrics)) - for _, s := range list[0].ShardLevelMetrics { - strs = append(strs, *s) - } - return strs -} diff --git a/internal/service/kinesis/flex_test.go b/internal/service/kinesis/flex_test.go deleted file mode 100644 index f340759d2e13..000000000000 --- a/internal/service/kinesis/flex_test.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package kinesis - -import ( - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kinesis" -) - -func TestFlattenShardLevelMetrics(t *testing.T) { - t.Parallel() - - expanded := []*kinesis.EnhancedMetrics{ - { - ShardLevelMetrics: []*string{ - aws.String("IncomingBytes"), - aws.String("IncomingRecords"), - }, - }, - } - result := FlattenShardLevelMetrics(expanded) - if len(result) != 2 { - t.Fatalf("expected result had %d elements, but got %d", 2, len(result)) - } - if result[0] != "IncomingBytes" { - t.Fatalf("expected element 0 to be IncomingBytes, but was %s", result[0]) - } - if result[1] != "IncomingRecords" { - t.Fatalf("expected element 0 to be IncomingRecords, but was %s", result[1]) - } -} diff --git a/internal/service/kinesisanalytics/list.go b/internal/service/kinesisanalytics/list.go index 43d8984be937..4708493836f8 100644 --- a/internal/service/kinesisanalytics/list.go +++ b/internal/service/kinesisanalytics/list.go @@ -12,7 +12,7 @@ import ( // Custom Kinesisanalytics listing functions using similar formatting as other service generated code. 
-func ListApplicationsPages(ctx context.Context, conn *kinesisanalytics.KinesisAnalytics, input *kinesisanalytics.ListApplicationsInput, fn func(*kinesisanalytics.ListApplicationsOutput, bool) bool) error { +func listApplicationsPages(ctx context.Context, conn *kinesisanalytics.KinesisAnalytics, input *kinesisanalytics.ListApplicationsInput, fn func(*kinesisanalytics.ListApplicationsOutput, bool) bool) error { for { output, err := conn.ListApplicationsWithContext(ctx, input) if err != nil { diff --git a/internal/service/kinesisanalytics/sweep.go b/internal/service/kinesisanalytics/sweep.go index 6b63e1b08a80..66359506afb4 100644 --- a/internal/service/kinesisanalytics/sweep.go +++ b/internal/service/kinesisanalytics/sweep.go @@ -36,7 +36,7 @@ func sweepApplications(region string) error { var sweeperErrs *multierror.Error input := &kinesisanalytics.ListApplicationsInput{} - err = ListApplicationsPages(ctx, conn, input, func(page *kinesisanalytics.ListApplicationsOutput, lastPage bool) bool { + err = listApplicationsPages(ctx, conn, input, func(page *kinesisanalytics.ListApplicationsOutput, lastPage bool) bool { if page == nil { return !lastPage } diff --git a/internal/service/lakeformation/strings.go b/internal/service/lakeformation/strings.go index ece0ae79a5b9..2dc826bdf371 100644 --- a/internal/service/lakeformation/strings.go +++ b/internal/service/lakeformation/strings.go @@ -23,14 +23,3 @@ func StringSlicesEqualIgnoreOrder(s1, s2 []*string) bool { return reflect.DeepEqual(v1, v2) } - -func StringSlicesEqual(s1, s2 []*string) bool { - if len(s1) != len(s2) { - return false - } - - v1 := aws.StringValueSlice(s1) - v2 := aws.StringValueSlice(s2) - - return reflect.DeepEqual(v1, v2) -} diff --git a/internal/service/lakeformation/strings_test.go b/internal/service/lakeformation/strings_test.go index a864d14f2f06..f9f890a47395 100644 --- a/internal/service/lakeformation/strings_test.go +++ b/internal/service/lakeformation/strings_test.go @@ -65,63 +65,3 @@ func 
TestStringSlicesEqualIgnoreOrder(t *testing.T) { } } } - -func TestStringSlicesEqual(t *testing.T) { - t.Parallel() - - equal := []interface{}{ - []interface{}{ - []string{"a", "b", "c"}, - []string{"a", "b", "c"}, - }, - []interface{}{ - []string{"b", "a", "c"}, - []string{"b", "a", "c"}, - }, - []interface{}{ - []string{"apple", "carrot", "tomato"}, - []string{"apple", "carrot", "tomato"}, - }, - []interface{}{ - []string{"Application", "Barrier", "Chilly", "Donut"}, - []string{"Application", "Barrier", "Chilly", "Donut"}, - }, - []interface{}{ - []string{}, - []string{}, - }, - } - for _, v := range equal { - if !tflakeformation.StringSlicesEqual(aws.StringSlice(v.([]interface{})[0].([]string)), aws.StringSlice(v.([]interface{})[1].([]string))) { - t.Fatalf("%v should be equal: %v", v.([]interface{})[0].([]string), v.([]interface{})[1].([]string)) - } - } - - notEqual := []interface{}{ - []interface{}{ - []string{"a", "b", "c"}, - []string{"a", "b"}, - }, - []interface{}{ - []string{"a", "b", "c"}, - []string{"b", "a", "c"}, - }, - []interface{}{ - []string{"apple", "carrot", "tomato"}, - []string{"apple", "carrot", "tomato", "zucchini"}, - }, - []interface{}{ - []string{"Application", "Barrier", "Chilly", "Donut"}, - []string{"Application", "Barrier", "Chilly", "Donuts"}, - }, - []interface{}{ - []string{}, - []string{"Application", "Barrier", "Chilly", "Donuts"}, - }, - } - for _, v := range notEqual { - if tflakeformation.StringSlicesEqual(aws.StringSlice(v.([]interface{})[0].([]string)), aws.StringSlice(v.([]interface{})[1].([]string))) { - t.Fatalf("%v should not be equal: %v", v.([]interface{})[0].([]string), v.([]interface{})[1].([]string)) - } - } -} diff --git a/internal/service/lambda/permission.go b/internal/service/lambda/permission.go index d3383355adc4..410a48b17284 100644 --- a/internal/service/lambda/permission.go +++ b/internal/service/lambda/permission.go @@ -340,21 +340,6 @@ func FindPolicyStatementByTwoPartKey(ctx context.Context, conn 
*lambda.Lambda, f } } -func FindPolicyStatementByID(policy *Policy, id string) (*PolicyStatement, error) { - log.Printf("[DEBUG] Received %d statements in Lambda policy: %s", len(policy.Statement), policy.Statement) - for _, statement := range policy.Statement { - if statement.Sid == id { - return &statement, nil - } - } - - return nil, &retry.NotFoundError{ - LastRequest: id, - LastResponse: policy, - Message: fmt.Sprintf("Failed to find statement %q in Lambda policy:\n%s", id, policy.Statement), - } -} - func GetQualifierFromAliasOrVersionARN(arn string) (string, error) { matches := regexache.MustCompile(functionRegexp).FindStringSubmatch(arn) if len(matches) < 8 || matches[7] == "" { diff --git a/internal/service/polly/service_package_gen.go b/internal/service/polly/service_package_gen.go index 43fe19efee8f..4391a3cc5450 100644 --- a/internal/service/polly/service_package_gen.go +++ b/internal/service/polly/service_package_gen.go @@ -15,7 +15,12 @@ import ( type servicePackage struct{} func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.ServicePackageFrameworkDataSource { - return []*types.ServicePackageFrameworkDataSource{} + return []*types.ServicePackageFrameworkDataSource{ + { + Factory: newDataSourceVoices, + Name: "Voices", + }, + } } func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { diff --git a/internal/service/polly/voices_data_source.go b/internal/service/polly/voices_data_source.go new file mode 100644 index 000000000000..1274c5ba5854 --- /dev/null +++ b/internal/service/polly/voices_data_source.go @@ -0,0 +1,153 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package polly + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/polly" + awstypes "github.com/aws/aws-sdk-go-v2/service/polly/types" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkDataSource(name="Voices") +func newDataSourceVoices(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceVoices{}, nil +} + +const ( + DSNameVoices = "Voices Data Source" +) + +type dataSourceVoices struct { + framework.DataSourceWithConfigure +} + +func (d *dataSourceVoices) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { // nosemgrep:ci.meta-in-func-name + resp.TypeName = "aws_polly_voices" +} + +func (d *dataSourceVoices) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "engine": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.Engine](), + Optional: true, + }, + "id": framework.IDAttribute(), + "include_additional_language_codes": schema.BoolAttribute{ + Optional: true, + }, + "language_code": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.LanguageCode](), + Optional: true, + }, + }, + Blocks: map[string]schema.Block{ + "voices": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[voicesData](ctx), + NestedObject: schema.NestedBlockObject{ + Attributes: 
map[string]schema.Attribute{ + "additional_language_codes": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Computed: true, + }, + "gender": schema.StringAttribute{ + Computed: true, + }, + "id": schema.StringAttribute{ + Computed: true, + }, + "language_code": schema.StringAttribute{ + Computed: true, + }, + "language_name": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "supported_engines": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Computed: true, + }, + }, + }, + }, + }, + } +} +func (d *dataSourceVoices) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().PollyClient(ctx) + + var data dataSourceVoicesData + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + data.ID = types.StringValue(d.Meta().AccountID) + + input := &polly.DescribeVoicesInput{} + resp.Diagnostics.Append(flex.Expand(ctx, data, input)...) + if resp.Diagnostics.HasError() { + return + } + + // No paginator helper so pagination must be done manually + out := &polly.DescribeVoicesOutput{} + for { + page, err := conn.DescribeVoices(ctx, input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.Polly, create.ErrActionReading, DSNameVoices, data.ID.String(), err), + err.Error(), + ) + return + } + + if page == nil { + break + } + + if len(page.Voices) > 0 { + out.Voices = append(out.Voices, page.Voices...) + } + + input.NextToken = page.NextToken + if page.NextToken == nil { + break + } + } + + resp.Diagnostics.Append(flex.Flatten(ctx, out, &data)...) + + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} + +type dataSourceVoicesData struct { + Engine fwtypes.StringEnum[awstypes.Engine] `tfsdk:"engine"` + ID types.String `tfsdk:"id"` + IncludeAdditionalLanguageCodes types.Bool `tfsdk:"include_additional_language_codes"` + LanguageCode fwtypes.StringEnum[awstypes.LanguageCode] `tfsdk:"language_code"` + Voices fwtypes.ListNestedObjectValueOf[voicesData] `tfsdk:"voices"` +} + +type voicesData struct { + AdditionalLanguageCodes fwtypes.ListValueOf[types.String] `tfsdk:"additional_language_codes"` + Gender types.String `tfsdk:"gender"` + ID types.String `tfsdk:"id"` + LanguageCode types.String `tfsdk:"language_code"` + LanguageName types.String `tfsdk:"language_name"` + Name types.String `tfsdk:"name"` + SupportedEngines fwtypes.ListValueOf[types.String] `tfsdk:"supported_engines"` +} diff --git a/internal/service/polly/voices_data_source_test.go b/internal/service/polly/voices_data_source_test.go new file mode 100644 index 000000000000..21eda739a7a1 --- /dev/null +++ b/internal/service/polly/voices_data_source_test.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package polly_test + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/polly/types" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccPollyVoicesDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + dataSourceName := "data.aws_polly_voices.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PollyEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PollyEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccVoicesDataSourceConfig_basic(), + Check: resource.ComposeTestCheckFunc( + // verify a known voice is returned in the results + resource.TestCheckTypeSetElemNestedAttrs(dataSourceName, "voices.*", map[string]string{ + "gender": "Female", + "language_code": "en-US", + "name": "Kendra", + }), + ), + }, + }, + }) +} + +func TestAccPollyVoicesDataSource_languageCode(t *testing.T) { + ctx := acctest.Context(t) + dataSourceName := "data.aws_polly_voices.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PollyEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PollyEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccVoicesDataSourceConfig_languageCode(string(types.LanguageCodeEnUs)), + Check: resource.ComposeTestCheckFunc( + // verify a known voice is returned in the results + resource.TestCheckTypeSetElemNestedAttrs(dataSourceName, "voices.*", map[string]string{ + "gender": "Female", + "language_code": "en-US", + "name": "Kendra", + }), + ), + }, + }, + }) +} + 
+func testAccVoicesDataSourceConfig_basic() string { + return ` +data "aws_polly_voices" "test" {} +` +} + +func testAccVoicesDataSourceConfig_languageCode(languageCode string) string { + return fmt.Sprintf(` +data "aws_polly_voices" "test" { + language_code = %[1]q +} +`, languageCode) +} diff --git a/names/names.go b/names/names.go index 7eaf6c896fe6..30dd6320c9df 100644 --- a/names/names.go +++ b/names/names.go @@ -66,6 +66,7 @@ const ( ObservabilityAccessManagerEndpointID = "oam" OpenSearchServerlessEndpointID = "aoss" PipesEndpointID = "pipes" + PollyEndpointID = "polly" PricingEndpointID = "pricing" QLDBEndpointID = "qldb" RedshiftDataEndpointID = "redshift-data" diff --git a/website/docs/d/polly_voices.html.markdown b/website/docs/d/polly_voices.html.markdown new file mode 100644 index 000000000000..d9f73c2573ae --- /dev/null +++ b/website/docs/d/polly_voices.html.markdown @@ -0,0 +1,54 @@ +--- +subcategory: "Polly" +layout: "aws" +page_title: "AWS: aws_polly_voices" +description: |- + Terraform data source for managing an AWS Polly Voices. +--- + +# Data Source: aws_polly_voices + +Terraform data source for managing an AWS Polly Voices. + +## Example Usage + +### Basic Usage + +```terraform +data "aws_polly_voices" "example" {} +``` + +### With Language Code + +```terraform +data "aws_polly_voices" "example" { + language_code = "en-GB" +} +``` + +## Argument Reference + +The following arguments are optional: + +* `engine` - (Optional) Engine used by Amazon Polly when processing input text for speech synthesis. Valid values are `standard`, `neural`, and `long-form`. +* `include_additional_language_codes` - (Optional) Whether to return any bilingual voices that use the specified language as an additional language. +* `language_code` - (Optional) Language identification tag for filtering the list of voices returned. If not specified, all available voices are returned. 
+ +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `id` - AWS account ID. +* `voices` - List of voices with their properties. See [`voices` Attribute Reference](#voices-attribute-reference) below. + +### `voices` Attribute Reference + +See the [AWS Polly Voice documentation](https://docs.aws.amazon.com/polly/latest/dg/API_Voice.html) for additional details. + +* `additional_language_codes` - Additional codes for languages available for the specified voice in addition to its default language. +* `gender` - Gender of the voice. +* `id` - Amazon Polly assigned voice ID. +* `language_code` - Language code of the voice. +* `language_name` - Human readable name of the language in English. +* `name` - Name of the voice. +* `supported_engines` - Specifies which engines are supported by a given voice. diff --git a/website/docs/r/dms_event_subscription.html.markdown b/website/docs/r/dms_event_subscription.html.markdown index 6b8b401094be..3d53fe93610e 100644 --- a/website/docs/r/dms_event_subscription.html.markdown +++ b/website/docs/r/dms_event_subscription.html.markdown @@ -34,7 +34,7 @@ This resource supports the following arguments: * `name` - (Required) Name of event subscription. * `enabled` - (Optional, Default: true) Whether the event subscription should be enabled. * `event_categories` - (Optional) List of event categories to listen for, see `DescribeEventCategories` for a canonical list. -* `source_type` - (Optional, Default: all events) Type of source for events. Valid values: `replication-instance` or `replication-task` +* `source_type` - (Required) Type of source for events. Valid values: `replication-instance` or `replication-task` * `source_ids` - (Required) Ids of sources to listen to. * `sns_topic_arn` - (Required) SNS topic arn to send events on. * `tags` - (Optional) Map of resource tags to assign to the resource. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. diff --git a/website/docs/r/finspace_kx_scaling_group.html.markdown b/website/docs/r/finspace_kx_scaling_group.html.markdown new file mode 100644 index 000000000000..c4f34b5b600e --- /dev/null +++ b/website/docs/r/finspace_kx_scaling_group.html.markdown @@ -0,0 +1,82 @@ +--- +subcategory: "FinSpace" +layout: "aws" +page_title: "AWS: aws_finspace_kx_scaling_group" +description: |- + Terraform resource for managing an AWS FinSpace Kx Scaling Group. +--- + +# Resource: aws_finspace_kx_scaling_group + +Terraform resource for managing an AWS FinSpace Kx Scaling Group. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_finspace_kx_scaling_group" "example" { + name = "my-tf-kx-scalinggroup" + environment_id = aws_finspace_kx_environment.example.id + availability_zone_id = "use1-az2" + host_type = "kx.sg.4xlarge" +} +``` + +## Argument Reference + +The following arguments are required: + +* `availability_zone_id` - (Required) The availability zone identifiers for the requested regions. +* `environment_id` - (Required) A unique identifier for the kdb environment, where you want to create the scaling group. +* `name` - (Required) Unique name for the scaling group that you want to create. +* `host_type` - (Required) The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed. + +The following arguments are optional: + +* `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. You can add up to 50 tags to a scaling group. 
+ +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - Amazon Resource Name (ARN) identifier of the KX Scaling Group. +* `clusters` - The list of Managed kdb clusters that are currently active in the given scaling group. +* `created_timestamp` - The timestamp at which the scaling group was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. +* `last_modified_timestamp` - Last timestamp at which the scaling group was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. +* `status` - The status of scaling group. + * `CREATING` – The scaling group creation is in progress. + * `CREATE_FAILED` – The scaling group creation has failed. + * `ACTIVE` – The scaling group is active. + * `UPDATING` – The scaling group is in the process of being updated. + * `UPDATE_FAILED` – The update action failed. + * `DELETING` – The scaling group is in the process of being deleted. + * `DELETE_FAILED` – The system failed to delete the scaling group. + * `DELETED` – The scaling group is successfully deleted. +* `status_reason` - The error message when a failed state occurs. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). 
+
+## Timeouts
+
+[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts):
+
+* `create` - (Default `45m`)
+* `update` - (Default `30m`)
+* `delete` - (Default `60m`)
+
+## Import
+
+In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import an AWS FinSpace Kx scaling group using the `id` (environment ID and scaling group name, comma-delimited). For example:
+
+```terraform
+import {
+  to = aws_finspace_kx_scaling_group.example
+  id = "n3ceo7wqxoxcti5tujqwzs,my-tf-kx-scalinggroup"
+}
+```
+
+Using `terraform import`, import an AWS FinSpace Kx Scaling Group using the `id` (environment ID and scaling group name, comma-delimited). For example:
+
+```console
+% terraform import aws_finspace_kx_scaling_group.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-scalinggroup
+```
diff --git a/website/docs/r/finspace_kx_volume.html.markdown b/website/docs/r/finspace_kx_volume.html.markdown
new file mode 100644
index 000000000000..0ddc66dc9e6f
--- /dev/null
+++ b/website/docs/r/finspace_kx_volume.html.markdown
@@ -0,0 +1,97 @@
+---
+subcategory: "FinSpace"
+layout: "aws"
+page_title: "AWS: aws_finspace_kx_volume"
+description: |-
+  Terraform resource for managing an AWS FinSpace Kx Volume.
+---
+
+# Resource: aws_finspace_kx_volume
+
+Terraform resource for managing an AWS FinSpace Kx Volume.
+
+## Example Usage
+
+### Basic Usage
+
+```terraform
+resource "aws_finspace_kx_volume" "example" {
+  name               = "my-tf-kx-volume"
+  environment_id     = aws_finspace_kx_environment.example.id
+  availability_zones = "use1-az2"
+  az_mode            = "SINGLE"
+  type               = "NAS_1"
+  nas1_configuration {
+    size = 1200
+    type = "SSD_250"
+  }
+}
+```
+
+## Argument Reference
+
+The following arguments are required:
+
+* `az_mode` - (Required) The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes.
+    * `SINGLE` - Assigns one availability zone per volume.
+* `environment_id` - (Required) A unique identifier for the kdb environment, whose clusters can attach to the volume.
+* `name` - (Required) Unique name for the volume that you want to create.
+* `type` - (Required) The type of file system volume. Currently, FinSpace only supports the `NAS_1` volume type. When you select the `NAS_1` volume type, you must also provide `nas1_configuration`.
+* `availability_zones` - (Required) The identifier of the AWS Availability Zone IDs.
+
+The following arguments are optional:
+
+* `nas1_configuration` - (Optional) Specifies the configuration for the Network attached storage (`NAS_1`) file system volume. This parameter is required when `volume_type` is `NAS_1`. See [`nas1_configuration` Argument Reference](#nas1_configuration-argument-reference) below.
+* `description` - (Optional) Description of the volume.
+* `tags` - (Optional) A list of key-value pairs to label the volume. You can add up to 50 tags to a volume.
+
+### `nas1_configuration` Argument Reference
+
+The `nas1_configuration` block supports the following arguments:
+
+* `size` - (Required) The size of the network attached storage.
+* `type` - (Required) The type of the network attached storage.
+
+## Attribute Reference
+
+This resource exports the following attributes in addition to the arguments above:
+
+* `arn` - Amazon Resource Name (ARN) identifier of the KX volume.
+* `created_timestamp` - The timestamp at which the volume was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+* `status` - The status of volume creation.
+    * `CREATING` – The volume creation is in progress.
+    * `CREATE_FAILED` – The volume creation has failed.
+    * `ACTIVE` – The volume is active.
+    * `UPDATING` – The volume is in the process of being updated.
+    * `UPDATE_FAILED` – The update action failed.
+ * `UPDATED` – The volume is successfully updated. + * `DELETING` – The volume is in the process of being deleted. + * `DELETE_FAILED` – The system failed to delete the volume. + * `DELETED` – The volume is successfully deleted. +* `status_reason` - The error message when a failed state occurs. +* `last_modified_timestamp` - Last timestamp at which the volume was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `45m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import an AWS FinSpace Kx Volume using the `id` (environment ID and volume name, comma-delimited). For example: + +```terraform +import { + to = aws_finspace_kx_volume.example + id = "n3ceo7wqxoxcti5tujqwzs,my-tf-kx-volume" +} +``` + +Using `terraform import`, import an AWS FinSpace Kx Volume using the `id` (environment ID and volume name, comma-delimited). For example: + +```console +% terraform import aws_finspace_kx_volume.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-volume +```