From 74c398f6b5f164006b262f1a1abbeb6b81296397 Mon Sep 17 00:00:00 2001 From: markos kandylis Date: Sun, 19 Nov 2023 21:35:20 +0000 Subject: [PATCH 01/45] Initial code --- internal/service/securitylake/data_lake.go | 748 ++++++++++++++++++ .../service/securitylake/data_lake_test.go | 325 ++++++++ .../securitylake/service_package_gen.go | 10 +- .../r/securitylake_data_lake.html.markdown | 60 ++ ...ylake_securitylake_data_lake.html.markdown | 60 ++ 5 files changed, 1202 insertions(+), 1 deletion(-) create mode 100644 internal/service/securitylake/data_lake.go create mode 100644 internal/service/securitylake/data_lake_test.go create mode 100644 website/docs/r/securitylake_data_lake.html.markdown create mode 100644 website/docs/r/securitylake_securitylake_data_lake.html.markdown diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go new file mode 100644 index 000000000000..e3b166a92b0e --- /dev/null +++ b/internal/service/securitylake/data_lake.go @@ -0,0 +1,748 @@ +package securitylake + +import ( + "context" + "errors" + "time" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/securitylake" + awstypes "github.com/aws/aws-sdk-go-v2/service/securitylake/types" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + // "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + // "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + 
"github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// Function annotations are used for resource registration to the Provider. DO NOT EDIT. +// @FrameworkResource(name="Data Lake") +// @Tags(identifierAttribute="arn") +func newResourceDataLake(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &resourceDataLake{} + + r.SetDefaultCreateTimeout(30 * time.Minute) + r.SetDefaultUpdateTimeout(30 * time.Minute) + r.SetDefaultDeleteTimeout(30 * time.Minute) + + return r, nil +} + +const ( + ResNameDataLake = "Data Lake" +) + +type resourceDataLake struct { + framework.ResourceWithConfigure + framework.WithTimeouts +} + +func (r *resourceDataLake) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = "aws_securitylake_data_lake" +} + +func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "arn": framework.ARNAttributeComputedOnly(), + "metastore_manager_role_arn": schema.StringAttribute{ + Required: true, + }, + "id": framework.IDAttribute(), + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + }, + Blocks: map[string]schema.Block{ + "configuration": schema.ListNestedBlock{ + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + listvalidator.SizeAtMost(1), + }, + NestedObject: 
schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "region": schema.StringAttribute{ + Required: true, + }, + }, + Blocks: map[string]schema.Block{ + "encryption_configuration": schema.SetNestedBlock{ + Validators: []validator.Set{ + setvalidator.SizeAtLeast(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "kms_key_id": schema.StringAttribute{ + Required: true, + }, + }, + }, + }, + "lifecycle_configuration": schema.SetNestedBlock{ + Validators: []validator.Set{ + setvalidator.SizeAtLeast(1), + }, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "expiration": schema.SetNestedBlock{ + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "days": schema.Int64Attribute{ + Required: true, + }, + }, + }, + }, + "transitions": schema.SetNestedBlock{ + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "days": schema.Int64Attribute{ + Required: true, + }, + "storage_class": schema.StringAttribute{ + Required: true, + }, + }, + }, + }, + }, + }, + }, + "replication_configuration": schema.SetNestedBlock{ + Validators: []validator.Set{ + setvalidator.SizeAtLeast(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "role_arn": schema.StringAttribute{ + Required: true, + }, + "regions": schema.StringAttribute{ + Required: true, + }, + }, + }, + }, + }, + }, + }, + "timeouts": timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Update: true, + Delete: true, + }), + }, + } +} + +func (r *resourceDataLake) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + conn := r.Meta().SecurityLakeClient(ctx) + + var plan resourceDataLakeData + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) 
+ if resp.Diagnostics.HasError() { + return + } + + var configurations []dataLakeConfigurationsData + resp.Diagnostics.Append(plan.Configurations.ElementsAs(ctx, &configurations, false)...) + if resp.Diagnostics.HasError() { + return + } + + in := &securitylake.CreateDataLakeInput{ + Configurations: expanddataLakeConfigurations(ctx,configurations), + MetaStoreManagerRoleArn: aws.String(plan.MetastoreManagerRoleArn.ValueString()), + } + + out, err := conn.CreateDataLake(ctx, in) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionCreating, ResNameDataLake, plan.ARN.String(), err), + err.Error(), + ) + return + } + if out == nil || out.DataLakes == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionCreating, ResNameDataLake, plan.ARN.String(), nil), + errors.New("empty output").Error(), + ) + return + } + + plan.ARN = flex.StringToFramework(ctx, out.DataLakes[0].DataLakeArn) + + id := generateDataLakeID(plan.ARN.String(), *out.DataLakes[0].Region) + + plan.ID = types.StringValue(id) + + createTimeout := r.CreateTimeout(ctx, plan.Timeouts) + _, err = waitDataLakeCreated(ctx, conn, plan.ID.ValueString(), createTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForCreation, ResNameDataLake, plan.ARN.String(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) +} + +func (r *resourceDataLake) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + conn := r.Meta().SecurityLakeClient(ctx) + + var state resourceDataLakeData + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + out, err := findDataLakeByID(ctx, conn, state.ID.ValueString()) + // TIP: -- 4. 
Remove resource from state if it is not found + if tfresource.NotFound(err) { + resp.State.RemoveResource(ctx) + return + } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionSetting, ResNameDataLake, state.ID.String(), err), + err.Error(), + ) + return + } + + + state.ARN = flex.StringToFramework(ctx, out.DataLakeArn) + state.ID = flex.StringToFramework(ctx, out.DataLakeArn) + + + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} + +func (r *resourceDataLake) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // conn := r.Meta().SecurityLakeClient(ctx) + + // // TIP: -- 2. Fetch the plan + // var plan, state resourceDataLakeData + // resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + // resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + // if resp.Diagnostics.HasError() { + // return + // } + + // // TIP: -- 3. Populate a modify input structure and check for changes + // if !plan.Name.Equal(state.Name) || + // !plan.Description.Equal(state.Description) || + // !plan.ComplexArgument.Equal(state.ComplexArgument) || + // !plan.Type.Equal(state.Type) { + + // in := &securitylake.UpdateDataLakeInput{ + // // TIP: Mandatory or fields that will always be present can be set when + // // you create the Input structure. (Replace these with real fields.) + // DataLakeId: aws.String(plan.ID.ValueString()), + // DataLakeName: aws.String(plan.Name.ValueString()), + // DataLakeType: aws.String(plan.Type.ValueString()), + // } + + // if !plan.Description.IsNull() { + // // TIP: Optional fields should be set based on whether or not they are + // // used. + // in.Description = aws.String(plan.Description.ValueString()) + // } + // if !plan.ComplexArgument.IsNull() { + // // TIP: Use an expander to assign a complex argument. The elements must be + // // deserialized into the appropriate struct before being passed to the expander. 
+ // var tfList []complexArgumentData + // resp.Diagnostics.Append(plan.ComplexArgument.ElementsAs(ctx, &tfList, false)...) + // if resp.Diagnostics.HasError() { + // return + // } + + // in.ComplexArgument = expandComplexArgument(tfList) + // } + + // // TIP: -- 4. Call the AWS modify/update function + // out, err := conn.UpdateDataLake(ctx, in) + // if err != nil { + // resp.Diagnostics.AddError( + // create.ProblemStandardMessage(names.SecurityLake, create.ErrActionUpdating, ResNameDataLake, plan.ID.String(), err), + // err.Error(), + // ) + // return + // } + // if out == nil || out.DataLake == nil { + // resp.Diagnostics.AddError( + // create.ProblemStandardMessage(names.SecurityLake, create.ErrActionUpdating, ResNameDataLake, plan.ID.String(), nil), + // errors.New("empty output").Error(), + // ) + // return + // } + + // // TIP: Using the output from the update function, re-set any computed attributes + // plan.ARN = flex.StringToFramework(ctx, out.DataLake.Arn) + // plan.ID = flex.StringToFramework(ctx, out.DataLake.DataLakeId) + // } + + + // // TIP: -- 5. Use a waiter to wait for update to complete + // updateTimeout := r.UpdateTimeout(ctx, plan.Timeouts) + // _, err := waitDataLakeUpdated(ctx, conn, plan.ID.ValueString(), updateTimeout) + // if err != nil { + // resp.Diagnostics.AddError( + // create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForUpdate, ResNameDataLake, plan.ID.String(), err), + // err.Error(), + // ) + // return + // } + + + // // TIP: -- 6. Save the request plan to response state + // resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) +} + +func (r *resourceDataLake) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // conn := r.Meta().SecurityLakeClient(ctx) + + // // TIP: -- 2. Fetch the state + // var state resourceDataLakeData + // resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + // if resp.Diagnostics.HasError() { + // return + // } + + // // TIP: -- 3. 
Populate a delete input structure + // in := &securitylake.DeleteDataLakeInput{ + // DataLakeId: aws.String(state.ID.ValueString()), + // } + + // // TIP: -- 4. Call the AWS delete function + // _, err := conn.DeleteDataLake(ctx, in) + // // TIP: On rare occassions, the API returns a not found error after deleting a + // // resource. If that happens, we don't want it to show up as an error. + // if err != nil { + // var nfe *awstypes.ResourceNotFoundException + // if errors.As(err, &nfe) { + // return + // } + // resp.Diagnostics.AddError( + // create.ProblemStandardMessage(names.SecurityLake, create.ErrActionDeleting, ResNameDataLake, state.ID.String(), err), + // err.Error(), + // ) + // return + // } + + // // TIP: -- 5. Use a waiter to wait for delete to complete + // deleteTimeout := r.DeleteTimeout(ctx, state.Timeouts) + // _, err = waitDataLakeDeleted(ctx, conn, state.ID.ValueString(), deleteTimeout) + // if err != nil { + // resp.Diagnostics.AddError( + // create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForDeletion, ResNameDataLake, state.ID.String(), err), + // err.Error(), + // ) + // return + // } +} + +func (r *resourceDataLake) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} + +func waitDataLakeCreated(ctx context.Context, conn *securitylake.Client, id string, timeout time.Duration) (*securitylake.ListDataLakesOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{string(awstypes.DataLakeStatusInitialized)}, + Target: []string{string(awstypes.DataLakeStatusCompleted)}, + Refresh: createStatusDataLake(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*securitylake.ListDataLakesOutput); ok { + return out, err + } + + return nil, err +} + +// TIP: It is 
easier to determine whether a resource is updated for some +// resources than others. The best case is a status flag that tells you when +// the update has been fully realized. Other times, you can check to see if a +// key resource argument is updated to a new value or not. +func waitDataLakeUpdated(ctx context.Context, conn *securitylake.Client, id string, timeout time.Duration) (*securitylake.ListDataLakesOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{string(awstypes.DataLakeStatusPending)}, + Target: []string{string(awstypes.DataLakeStatusCompleted)}, + Refresh: updateStatusDataLake(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*securitylake.ListDataLakesOutput); ok { + return out, err + } + + return nil, err +} + + +func waitDataLakeDeleted(ctx context.Context, conn *securitylake.Client, id string, timeout time.Duration) (*securitylake.ListDataLakesOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{string(awstypes.DataLakeStatusPending), string(awstypes.DataLakeStatusCompleted)}, + Target: []string{}, + Refresh: updateStatusDataLake(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*securitylake.ListDataLakesOutput); ok { + return out, err + } + + return nil, err +} + + +func createStatusDataLake(ctx context.Context, conn *securitylake.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + out, err := findDataLakeByID(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + return out, string(out.CreateStatus), nil + } +} + +func updateStatusDataLake(ctx context.Context, conn *securitylake.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + out, err := 
findDataLakeByID(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + return out, string(out.UpdateStatus.Status), nil + } +} + +func findDataLakeByID(ctx context.Context, conn *securitylake.Client, id string) (*awstypes.DataLakeResource, error) { + region := extractRegionFromID(id) + + in := &securitylake.ListDataLakesInput{ + Regions: []string{region}, + } + + out, err := conn.ListDataLakes(ctx, in) + if err != nil { + var nfe *awstypes.ResourceNotFoundException + if errors.As(err, &nfe) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } + } + + return nil, err + } + + if out == nil { + return nil, tfresource.NewEmptyResultError(in) + } + + return &out.DataLakes[0], nil +} + + +// func flattenComplexArgument(ctx context.Context, apiObject *securitylake.ComplexArgument) (types.List, diag.Diagnostics) { +// var diags diag.Diagnostics +// elemType := types.ObjectType{AttrTypes: complexArgumentAttrTypes} + +// if apiObject == nil { +// return types.ListNull(elemType), diags +// } + +// obj := map[string]attr.Value{ +// "nested_required": flex.StringValueToFramework(ctx, apiObject.NestedRequired), +// "nested_optional": flex.StringValueToFramework(ctx, apiObject.NestedOptional), +// } +// objVal, d := types.ObjectValue(complexArgumentAttrTypes, obj) +// diags.Append(d...) + +// listVal, d := types.ListValue(elemType, []attr.Value{objVal}) +// diags.Append(d...) 
+ +// return listVal, diags +// } + +// func flattenComplexArguments(ctx context.Context, apiObjects []*securitylake.ComplexArgument) (types.List, diag.Diagnostics) { +// var diags diag.Diagnostics +// elemType := types.ObjectType{AttrTypes: complexArgumentAttrTypes} + +// if len(apiObjects) == 0 { +// return types.ListNull(elemType), diags +// } + +// elems := []attr.Value{} +// for _, apiObject := range apiObjects { +// if apiObject == nil { +// continue +// } + +// obj := map[string]attr.Value{ +// "nested_required": flex.StringValueToFramework(ctx, apiObject.NestedRequired), +// "nested_optional": flex.StringValueToFramework(ctx, apiObject.NestedOptional), +// } +// objVal, d := types.ObjectValue(complexArgumentAttrTypes, obj) +// diags.Append(d...) + +// elems = append(elems, objVal) +// } + +// listVal, d := types.ListValue(elemType, elems) +// diags.Append(d...) + +// return listVal, diags +// } + + +func expanddataLakeConfigurations(ctx context.Context, tfList []dataLakeConfigurationsData) []awstypes.DataLakeConfiguration { + var diags diag.Diagnostics + if len(tfList) == 0 { + return nil + } + + var apiObject []awstypes.DataLakeConfiguration + var encryptionConfiguration []dataLakeConfigurationsEncryption + var lifecycleConfiguration []dataLakeConfigurationsLifecycle + var replicationConfiguration []dataLakeConfigurationsReplicationConfiguration + + for _, tfObj := range tfList { + diags.Append(tfObj.EncryptionConfiguration.ElementsAs(ctx, &encryptionConfiguration, false)...) + diags.Append(tfObj.LifecycleConfiguration.ElementsAs(ctx, &lifecycleConfiguration, false)...) + diags.Append(tfObj.ReplicationConfiguration.ElementsAs(ctx, &replicationConfiguration, false)...) 
+ + item := awstypes.DataLakeConfiguration{ + Region: aws.String(tfObj.Region.ValueString()), + } + + if !tfObj.EncryptionConfiguration.IsNull() { + item.EncryptionConfiguration = expandEncryptionConfiguration(encryptionConfiguration) + } + + if !tfObj.LifecycleConfiguration.IsNull() { + item.LifecycleConfiguration,_ = expandLifecycleConfiguration(ctx, lifecycleConfiguration) + } + + if !tfObj.ReplicationConfiguration.IsNull() { + item.ReplicationConfiguration = expandReplicationConfiguration(ctx, replicationConfiguration) + } + + apiObject = append(apiObject, item) + } + + return apiObject +} + +func expandEncryptionConfiguration(tfList []dataLakeConfigurationsEncryption) *awstypes.DataLakeEncryptionConfiguration { + if len(tfList) == 0 { + return nil + } + + tfObj := tfList[0] + apiObject := &awstypes.DataLakeEncryptionConfiguration{} + + if !tfObj.KmsKeyID.IsNull() { + apiObject.KmsKeyId = aws.String(tfObj.KmsKeyID.ValueString()) + } + + return apiObject +} + + +func expandLifecycleConfiguration(ctx context.Context, tfList []dataLakeConfigurationsLifecycle) (*awstypes.DataLakeLifecycleConfiguration,diag.Diagnostics) { + var diags diag.Diagnostics + + if len(tfList) == 0 { + return nil, diags + } + + tfObj := tfList[0] + var transitions []dataLakeConfigurationsLifecycleTransitions + diags.Append(tfObj.Transitions.ElementsAs(ctx, &transitions, false)...) + var expiration []dataLakeConfigurationsLifecycleExpiration + diags.Append(tfObj.Expiration.ElementsAs(ctx, &expiration, false)...) 
+ apiObject := &awstypes.DataLakeLifecycleConfiguration{} + + if !tfObj.Expiration.IsNull() { + apiObject.Expiration = expandLifecycleExpiration(expiration) + } + + if !tfObj.Transitions.IsNull() { + apiObject.Transitions = expandLifecycleTransitions(transitions) + } + + return apiObject, diags +} + +func expandLifecycleExpiration(tfList []dataLakeConfigurationsLifecycleExpiration) *awstypes.DataLakeLifecycleExpiration { + if len(tfList) == 0 { + return nil + } + + tfObj := tfList[0] + apiObject := &awstypes.DataLakeLifecycleExpiration{} + + if !tfObj.Days.IsNull() { + int32Days := int32(tfObj.Days.ValueInt64()) + apiObject.Days = aws.Int32(int32Days) + } + + return apiObject +} + +func expandLifecycleTransitions(tfList []dataLakeConfigurationsLifecycleTransitions) []awstypes.DataLakeLifecycleTransition { + if len(tfList) == 0 { + return nil + } + + var apiObject []awstypes.DataLakeLifecycleTransition + + for _, tfObj := range tfList { + item := awstypes.DataLakeLifecycleTransition{} + + if !tfObj.Days.IsNull() { + int32Days := int32(tfObj.Days.ValueInt64()) + item.Days = aws.Int32(int32Days) + } + + if !tfObj.StorageClass.IsNull() { + item.StorageClass = aws.String(tfObj.StorageClass.ValueString()) + } + + apiObject = append(apiObject, item) + } + + return apiObject +} + +func expandReplicationConfiguration(ctx context.Context, tfList []dataLakeConfigurationsReplicationConfiguration) *awstypes.DataLakeReplicationConfiguration { + if len(tfList) == 0 { + return nil + } + + tfObj := tfList[0] + apiObject := &awstypes.DataLakeReplicationConfiguration{} + + if !tfObj.RoleArn.IsNull() { + apiObject.RoleArn = aws.String(tfObj.RoleArn.ValueString()) + } + + if !tfObj.Regions.IsNull() { + apiObject.Regions = flex.ExpandFrameworkStringValueList(ctx, tfObj.Regions) + } + + return apiObject +} + + +var ( + dataLakeConfigurationsEncryptionTypes = map[string]attr.Type{ + "kms_key_id": types.StringType, + } + + dataLakeConfigurationsLifecycleExpirationTypes = 
map[string]attr.Type{ + "days": types.Int64Type, + } + + dataLakeConfigurationsLifecycleTransitionsTypes = map[string]attr.Type{ + "days": types.Int64Type, + "storage_class": types.StringType, + } + + dataLakeConfigurationsLifecycleTypes = map[string]attr.Type{ + "expiration": types.SetType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleExpirationTypes}}, + "transitions": types.SetType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleTransitionsTypes}}, + } + + dataLakeConfigurationsReplicationConfigurationTypes = map[string]attr.Type{ + "role_arn": types.StringType, + "regions": types.ListType{ElemType: types.StringType}, + } +) + +type resourceDataLakeData struct { + ARN types.String `tfsdk:"arn"` + MetastoreManagerRoleArn types.String `tfsdk:"metastore_manager_role_arn"` + ID types.String `tfsdk:"id"` + Configurations types.List `tfsdk:"configurations"` + Tags types.Map `tfsdk:"tags"` + TagsAll types.Map `tfsdk:"tags_all"` + Timeouts timeouts.Value `tfsdk:"timeouts"` +} + +type dataLakeConfigurationsData struct { + EncryptionConfiguration types.Set `tfsdk:"encryption_configuration"` + LifecycleConfiguration types.Set `tfsdk:"lifecycle_configuration"` + Region types.String `tfsdk:"nested_optional"` + ReplicationConfiguration types.Set `tfsdk:"replication_configuration"` +} + +type dataLakeConfigurationsEncryption struct { + KmsKeyID types.String `tfsdk:"kms_key_id"` +} + +type dataLakeConfigurationsLifecycle struct { + Expiration types.Set `tfsdk:"expiration"` + Transitions types.Set `tfsdk:"transitions"` +} + +type dataLakeConfigurationsLifecycleExpiration struct { + Days types.Int64 `tfsdk:"days"` +} + +type dataLakeConfigurationsLifecycleTransitions struct { + Days types.Int64 `tfsdk:"days"` + StorageClass types.String `tfsdk:"storage_class"` +} + +type dataLakeConfigurationsReplicationConfiguration struct { + RoleArn types.String `tfsdk:"role_arn"` + Regions types.List `tfsdk:"regions"` +} + +func 
generateDataLakeID(arn, region string) string { + return arn + "|" + region +} + +func extractRegionFromID(id string) string { + parts := strings.Split(id, "|") + if len(parts) < 2 { + // Handle error or return a default value + return "" + } + return parts[1] +} \ No newline at end of file diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go new file mode 100644 index 000000000000..ea45cf2ee796 --- /dev/null +++ b/internal/service/securitylake/data_lake_test.go @@ -0,0 +1,325 @@ +package securitylake_test +// **PLEASE DELETE THIS AND ALL TIP COMMENTS BEFORE SUBMITTING A PR FOR REVIEW!** +// +// TIP: ==== INTRODUCTION ==== +// Thank you for trying the skaff tool! +// +// You have opted to include these helpful comments. They all include "TIP:" +// to help you find and remove them when you're done with them. +// +// While some aspects of this file are customized to your input, the +// scaffold tool does *not* look at the AWS API and ensure it has correct +// function, structure, and variable names. It makes guesses based on +// commonalities. You will need to make significant adjustments. +// +// In other words, as generated, this is a rough outline of the work you will +// need to do. If something doesn't make sense for your situation, get rid of +// it. + +import ( + // TIP: ==== IMPORTS ==== + // This is a common set of imports but not customized to your code since + // your code hasn't been written yet. Make sure you, your IDE, or + // goimports -w fixes these imports. + // + // The provider linter wants your imports to be in two groups: first, + // standard library (i.e., "fmt" or "strings"), second, everything else. + // + // Also, AWS Go SDK v2 may handle nested structures differently than v1, + // using the services/securitylake/types package. If so, you'll + // need to import types and reference the nested types, e.g., as + // types.. 
+ "context" + "fmt" + "regexp" + "strings" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/securitylake" + "github.com/aws/aws-sdk-go-v2/service/securitylake/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/names" + + // TIP: You will often need to import the package that this test file lives + // in. Since it is in the "test" context, it must import the package to use + // any normal context constants, variables, or functions. + tfsecuritylake "github.com/hashicorp/terraform-provider-aws/internal/service/securitylake" +) + +// TIP: File Structure. The basic outline for all test files should be as +// follows. Improve this resource's maintainability by following this +// outline. +// +// 1. Package declaration (add "_test" since this is a test file) +// 2. Imports +// 3. Unit tests +// 4. Basic test +// 5. Disappears test +// 6. All the other tests +// 7. Helper functions (exists, destroy, check, etc.) +// 8. Functions that return Terraform configurations + +// TIP: ==== UNIT TESTS ==== +// This is an example of a unit test. Its name is not prefixed with +// "TestAcc" like an acceptance test. +// +// Unlike acceptance tests, unit tests do not access AWS and are focused on a +// function (or method). Because of this, they are quick and cheap to run. +// +// In designing a resource's implementation, isolate complex bits from AWS bits +// so that they can be tested through a unit test. We encourage more unit tests +// in the provider. 
+// +// Cut and dry functions using well-used patterns, like typical flatteners and +// expanders, don't need unit testing. However, if they are complex or +// intricate, they should be unit tested. +func TestDataLakeExampleUnitTest(t *testing.T) { + testCases := []struct { + TestName string + Input string + Expected string + Error bool + }{ + { + TestName: "empty", + Input: "", + Expected: "", + Error: true, + }, + { + TestName: "descriptive name", + Input: "some input", + Expected: "some output", + Error: false, + }, + { + TestName: "another descriptive name", + Input: "more input", + Expected: "more output", + Error: false, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.TestName, func(t *testing.T) { + got, err := tfsecuritylake.FunctionFromResource(testCase.Input) + + if err != nil && !testCase.Error { + t.Errorf("got error (%s), expected no error", err) + } + + if err == nil && testCase.Error { + t.Errorf("got (%s) and no error, expected error", got) + } + + if got != testCase.Expected { + t.Errorf("got %s, expected %s", got, testCase.Expected) + } + }) + } +} + +// TIP: ==== ACCEPTANCE TESTS ==== +// This is an example of a basic acceptance test. This should test as much of +// standard functionality of the resource as possible, and test importing, if +// applicable. We prefix its name with "TestAcc", the service, and the +// resource name. +// +// Acceptance test access AWS and cost money to run. +func TestAccSecurityLakeDataLake_basic(t *testing.T) { + ctx := acctest.Context(t) + // TIP: This is a long-running test guard for tests that run longer than + // 300s (5 min) generally. 
+ if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var datalake securitylake.DescribeDataLakeResponse + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_securitylake_data_lake.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SecurityLakeEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataLakeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataLakeConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataLakeExists(ctx, resourceName, &datalake), + resource.TestCheckResourceAttr(resourceName, "auto_minor_version_upgrade", "false"), + resource.TestCheckResourceAttrSet(resourceName, "maintenance_window_start_time.0.day_of_week"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "user.*", map[string]string{ + "console_access": "false", + "groups.#": "0", + "username": "Test", + "password": "TestTest1234", + }), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "securitylake", regexp.MustCompile(`datalake:+.`)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + }, + }, + }) +} + +func TestAccSecurityLakeDataLake_disappears(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var datalake securitylake.DescribeDataLakeResponse + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_securitylake_data_lake.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SecurityLakeEndpointID) + 
testAccPreCheck(t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataLakeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataLakeConfig_basic(rName, testAccDataLakeVersionNewer), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataLakeExists(ctx, resourceName, &datalake), + // TIP: The Plugin-Framework disappears helper is similar to the Plugin-SDK version, + // but expects a new resource factory function as the third argument. To expose this + // private function to the testing package, you may need to add a line like the following + // to exports_test.go: + // + // var ResourceDataLake = newResourceDataLake + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfsecuritylake.ResourceDataLake, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckDataLakeDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).SecurityLakeClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_securitylake_data_lake" { + continue + } + + input := &securitylake.DescribeDataLakeInput{ + DataLakeId: aws.String(rs.Primary.ID), + } + _, err := conn.DescribeDataLake(ctx, &securitylake.DescribeDataLakeInput{ + DataLakeId: aws.String(rs.Primary.ID), + }) + if errs.IsA[*types.ResourceNotFoundException](err){ + return nil + } + if err != nil { + return nil + } + + return create.Error(names.SecurityLake, create.ErrActionCheckingDestroyed, tfsecuritylake.ResNameDataLake, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccCheckDataLakeExists(ctx context.Context, name string, datalake *securitylake.DescribeDataLakeResponse) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return 
create.Error(names.SecurityLake, create.ErrActionCheckingExistence, tfsecuritylake.ResNameDataLake, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.SecurityLake, create.ErrActionCheckingExistence, tfsecuritylake.ResNameDataLake, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).SecurityLakeClient(ctx) + resp, err := conn.DescribeDataLake(ctx, &securitylake.DescribeDataLakeInput{ + DataLakeId: aws.String(rs.Primary.ID), + }) + + if err != nil { + return create.Error(names.SecurityLake, create.ErrActionCheckingExistence, tfsecuritylake.ResNameDataLake, rs.Primary.ID, err) + } + + *datalake = *resp + + return nil + } +} + +func testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).SecurityLakeClient(ctx) + + input := &securitylake.ListDataLakesInput{} + _, err := conn.ListDataLakes(ctx, input) + + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func testAccCheckDataLakeNotRecreated(before, after *securitylake.DescribeDataLakeResponse) resource.TestCheckFunc { + return func(s *terraform.State) error { + if before, after := aws.ToString(before.DataLakeId), aws.ToString(after.DataLakeId); before != after { + return create.Error(names.SecurityLake, create.ErrActionCheckingNotRecreated, tfsecuritylake.ResNameDataLake, aws.ToString(before.DataLakeId), errors.New("recreated")) + } + + return nil + } +} + +func testAccDataLakeConfig_basic(rName, version string) string { + return fmt.Sprintf(` +resource "aws_security_group" "test" { + name = %[1]q +} + +resource "aws_securitylake_data_lake" "test" { + data_lake_name = %[1]q + engine_type = "ActiveSecurityLake" + engine_version = %[2]q + host_instance_type = "securitylake.t2.micro" + security_groups = [aws_security_group.test.id] + authentication_strategy = "simple" + 
storage_type = "efs" + + logs { + general = true + } + + user { + username = "Test" + password = "TestTest1234" + } +} +`, rName, version) +} diff --git a/internal/service/securitylake/service_package_gen.go b/internal/service/securitylake/service_package_gen.go index 37f6cb5d793f..f5c759a7281f 100644 --- a/internal/service/securitylake/service_package_gen.go +++ b/internal/service/securitylake/service_package_gen.go @@ -19,7 +19,15 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.Serv } func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { - return []*types.ServicePackageFrameworkResource{} + return []*types.ServicePackageFrameworkResource{ + { + Factory: newResourceDataLake, + Name: "Data Lake", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: "arn", + }, + }, + } } func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { diff --git a/website/docs/r/securitylake_data_lake.html.markdown b/website/docs/r/securitylake_data_lake.html.markdown new file mode 100644 index 000000000000..b8b33966be3e --- /dev/null +++ b/website/docs/r/securitylake_data_lake.html.markdown @@ -0,0 +1,60 @@ +--- +subcategory: "Security Lake" +layout: "aws" +page_title: "AWS: aws_securitylake_data_lake" +description: |- + Terraform resource for managing an AWS Security Lake Data Lake. +--- +` +# Resource: aws_securitylake_data_lake + +Terraform resource for managing an AWS Security Lake Data Lake. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_securitylake_data_lake" "example" { +} +``` + +## Argument Reference + +The following arguments are required: + +* `example_arg` - (Required) Concise argument description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. 
In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information.
+
+The following arguments are optional:
+
+* `optional_arg` - (Optional) Concise argument description.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `arn` - ARN of the Data Lake.
+* `example_attribute` - Concise description.
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `60m`) +* `update` - (Default `180m`) +* `delete` - (Default `90m`) + +## Import + +Security Lake Data Lake can be imported using the `example_id_arg`, e.g., + +``` +$ terraform import aws_securitylake_data_lake.example rft-8012925589 +``` diff --git a/website/docs/r/securitylake_securitylake_data_lake.html.markdown b/website/docs/r/securitylake_securitylake_data_lake.html.markdown new file mode 100644 index 000000000000..f235dac65434 --- /dev/null +++ b/website/docs/r/securitylake_securitylake_data_lake.html.markdown @@ -0,0 +1,60 @@ +--- +subcategory: "Security Lake" +layout: "aws" +page_title: "AWS: aws_securitylake_securitylake_data_lake" +description: |- + Terraform resource for managing an AWS Security Lake Securitylake Data Lake. +--- +` +# Resource: aws_securitylake_securitylake_data_lake + +Terraform resource for managing an AWS Security Lake Securitylake Data Lake. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_securitylake_securitylake_data_lake" "example" { +} +``` + +## Argument Reference + +The following arguments are required: + +* `example_arg` - (Required) Concise argument description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. + +The following arguments are optional: + +* `optional_arg` - (Optional) Concise argument description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. 
+ +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `arn` - ARN of the Securitylake Data Lake. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. +* `example_attribute` - Concise description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `60m`) +* `update` - (Default `180m`) +* `delete` - (Default `90m`) + +## Import + +Security Lake Securitylake Data Lake can be imported using the `example_id_arg`, e.g., + +``` +$ terraform import aws_securitylake_securitylake_data_lake.example rft-8012925589 +``` From 7167eb44e75da1347537d87abc547923b514a43a Mon Sep 17 00:00:00 2001 From: markos kandylis Date: Wed, 22 Nov 2023 17:47:53 +0000 Subject: [PATCH 02/45] Successfull Creation part --- internal/service/securitylake/data_lake.go | 502 +++++++++++------- .../service/securitylake/data_lake_test.go | 310 +++-------- internal/service/securitylake/generate.go | 1 + .../r/securitylake_data_lake2.html.markdown | 60 +++ ...ylake_exception_subscription.html.markdown | 60 +++ 5 files changed, 508 insertions(+), 425 deletions(-) create mode 100644 website/docs/r/securitylake_data_lake2.html.markdown create mode 100644 website/docs/r/securitylake_exception_subscription.html.markdown diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index e3b166a92b0e..1dbc1f9ac1c2 100644 --- a/internal/service/securitylake/data_lake.go +++ 
b/internal/service/securitylake/data_lake.go @@ -3,8 +3,9 @@ package securitylake import ( "context" "errors" - "time" + "fmt" "strings" + "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/securitylake" @@ -35,7 +36,7 @@ import ( // @Tags(identifierAttribute="arn") func newResourceDataLake(_ context.Context) (resource.ResourceWithConfigure, error) { r := &resourceDataLake{} - + r.SetDefaultCreateTimeout(30 * time.Minute) r.SetDefaultUpdateTimeout(30 * time.Minute) r.SetDefaultDeleteTimeout(30 * time.Minute) @@ -60,15 +61,15 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques resp.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ "arn": framework.ARNAttributeComputedOnly(), - "metastore_manager_role_arn": schema.StringAttribute{ + "id": framework.IDAttribute(), + "meta_store_manager_role_arn": schema.StringAttribute{ Required: true, }, - "id": framework.IDAttribute(), names.AttrTags: tftags.TagsAttribute(), names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), }, Blocks: map[string]schema.Block{ - "configuration": schema.ListNestedBlock{ + "configurations": schema.ListNestedBlock{ Validators: []validator.List{ listvalidator.SizeAtLeast(1), listvalidator.SizeAtMost(1), @@ -87,7 +88,7 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "kms_key_id": schema.StringAttribute{ - Required: true, + Optional: true, }, }, }, @@ -102,7 +103,7 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "days": schema.Int64Attribute{ - Required: true, + Optional: true, }, }, }, @@ -111,10 +112,10 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "days": schema.Int64Attribute{ 
- Required: true, + Optional: true, }, "storage_class": schema.StringAttribute{ - Required: true, + Optional: true, }, }, }, @@ -129,10 +130,10 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "role_arn": schema.StringAttribute{ - Required: true, + Optional: true, }, "regions": schema.StringAttribute{ - Required: true, + Optional: true, }, }, }, @@ -151,7 +152,6 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques func (r *resourceDataLake) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { conn := r.Meta().SecurityLakeClient(ctx) - var plan resourceDataLakeData resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) if resp.Diagnostics.HasError() { @@ -163,58 +163,59 @@ func (r *resourceDataLake) Create(ctx context.Context, req resource.CreateReques if resp.Diagnostics.HasError() { return } - + in := &securitylake.CreateDataLakeInput{ - Configurations: expanddataLakeConfigurations(ctx,configurations), - MetaStoreManagerRoleArn: aws.String(plan.MetastoreManagerRoleArn.ValueString()), + Configurations: expanddataLakeConfigurations(ctx, configurations), + MetaStoreManagerRoleArn: aws.String(plan.MetaStoreManagerRoleArn.ValueString()), + // Tags: getTagsIn(ctx), } - + out, err := conn.CreateDataLake(ctx, in) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.SecurityLake, create.ErrActionCreating, ResNameDataLake, plan.ARN.String(), err), + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionCreating, ResNameDataLake, plan.ID.ValueString(), err), err.Error(), ) return } if out == nil || out.DataLakes == nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.SecurityLake, create.ErrActionCreating, ResNameDataLake, plan.ARN.String(), nil), + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionCreating, ResNameDataLake, 
plan.ID.ValueString(), nil), errors.New("empty output").Error(), ) return } - plan.ARN = flex.StringToFramework(ctx, out.DataLakes[0].DataLakeArn) - - id := generateDataLakeID(plan.ARN.String(), *out.DataLakes[0].Region) - - plan.ID = types.StringValue(id) + plan.ID = flex.StringToFramework(ctx, out.DataLakes[0].DataLakeArn) + state := plan - createTimeout := r.CreateTimeout(ctx, plan.Timeouts) - _, err = waitDataLakeCreated(ctx, conn, plan.ID.ValueString(), createTimeout) + + createTimeout := r.CreateTimeout(ctx, state.Timeouts) + _, err = waitDataLakeCreated(ctx, conn, state.ID.ValueString(), createTimeout) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForCreation, ResNameDataLake, plan.ARN.String(), err), + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForCreation, ResNameDataLake, state.ID.ValueString(), err), err.Error(), ) return } - + + state.Configurations, _ = flattenDataLakeConfigurations(ctx, out.DataLakes) + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) } func (r *resourceDataLake) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { conn := r.Meta().SecurityLakeClient(ctx) - + var state resourceDataLakeData resp.Diagnostics.Append(req.State.Get(ctx, &state)...) if resp.Diagnostics.HasError() { return } - - out, err := findDataLakeByID(ctx, conn, state.ID.ValueString()) - // TIP: -- 4. 
Remove resource from state if it is not found + + out, err := FindDataLakeByID(ctx, conn, state.ID.ValueString()) + if tfresource.NotFound(err) { resp.State.RemoveResource(ctx) return @@ -227,17 +228,17 @@ func (r *resourceDataLake) Read(ctx context.Context, req resource.ReadRequest, r return } - - state.ARN = flex.StringToFramework(ctx, out.DataLakeArn) - state.ID = flex.StringToFramework(ctx, out.DataLakeArn) - + state.ARN = flex.StringToFramework(ctx, out.DataLakes[0].DataLakeArn) + state.ID = flex.StringToFramework(ctx, out.DataLakes[0].DataLakeArn) + state.Configurations ,_ = flattenDataLakeConfigurations(ctx, out.DataLakes) + fmt.Println(state.ID.ValueString()) resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) } func (r *resourceDataLake) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // conn := r.Meta().SecurityLakeClient(ctx) - + // // TIP: -- 2. Fetch the plan // var plan, state resourceDataLakeData // resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) @@ -245,7 +246,7 @@ func (r *resourceDataLake) Update(ctx context.Context, req resource.UpdateReques // if resp.Diagnostics.HasError() { // return // } - + // // TIP: -- 3. Populate a modify input structure and check for changes // if !plan.Name.Equal(state.Name) || // !plan.Description.Equal(state.Description) || @@ -276,87 +277,82 @@ func (r *resourceDataLake) Update(ctx context.Context, req resource.UpdateReques // in.ComplexArgument = expandComplexArgument(tfList) // } - + // // TIP: -- 4. 
Call the AWS modify/update function // out, err := conn.UpdateDataLake(ctx, in) // if err != nil { // resp.Diagnostics.AddError( - // create.ProblemStandardMessage(names.SecurityLake, create.ErrActionUpdating, ResNameDataLake, plan.ID.String(), err), + // create.ProblemStandardMessage(names.SecurityLake, create.ErrActionUpdating, ResNameDataLake, plan.ID.ValueString(), err), // err.Error(), // ) // return // } // if out == nil || out.DataLake == nil { // resp.Diagnostics.AddError( - // create.ProblemStandardMessage(names.SecurityLake, create.ErrActionUpdating, ResNameDataLake, plan.ID.String(), nil), + // create.ProblemStandardMessage(names.SecurityLake, create.ErrActionUpdating, ResNameDataLake, plan.ID.ValueString(), nil), // errors.New("empty output").Error(), // ) // return // } - + // // TIP: Using the output from the update function, re-set any computed attributes // plan.ARN = flex.StringToFramework(ctx, out.DataLake.Arn) // plan.ID = flex.StringToFramework(ctx, out.DataLake.DataLakeId) // } - // // TIP: -- 5. Use a waiter to wait for update to complete // updateTimeout := r.UpdateTimeout(ctx, plan.Timeouts) // _, err := waitDataLakeUpdated(ctx, conn, plan.ID.ValueString(), updateTimeout) // if err != nil { // resp.Diagnostics.AddError( - // create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForUpdate, ResNameDataLake, plan.ID.String(), err), + // create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForUpdate, ResNameDataLake, plan.ID.ValueString(), err), // err.Error(), // ) // return // } - // // TIP: -- 6. Save the request plan to response state // resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) } func (r *resourceDataLake) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { - // conn := r.Meta().SecurityLakeClient(ctx) - - // // TIP: -- 2. Fetch the state - // var state resourceDataLakeData - // resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
- // if resp.Diagnostics.HasError() { - // return - // } - - // // TIP: -- 3. Populate a delete input structure - // in := &securitylake.DeleteDataLakeInput{ - // DataLakeId: aws.String(state.ID.ValueString()), - // } - - // // TIP: -- 4. Call the AWS delete function - // _, err := conn.DeleteDataLake(ctx, in) - // // TIP: On rare occassions, the API returns a not found error after deleting a - // // resource. If that happens, we don't want it to show up as an error. - // if err != nil { - // var nfe *awstypes.ResourceNotFoundException - // if errors.As(err, &nfe) { - // return - // } - // resp.Diagnostics.AddError( - // create.ProblemStandardMessage(names.SecurityLake, create.ErrActionDeleting, ResNameDataLake, state.ID.String(), err), - // err.Error(), - // ) - // return - // } - - // // TIP: -- 5. Use a waiter to wait for delete to complete - // deleteTimeout := r.DeleteTimeout(ctx, state.Timeouts) - // _, err = waitDataLakeDeleted(ctx, conn, state.ID.ValueString(), deleteTimeout) - // if err != nil { - // resp.Diagnostics.AddError( - // create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForDeletion, ResNameDataLake, state.ID.String(), err), - // err.Error(), - // ) - // return - // } + conn := r.Meta().SecurityLakeClient(ctx) + + var state resourceDataLakeData + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + return + } + + region, _ := extractRegionFromARN(state.ID.ValueString()) + + in := &securitylake.DeleteDataLakeInput{ + Regions: []string{region}, + } + + _, err := conn.DeleteDataLake(ctx, in) + + if err != nil { + var nfe *awstypes.ResourceNotFoundException + if errors.As(err, &nfe) { + return + } + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionDeleting, ResNameDataLake, state.ID.String(), err), + err.Error(), + ) + return + } + + deleteTimeout := r.DeleteTimeout(ctx, state.Timeouts) + _, err = waitDataLakeDeleted(ctx, conn, state.ID.ValueString(), deleteTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForDeletion, ResNameDataLake, state.ID.String(), err), + err.Error(), + ) + return + } } func (r *resourceDataLake) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { @@ -364,6 +360,7 @@ func (r *resourceDataLake) ImportState(ctx context.Context, req resource.ImportS } func waitDataLakeCreated(ctx context.Context, conn *securitylake.Client, id string, timeout time.Duration) (*securitylake.ListDataLakesOutput, error) { + fmt.Println(id) stateConf := &retry.StateChangeConf{ Pending: []string{string(awstypes.DataLakeStatusInitialized)}, Target: []string{string(awstypes.DataLakeStatusCompleted)}, @@ -381,10 +378,6 @@ func waitDataLakeCreated(ctx context.Context, conn *securitylake.Client, id stri return nil, err } -// TIP: It is easier to determine whether a resource is updated for some -// resources than others. The best case is a status flag that tells you when -// the update has been fully realized. Other times, you can check to see if a -// key resource argument is updated to a new value or not. 
func waitDataLakeUpdated(ctx context.Context, conn *securitylake.Client, id string, timeout time.Duration) (*securitylake.ListDataLakesOutput, error) { stateConf := &retry.StateChangeConf{ Pending: []string{string(awstypes.DataLakeStatusPending)}, @@ -403,13 +396,13 @@ func waitDataLakeUpdated(ctx context.Context, conn *securitylake.Client, id stri return nil, err } - func waitDataLakeDeleted(ctx context.Context, conn *securitylake.Client, id string, timeout time.Duration) (*securitylake.ListDataLakesOutput, error) { + fmt.Println(id) stateConf := &retry.StateChangeConf{ - Pending: []string{string(awstypes.DataLakeStatusPending), string(awstypes.DataLakeStatusCompleted)}, - Target: []string{}, - Refresh: updateStatusDataLake(ctx, conn, id), - Timeout: timeout, + Pending: []string{string(awstypes.DataLakeStatusInitialized), string(awstypes.DataLakeStatusCompleted)}, + Target: []string{}, + Refresh: createStatusDataLake(ctx, conn, id), + Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) @@ -420,10 +413,10 @@ func waitDataLakeDeleted(ctx context.Context, conn *securitylake.Client, id stri return nil, err } - func createStatusDataLake(ctx context.Context, conn *securitylake.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - out, err := findDataLakeByID(ctx, conn, id) + fmt.Println(id) + out, err := FindDataLakeByID(ctx, conn, id) if tfresource.NotFound(err) { return nil, "", nil } @@ -431,13 +424,14 @@ func createStatusDataLake(ctx context.Context, conn *securitylake.Client, id str if err != nil { return nil, "", err } - return out, string(out.CreateStatus), nil + + return out, string(out.DataLakes[0].CreateStatus), nil } } func updateStatusDataLake(ctx context.Context, conn *securitylake.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - out, err := findDataLakeByID(ctx, conn, id) + out, err := FindDataLakeByID(ctx, conn, id) if tfresource.NotFound(err) { return nil, 
"", nil } @@ -445,17 +439,20 @@ func updateStatusDataLake(ctx context.Context, conn *securitylake.Client, id str if err != nil { return nil, "", err } - return out, string(out.UpdateStatus.Status), nil + return out, string(out.DataLakes[0].UpdateStatus.Status), nil } } -func findDataLakeByID(ctx context.Context, conn *securitylake.Client, id string) (*awstypes.DataLakeResource, error) { - region := extractRegionFromID(id) - +func FindDataLakeByID(ctx context.Context, conn *securitylake.Client, id string) (*securitylake.ListDataLakesOutput, error) { + region, err := extractRegionFromARN(id) + if err != nil { + return nil, err + } + fmt.Printf("The region is %s\n", region) in := &securitylake.ListDataLakesInput{ Regions: []string{region}, } - + out, err := conn.ListDataLakes(ctx, in) if err != nil { var nfe *awstypes.ResourceNotFoundException @@ -469,73 +466,170 @@ func findDataLakeByID(ctx context.Context, conn *securitylake.Client, id string) return nil, err } - if out == nil { + if out == nil || out.DataLakes == nil { return nil, tfresource.NewEmptyResultError(in) } - return &out.DataLakes[0], nil + return out, nil } +func flattenDataLakeConfigurations(ctx context.Context, apiObjects []awstypes.DataLakeResource) (types.List, diag.Diagnostics) { + var diags diag.Diagnostics + elemType := types.ObjectType{AttrTypes: dataLakeConfigurations} -// func flattenComplexArgument(ctx context.Context, apiObject *securitylake.ComplexArgument) (types.List, diag.Diagnostics) { -// var diags diag.Diagnostics -// elemType := types.ObjectType{AttrTypes: complexArgumentAttrTypes} + if len(apiObjects) == 0 { + return types.ListNull(elemType), diags + } -// if apiObject == nil { -// return types.ListNull(elemType), diags -// } + elems := []attr.Value{} + for _, apiObject := range apiObjects { -// obj := map[string]attr.Value{ -// "nested_required": flex.StringValueToFramework(ctx, apiObject.NestedRequired), -// "nested_optional": flex.StringValueToFramework(ctx, 
apiObject.NestedOptional), -// } -// objVal, d := types.ObjectValue(complexArgumentAttrTypes, obj) -// diags.Append(d...) + encryptionConfiguration, d := flattenEncryptionConfiguration(ctx, apiObject.EncryptionConfiguration) + diags.Append(d...) + lifecycleExpiration, d := flattenLifeCycleConfiguration(ctx, apiObject.LifecycleConfiguration) + diags.Append(d...) + replicationConfiguration, d := flattenReplicationConfiguration(ctx, apiObject.ReplicationConfiguration) + diags.Append(d...) -// listVal, d := types.ListValue(elemType, []attr.Value{objVal}) -// diags.Append(d...) + obj := map[string]attr.Value{ + "encryption_configuration": encryptionConfiguration, + "lifecycle_configuration": lifecycleExpiration, + "region": flex.StringToFramework(ctx, apiObject.Region), + "replication_configuration": replicationConfiguration, + } + objVal, d := types.ObjectValue(dataLakeConfigurations, obj) + diags.Append(d...) -// return listVal, diags -// } + elems = append(elems, objVal) + } -// func flattenComplexArguments(ctx context.Context, apiObjects []*securitylake.ComplexArgument) (types.List, diag.Diagnostics) { -// var diags diag.Diagnostics -// elemType := types.ObjectType{AttrTypes: complexArgumentAttrTypes} + listVal, d := types.ListValue(elemType, elems) + diags.Append(d...) 
-// if len(apiObjects) == 0 { -// return types.ListNull(elemType), diags -// } + return listVal, diags +} + +func flattenLifeCycleConfiguration(ctx context.Context, apiObject *awstypes.DataLakeLifecycleConfiguration) (types.List, diag.Diagnostics) { + var diags diag.Diagnostics + elemType := types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleTypes} -// elems := []attr.Value{} -// for _, apiObject := range apiObjects { -// if apiObject == nil { -// continue -// } + if apiObject == nil { + return types.ListNull(elemType), diags + } -// obj := map[string]attr.Value{ -// "nested_required": flex.StringValueToFramework(ctx, apiObject.NestedRequired), -// "nested_optional": flex.StringValueToFramework(ctx, apiObject.NestedOptional), -// } -// objVal, d := types.ObjectValue(complexArgumentAttrTypes, obj) -// diags.Append(d...) + expiration, d := flattenLifecycleExpiration(ctx, apiObject.Expiration) + diags.Append(d...) + transitions, d := flattenLifecycleTransitions(ctx, apiObject.Transitions) + diags.Append(d...) -// elems = append(elems, objVal) -// } + obj := map[string]attr.Value{ + "expiration": expiration, + "transitions": transitions, + } + objVal, d := types.ObjectValue(dataLakeConfigurationsLifecycleTypes, obj) + diags.Append(d...) -// listVal, d := types.ListValue(elemType, elems) -// diags.Append(d...) + listVal, d := types.ListValue(elemType, []attr.Value{objVal}) + diags.Append(d...) 
-// return listVal, diags -// } + return listVal, diags +} + +func flattenEncryptionConfiguration(ctx context.Context, apiObject *awstypes.DataLakeEncryptionConfiguration) (types.List, diag.Diagnostics) { + var diags diag.Diagnostics + elemType := types.ObjectType{AttrTypes: dataLakeConfigurationsEncryptionTypes} + + if apiObject == nil { + return types.ListNull(elemType), diags + } + + obj := map[string]attr.Value{ + "kms_key_id": flex.StringToFramework(ctx, apiObject.KmsKeyId), + } + objVal, d := types.ObjectValue(dataLakeConfigurationsEncryptionTypes, obj) + diags.Append(d...) + + listVal, d := types.ListValue(elemType, []attr.Value{objVal}) + diags.Append(d...) + + return listVal, diags +} + +func flattenLifecycleExpiration(ctx context.Context, apiObject *awstypes.DataLakeLifecycleExpiration) (types.List, diag.Diagnostics) { + var diags diag.Diagnostics + elemType := types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleExpirationTypes} + + if apiObject == nil { + return types.ListNull(elemType), diags + } + + obj := map[string]attr.Value{ + "days": flex.Int32ToFramework(ctx, apiObject.Days), + } + + objVal, d := types.ObjectValue(dataLakeConfigurationsLifecycleExpirationTypes, obj) + diags.Append(d...) + + listVal, d := types.ListValue(elemType, []attr.Value{objVal}) + diags.Append(d...) 
+ + return listVal, diags +} + +func flattenLifecycleTransitions(ctx context.Context, apiObjects []awstypes.DataLakeLifecycleTransition) (types.List, diag.Diagnostics) { + var diags diag.Diagnostics + elemType := types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleTransitionsTypes} + + if len(apiObjects) == 0 { + return types.ListNull(elemType), diags + } + + elems := []attr.Value{} + for _, apiObject := range apiObjects { + obj := map[string]attr.Value{ + "days": flex.Int32ToFramework(ctx, apiObject.Days), + "storage_class": flex.StringToFramework(ctx, apiObject.StorageClass), + } + objVal, d := types.ObjectValue(dataLakeConfigurationsLifecycleTransitionsTypes, obj) + diags.Append(d...) + + elems = append(elems, objVal) + } + + listVal, d := types.ListValue(elemType, elems) + diags.Append(d...) + + return listVal, diags +} + +func flattenReplicationConfiguration(ctx context.Context, apiObject *awstypes.DataLakeReplicationConfiguration) (types.List, diag.Diagnostics) { + var diags diag.Diagnostics + elemType := types.ObjectType{AttrTypes: dataLakeConfigurationsReplicationConfigurationTypes} + if apiObject == nil { + return types.ListNull(elemType), diags + } + + obj := map[string]attr.Value{ + "role_arn": flex.StringToFramework(ctx, apiObject.RoleArn), + "regions": flex.FlattenFrameworkStringValueList(ctx, apiObject.Regions), + } + objVal, d := types.ObjectValue(dataLakeConfigurationsReplicationConfigurationTypes, obj) + diags.Append(d...) + + listVal, d := types.ListValue(elemType, []attr.Value{objVal}) + diags.Append(d...) 
+ + return listVal, diags +} func expanddataLakeConfigurations(ctx context.Context, tfList []dataLakeConfigurationsData) []awstypes.DataLakeConfiguration { - var diags diag.Diagnostics + var diags diag.Diagnostics if len(tfList) == 0 { - return nil - } + return nil + } - var apiObject []awstypes.DataLakeConfiguration + var apiObject []awstypes.DataLakeConfiguration var encryptionConfiguration []dataLakeConfigurationsEncryption var lifecycleConfiguration []dataLakeConfigurationsLifecycle var replicationConfiguration []dataLakeConfigurationsReplicationConfiguration @@ -554,7 +648,7 @@ func expanddataLakeConfigurations(ctx context.Context, tfList []dataLakeConfigur } if !tfObj.LifecycleConfiguration.IsNull() { - item.LifecycleConfiguration,_ = expandLifecycleConfiguration(ctx, lifecycleConfiguration) + item.LifecycleConfiguration, _ = expandLifecycleConfiguration(ctx, lifecycleConfiguration) } if !tfObj.ReplicationConfiguration.IsNull() { @@ -582,8 +676,7 @@ func expandEncryptionConfiguration(tfList []dataLakeConfigurationsEncryption) *a return apiObject } - -func expandLifecycleConfiguration(ctx context.Context, tfList []dataLakeConfigurationsLifecycle) (*awstypes.DataLakeLifecycleConfiguration,diag.Diagnostics) { +func expandLifecycleConfiguration(ctx context.Context, tfList []dataLakeConfigurationsLifecycle) (*awstypes.DataLakeLifecycleConfiguration, diag.Diagnostics) { var diags diag.Diagnostics if len(tfList) == 0 { @@ -625,11 +718,11 @@ func expandLifecycleExpiration(tfList []dataLakeConfigurationsLifecycleExpiratio } func expandLifecycleTransitions(tfList []dataLakeConfigurationsLifecycleTransitions) []awstypes.DataLakeLifecycleTransition { - if len(tfList) == 0 { - return nil - } + if len(tfList) == 0 { + return nil + } - var apiObject []awstypes.DataLakeLifecycleTransition + var apiObject []awstypes.DataLakeLifecycleTransition for _, tfObj := range tfList { item := awstypes.DataLakeLifecycleTransition{} @@ -668,47 +761,53 @@ func 
expandReplicationConfiguration(ctx context.Context, tfList []dataLakeConfig return apiObject } - var ( + dataLakeConfigurations = map[string]attr.Type{ + "encryption_configuration": types.SetType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsEncryptionTypes}}, + "lifecycle_configuration": types.SetType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleTypes}}, + "region": types.StringType, + "replication_configuration": types.SetType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsReplicationConfigurationTypes}}, + } + dataLakeConfigurationsEncryptionTypes = map[string]attr.Type{ - "kms_key_id": types.StringType, + "kms_key_id": types.StringType, } dataLakeConfigurationsLifecycleExpirationTypes = map[string]attr.Type{ - "days": types.Int64Type, + "days": types.Int64Type, } dataLakeConfigurationsLifecycleTransitionsTypes = map[string]attr.Type{ - "days": types.Int64Type, + "days": types.Int64Type, "storage_class": types.StringType, } dataLakeConfigurationsLifecycleTypes = map[string]attr.Type{ - "expiration": types.SetType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleExpirationTypes}}, + "expiration": types.SetType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleExpirationTypes}}, "transitions": types.SetType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleTransitionsTypes}}, } dataLakeConfigurationsReplicationConfigurationTypes = map[string]attr.Type{ - "role_arn": types.StringType, - "regions": types.ListType{ElemType: types.StringType}, + "role_arn": types.StringType, + "regions": types.ListType{ElemType: types.StringType}, } ) type resourceDataLakeData struct { - ARN types.String `tfsdk:"arn"` - MetastoreManagerRoleArn types.String `tfsdk:"metastore_manager_role_arn"` - ID types.String `tfsdk:"id"` - Configurations types.List `tfsdk:"configurations"` - Tags types.Map `tfsdk:"tags"` - TagsAll types.Map `tfsdk:"tags_all"` - Timeouts timeouts.Value 
`tfsdk:"timeouts"` + ARN types.String `tfsdk:"arn"` + ID types.String `tfsdk:"id"` + MetaStoreManagerRoleArn types.String `tfsdk:"meta_store_manager_role_arn"` + Configurations types.List `tfsdk:"configurations"` + Tags types.Map `tfsdk:"tags"` + TagsAll types.Map `tfsdk:"tags_all"` + Timeouts timeouts.Value `tfsdk:"timeouts"` } type dataLakeConfigurationsData struct { - EncryptionConfiguration types.Set `tfsdk:"encryption_configuration"` - LifecycleConfiguration types.Set `tfsdk:"lifecycle_configuration"` - Region types.String `tfsdk:"nested_optional"` - ReplicationConfiguration types.Set `tfsdk:"replication_configuration"` + EncryptionConfiguration types.Set `tfsdk:"encryption_configuration"` + LifecycleConfiguration types.Set `tfsdk:"lifecycle_configuration"` + Region types.String `tfsdk:"region"` + ReplicationConfiguration types.Set `tfsdk:"replication_configuration"` } type dataLakeConfigurationsEncryption struct { @@ -716,33 +815,60 @@ type dataLakeConfigurationsEncryption struct { } type dataLakeConfigurationsLifecycle struct { - Expiration types.Set `tfsdk:"expiration"` - Transitions types.Set `tfsdk:"transitions"` + Expiration types.Set `tfsdk:"expiration"` + Transitions types.Set `tfsdk:"transitions"` } type dataLakeConfigurationsLifecycleExpiration struct { - Days types.Int64 `tfsdk:"days"` + Days types.Int64 `tfsdk:"days"` } type dataLakeConfigurationsLifecycleTransitions struct { - Days types.Int64 `tfsdk:"days"` - StorageClass types.String `tfsdk:"storage_class"` + Days types.Int64 `tfsdk:"days"` + StorageClass types.String `tfsdk:"storage_class"` } type dataLakeConfigurationsReplicationConfiguration struct { RoleArn types.String `tfsdk:"role_arn"` - Regions types.List `tfsdk:"regions"` + Regions types.List `tfsdk:"regions"` } -func generateDataLakeID(arn, region string) string { - return arn + "|" + region +func extractRegionFromARN(arn string) (string, error) { + parts := strings.Split(arn, ":") + if len(parts) < 4 { + return "", fmt.Errorf("invalid 
ARN format") + } + return parts[3], nil } -func extractRegionFromID(id string) string { - parts := strings.Split(id, "|") - if len(parts) < 2 { - // Handle error or return a default value - return "" - } - return parts[1] -} \ No newline at end of file +// refreshFromOutput writes state data from an AWS response object +// func (rd *resourceDataLakeData) refreshFromOutput(ctx context.Context, out *awstypes.DataLakeResource) diag.Diagnostics { +// var diags diag.Diagnostics + +// if out == nil { +// return diags +// } + +// rd.ARN = flex.StringToFramework(ctx, out.DataLakeArn) +// rd.Configurations, d = flattenDataLakeConfigurations(ctx, out) +// if out.Framework != nil { +// rd.FrameworkID = flex.StringToFramework(ctx, out.Framework.Id) +// } +// rd.ID = flex.StringToFramework(ctx, metadata.Id) +// rd.Name = flex.StringToFramework(ctx, metadata.Name) +// rd.Status = flex.StringValueToFramework(ctx, metadata.Status) + +// reportsDestination, d := flattenAssessmentReportsDestination(ctx, metadata.AssessmentReportsDestination) +// diags.Append(d...) +// rd.AssessmentReportsDestination = reportsDestination +// roles, d := flattenAssessmentRoles(ctx, metadata.Roles) +// diags.Append(d...) +// rd.RolesAll = roles +// scope, d := flattenAssessmentScope(ctx, metadata.Scope) +// diags.Append(d...) +// rd.Scope = scope + +// setTagsOut(ctx, out.Tags) + +// return diags +// } diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index ea45cf2ee796..94452155d8f8 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -1,222 +1,102 @@ package securitylake_test -// **PLEASE DELETE THIS AND ALL TIP COMMENTS BEFORE SUBMITTING A PR FOR REVIEW!** -// -// TIP: ==== INTRODUCTION ==== -// Thank you for trying the skaff tool! -// -// You have opted to include these helpful comments. They all include "TIP:" -// to help you find and remove them when you're done with them. 
-// -// While some aspects of this file are customized to your input, the -// scaffold tool does *not* look at the AWS API and ensure it has correct -// function, structure, and variable names. It makes guesses based on -// commonalities. You will need to make significant adjustments. -// -// In other words, as generated, this is a rough outline of the work you will -// need to do. If something doesn't make sense for your situation, get rid of -// it. import ( - // TIP: ==== IMPORTS ==== - // This is a common set of imports but not customized to your code since - // your code hasn't been written yet. Make sure you, your IDE, or - // goimports -w fixes these imports. - // - // The provider linter wants your imports to be in two groups: first, - // standard library (i.e., "fmt" or "strings"), second, everything else. - // - // Also, AWS Go SDK v2 may handle nested structures differently than v1, - // using the services/securitylake/types package. If so, you'll - // need to import types and reference the nested types, e.g., as - // types.. "context" + "errors" "fmt" - "regexp" - "strings" "testing" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/securitylake" "github.com/aws/aws-sdk-go-v2/service/securitylake/types" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" - "github.com/hashicorp/terraform-provider-aws/internal/errs" - "github.com/hashicorp/terraform-provider-aws/names" - - // TIP: You will often need to import the package that this test file lives - // in. Since it is in the "test" context, it must import the package to use - // any normal context constants, variables, or functions. 
tfsecuritylake "github.com/hashicorp/terraform-provider-aws/internal/service/securitylake" + "github.com/hashicorp/terraform-provider-aws/names" ) -// TIP: File Structure. The basic outline for all test files should be as -// follows. Improve this resource's maintainability by following this -// outline. -// -// 1. Package declaration (add "_test" since this is a test file) -// 2. Imports -// 3. Unit tests -// 4. Basic test -// 5. Disappears test -// 6. All the other tests -// 7. Helper functions (exists, destroy, check, etc.) -// 8. Functions that return Terraform configurations - -// TIP: ==== UNIT TESTS ==== -// This is an example of a unit test. Its name is not prefixed with -// "TestAcc" like an acceptance test. -// -// Unlike acceptance tests, unit tests do not access AWS and are focused on a -// function (or method). Because of this, they are quick and cheap to run. -// -// In designing a resource's implementation, isolate complex bits from AWS bits -// so that they can be tested through a unit test. We encourage more unit tests -// in the provider. -// -// Cut and dry functions using well-used patterns, like typical flatteners and -// expanders, don't need unit testing. However, if they are complex or -// intricate, they should be unit tested. 
-func TestDataLakeExampleUnitTest(t *testing.T) { - testCases := []struct { - TestName string - Input string - Expected string - Error bool - }{ - { - TestName: "empty", - Input: "", - Expected: "", - Error: true, - }, - { - TestName: "descriptive name", - Input: "some input", - Expected: "some output", - Error: false, - }, - { - TestName: "another descriptive name", - Input: "more input", - Expected: "more output", - Error: false, - }, - } - - for _, testCase := range testCases { - t.Run(testCase.TestName, func(t *testing.T) { - got, err := tfsecuritylake.FunctionFromResource(testCase.Input) - - if err != nil && !testCase.Error { - t.Errorf("got error (%s), expected no error", err) - } - - if err == nil && testCase.Error { - t.Errorf("got (%s) and no error, expected error", got) - } - - if got != testCase.Expected { - t.Errorf("got %s, expected %s", got, testCase.Expected) - } - }) - } -} - -// TIP: ==== ACCEPTANCE TESTS ==== -// This is an example of a basic acceptance test. This should test as much of -// standard functionality of the resource as possible, and test importing, if -// applicable. We prefix its name with "TestAcc", the service, and the -// resource name. -// -// Acceptance test access AWS and cost money to run. func TestAccSecurityLakeDataLake_basic(t *testing.T) { ctx := acctest.Context(t) - // TIP: This is a long-running test guard for tests that run longer than - // 300s (5 min) generally. 
if testing.Short() { t.Skip("skipping long-running test in short mode") } - var datalake securitylake.DescribeDataLakeResponse - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + var datalake types.DataLakeResource + // rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_securitylake_data_lake.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.SecurityLakeEndpointID) - testAccPreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SecurityLake) }, - ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeEndpointID), + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLake), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataLakeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataLakeConfig_basic(rName), + Config: testAccDataLakeConfig_basic(), Check: resource.ComposeTestCheckFunc( testAccCheckDataLakeExists(ctx, resourceName, &datalake), - resource.TestCheckResourceAttr(resourceName, "auto_minor_version_upgrade", "false"), - resource.TestCheckResourceAttrSet(resourceName, "maintenance_window_start_time.0.day_of_week"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "user.*", map[string]string{ - "console_access": "false", - "groups.#": "0", - "username": "Test", - "password": "TestTest1234", - }), - acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "securitylake", regexp.MustCompile(`datalake:+.`)), + // resource.TestCheckResourceAttr(resourceName, "auto_minor_version_upgrade", "false"), + // resource.TestCheckResourceAttrSet(resourceName, "maintenance_window_start_time.0.day_of_week"), + // resource.TestCheckTypeSetElemNestedAttrs(resourceName, "user.*", map[string]string{ + // "console_access": "false", + // "groups.#": "0", + // "username": "Test", + // "password": "TestTest1234", + // }), + // acctest.MatchResourceAttrRegionalARN(resourceName, "arn", 
"securitylake", regexp.MustCompile(`data-lake/:+.`)), ), }, { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn"}, }, }, }) } -func TestAccSecurityLakeDataLake_disappears(t *testing.T) { - ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - - var datalake securitylake.DescribeDataLakeResponse - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_securitylake_data_lake.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.SecurityLakeEndpointID) - testAccPreCheck(t) - }, - ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeEndpointID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDataLakeDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccDataLakeConfig_basic(rName, testAccDataLakeVersionNewer), - Check: resource.ComposeTestCheckFunc( - testAccCheckDataLakeExists(ctx, resourceName, &datalake), - // TIP: The Plugin-Framework disappears helper is similar to the Plugin-SDK version, - // but expects a new resource factory function as the third argument. 
To expose this - // private function to the testing package, you may need to add a line like the following - // to exports_test.go: - // - // var ResourceDataLake = newResourceDataLake - acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfsecuritylake.ResourceDataLake, resourceName), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} +// func TestAccSecurityLakeDataLake_disappears(t *testing.T) { +// ctx := acctest.Context(t) +// if testing.Short() { +// t.Skip("skipping long-running test in short mode") +// } + +// var datalake securitylake.DescribeDataLakeResponse +// rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) +// resourceName := "aws_securitylake_data_lake.test" + +// resource.ParallelTest(t, resource.TestCase{ +// PreCheck: func() { +// acctest.PreCheck(ctx, t) +// acctest.PreCheckPartitionHasService(t, names.SecurityLakeEndpointID) +// testAccPreCheck(t) +// }, +// ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeEndpointID), +// ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, +// CheckDestroy: testAccCheckDataLakeDestroy(ctx), +// Steps: []resource.TestStep{ +// { +// Config: testAccDataLakeConfig_basic(rName, testAccDataLakeVersionNewer), +// Check: resource.ComposeTestCheckFunc( +// testAccCheckDataLakeExists(ctx, resourceName, &datalake), +// // TIP: The Plugin-Framework disappears helper is similar to the Plugin-SDK version, +// // but expects a new resource factory function as the third argument. 
To expose this +// // private function to the testing package, you may need to add a line like the following +// // to exports_test.go: +// // +// // var ResourceDataLake = newResourceDataLake +// acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfsecuritylake.ResourceDataLake, resourceName), +// ), +// ExpectNonEmptyPlan: true, +// }, +// }, +// }) +// } func testAccCheckDataLakeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -227,17 +107,13 @@ func testAccCheckDataLakeDestroy(ctx context.Context) resource.TestCheckFunc { continue } - input := &securitylake.DescribeDataLakeInput{ - DataLakeId: aws.String(rs.Primary.ID), - } - _, err := conn.DescribeDataLake(ctx, &securitylake.DescribeDataLakeInput{ - DataLakeId: aws.String(rs.Primary.ID), - }) - if errs.IsA[*types.ResourceNotFoundException](err){ - return nil - } + _, err := tfsecuritylake.FindDataLakeByID(ctx, conn, rs.Primary.ID) if err != nil { - return nil + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return nil + } + return err } return create.Error(names.SecurityLake, create.ErrActionCheckingDestroyed, tfsecuritylake.ResNameDataLake, rs.Primary.ID, errors.New("not destroyed")) @@ -247,7 +123,7 @@ func testAccCheckDataLakeDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckDataLakeExists(ctx context.Context, name string, datalake *securitylake.DescribeDataLakeResponse) resource.TestCheckFunc { +func testAccCheckDataLakeExists(ctx context.Context, name string, datalake *types.DataLakeResource) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] if !ok { @@ -259,67 +135,27 @@ func testAccCheckDataLakeExists(ctx context.Context, name string, datalake *secu } conn := acctest.Provider.Meta().(*conns.AWSClient).SecurityLakeClient(ctx) - resp, err := conn.DescribeDataLake(ctx, &securitylake.DescribeDataLakeInput{ - DataLakeId: 
aws.String(rs.Primary.ID), - }) - + resp, err := tfsecuritylake.FindDataLakeByID(ctx, conn, rs.Primary.ID) if err != nil { return create.Error(names.SecurityLake, create.ErrActionCheckingExistence, tfsecuritylake.ResNameDataLake, rs.Primary.ID, err) } - *datalake = *resp - - return nil - } -} - -func testAccPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).SecurityLakeClient(ctx) + dl := &resp.DataLakes[0] - input := &securitylake.ListDataLakesInput{} - _, err := conn.ListDataLakes(ctx, input) - - if acctest.PreCheckSkipError(err) { - t.Skipf("skipping acceptance testing: %s", err) - } - if err != nil { - t.Fatalf("unexpected PreCheck error: %s", err) - } -} - -func testAccCheckDataLakeNotRecreated(before, after *securitylake.DescribeDataLakeResponse) resource.TestCheckFunc { - return func(s *terraform.State) error { - if before, after := aws.ToString(before.DataLakeId), aws.ToString(after.DataLakeId); before != after { - return create.Error(names.SecurityLake, create.ErrActionCheckingNotRecreated, tfsecuritylake.ResNameDataLake, aws.ToString(before.DataLakeId), errors.New("recreated")) - } + *datalake = *dl return nil } } -func testAccDataLakeConfig_basic(rName, version string) string { +func testAccDataLakeConfig_basic() string { return fmt.Sprintf(` -resource "aws_security_group" "test" { - name = %[1]q -} - -resource "aws_securitylake_data_lake" "test" { - data_lake_name = %[1]q - engine_type = "ActiveSecurityLake" - engine_version = %[2]q - host_instance_type = "securitylake.t2.micro" - security_groups = [aws_security_group.test.id] - authentication_strategy = "simple" - storage_type = "efs" - - logs { - general = true - } - - user { - username = "Test" - password = "TestTest1234" - } -} -`, rName, version) + resource "aws_securitylake_data_lake" "test" { + meta_store_manager_role_arn = "arn:aws:iam::182198062889:role/service-role/AmazonSecurityLakeMetaStoreManager" + + configurations { + region = "eu-west-2" + 
} + } +`) } diff --git a/internal/service/securitylake/generate.go b/internal/service/securitylake/generate.go index 311d0a091373..6e045f9022e5 100644 --- a/internal/service/securitylake/generate.go +++ b/internal/service/securitylake/generate.go @@ -1,6 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ServiceTagsMap -KVTValues -SkipTypesImp -ListTags -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. diff --git a/website/docs/r/securitylake_data_lake2.html.markdown b/website/docs/r/securitylake_data_lake2.html.markdown new file mode 100644 index 000000000000..d66644bb3fa8 --- /dev/null +++ b/website/docs/r/securitylake_data_lake2.html.markdown @@ -0,0 +1,60 @@ +--- +subcategory: "Security Lake" +layout: "aws" +page_title: "AWS: aws_securitylake_data_lake2" +description: |- + Terraform resource for managing an AWS Security Lake Data Lake2. +--- +` +# Resource: aws_securitylake_data_lake2 + +Terraform resource for managing an AWS Security Lake Data Lake2. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_securitylake_data_lake2" "example" { +} +``` + +## Argument Reference + +The following arguments are required: + +* `example_arg` - (Required) Concise argument description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. + +The following arguments are optional: + +* `optional_arg` - (Optional) Concise argument description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. 
+ +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `arn` - ARN of the Data Lake2. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. +* `example_attribute` - Concise description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `60m`) +* `update` - (Default `180m`) +* `delete` - (Default `90m`) + +## Import + +Security Lake Data Lake2 can be imported using the `example_id_arg`, e.g., + +``` +$ terraform import aws_securitylake_data_lake2.example rft-8012925589 +``` diff --git a/website/docs/r/securitylake_exception_subscription.html.markdown b/website/docs/r/securitylake_exception_subscription.html.markdown new file mode 100644 index 000000000000..b506af5ae935 --- /dev/null +++ b/website/docs/r/securitylake_exception_subscription.html.markdown @@ -0,0 +1,60 @@ +--- +subcategory: "Security Lake" +layout: "aws" +page_title: "AWS: aws_securitylake_exception_subscription" +description: |- + Terraform resource for managing an AWS Security Lake Exception Subscription. +--- +` +# Resource: aws_securitylake_exception_subscription + +Terraform resource for managing an AWS Security Lake Exception Subscription. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_securitylake_exception_subscription" "example" { +} +``` + +## Argument Reference + +The following arguments are required: + +* `example_arg` - (Required) Concise argument description. 
Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. + +The following arguments are optional: + +* `optional_arg` - (Optional) Concise argument description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `arn` - ARN of the Exception Subscription. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. +* `example_attribute` - Concise description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. 
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `60m`) +* `update` - (Default `180m`) +* `delete` - (Default `90m`) + +## Import + +Security Lake Exception Subscription can be imported using the `example_id_arg`, e.g., + +``` +$ terraform import aws_securitylake_exception_subscription.example rft-8012925589 +``` From d59ad48087512f03e1fedeae8e26b7a1171d1708 Mon Sep 17 00:00:00 2001 From: markos kandylis Date: Wed, 22 Nov 2023 17:48:10 +0000 Subject: [PATCH 03/45] Successfull Creation part --- internal/service/securitylake/data_lake.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index 1dbc1f9ac1c2..f062093eb18f 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -189,7 +189,6 @@ func (r *resourceDataLake) Create(ctx context.Context, req resource.CreateReques plan.ID = flex.StringToFramework(ctx, out.DataLakes[0].DataLakeArn) state := plan - createTimeout := r.CreateTimeout(ctx, state.Timeouts) _, err = waitDataLakeCreated(ctx, conn, state.ID.ValueString(), createTimeout) if err != nil { @@ -230,7 +229,7 @@ func (r *resourceDataLake) Read(ctx context.Context, req resource.ReadRequest, r state.ARN = flex.StringToFramework(ctx, out.DataLakes[0].DataLakeArn) state.ID = flex.StringToFramework(ctx, out.DataLakes[0].DataLakeArn) - state.Configurations ,_ = flattenDataLakeConfigurations(ctx, out.DataLakes) + state.Configurations, _ = flattenDataLakeConfigurations(ctx, out.DataLakes) fmt.Println(state.ID.ValueString()) resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
From 094c0b60d21bebcd02bcde22d29fa97602461548 Mon Sep 17 00:00:00 2001 From: markos kandylis Date: Sun, 26 Nov 2023 12:34:09 +0000 Subject: [PATCH 04/45] Succesfull first test --- internal/service/securitylake/data_lake.go | 352 ++++++++---------- .../service/securitylake/data_lake_test.go | 35 +- internal/service/securitylake/generate.go | 2 +- .../securitylake/service_package_gen.go | 3 - internal/service/securitylake/tags_gen.go | 146 ++++++++ 5 files changed, 330 insertions(+), 208 deletions(-) create mode 100644 internal/service/securitylake/tags_gen.go diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index f062093eb18f..7ceb0f3f9ebd 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -18,22 +18,22 @@ import ( "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" - // "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" - // "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/framework" "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" - tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + // tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" 
"github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) // Function annotations are used for resource registration to the Provider. DO NOT EDIT. // @FrameworkResource(name="Data Lake") -// @Tags(identifierAttribute="arn") func newResourceDataLake(_ context.Context) (resource.ResourceWithConfigure, error) { r := &resourceDataLake{} @@ -64,15 +64,17 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques "id": framework.IDAttribute(), "meta_store_manager_role_arn": schema.StringAttribute{ Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, }, - names.AttrTags: tftags.TagsAttribute(), - names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + // names.AttrTags: tftags.TagsAttribute(), + // names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), }, Blocks: map[string]schema.Block{ - "configurations": schema.ListNestedBlock{ - Validators: []validator.List{ - listvalidator.SizeAtLeast(1), - listvalidator.SizeAtMost(1), + "configurations": schema.SetNestedBlock{ + Validators: []validator.Set{ + setvalidator.SizeAtLeast(1), }, NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ @@ -81,25 +83,30 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques }, }, Blocks: map[string]schema.Block{ - "encryption_configuration": schema.SetNestedBlock{ - Validators: []validator.Set{ - setvalidator.SizeAtLeast(1), + "encryption_configuration": schema.ListNestedBlock{ + Validators: []validator.List{ + listvalidator.SizeAtMost(1), }, NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "kms_key_id": schema.StringAttribute{ Optional: true, + Computed: true, + Default: stringdefault.StaticString("S3_MANAGED_KEY"), }, }, }, }, - "lifecycle_configuration": schema.SetNestedBlock{ - Validators: []validator.Set{ - setvalidator.SizeAtLeast(1), + "lifecycle_configuration": 
schema.ListNestedBlock{ + Validators: []validator.List{ + listvalidator.SizeAtMost(1), }, NestedObject: schema.NestedBlockObject{ Blocks: map[string]schema.Block{ - "expiration": schema.SetNestedBlock{ + "expiration": schema.ListNestedBlock{ + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "days": schema.Int64Attribute{ @@ -109,6 +116,9 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques }, }, "transitions": schema.SetNestedBlock{ + Validators: []validator.Set{ + setvalidator.SizeAtMost(1), + }, NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "days": schema.Int64Attribute{ @@ -123,17 +133,18 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques }, }, }, - "replication_configuration": schema.SetNestedBlock{ - Validators: []validator.Set{ - setvalidator.SizeAtLeast(1), + "replication_configuration": schema.ListNestedBlock{ + Validators: []validator.List{ + listvalidator.SizeAtMost(1), }, NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "role_arn": schema.StringAttribute{ Optional: true, }, - "regions": schema.StringAttribute{ - Optional: true, + "regions": schema.ListAttribute{ + ElementType: types.StringType, + Optional: true, }, }, }, @@ -152,7 +163,9 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques func (r *resourceDataLake) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { conn := r.Meta().SecurityLakeClient(ctx) + var plan resourceDataLakeData + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) 
if resp.Diagnostics.HasError() { return @@ -171,6 +184,7 @@ func (r *resourceDataLake) Create(ctx context.Context, req resource.CreateReques } out, err := conn.CreateDataLake(ctx, in) + if err != nil { resp.Diagnostics.AddError( create.ProblemStandardMessage(names.SecurityLake, create.ErrActionCreating, ResNameDataLake, plan.ID.ValueString(), err), @@ -178,36 +192,29 @@ func (r *resourceDataLake) Create(ctx context.Context, req resource.CreateReques ) return } - if out == nil || out.DataLakes == nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.SecurityLake, create.ErrActionCreating, ResNameDataLake, plan.ID.ValueString(), nil), - errors.New("empty output").Error(), - ) - return - } + plan.ARN = flex.StringToFramework(ctx, out.DataLakes[0].DataLakeArn) - plan.ID = flex.StringToFramework(ctx, out.DataLakes[0].DataLakeArn) - state := plan - createTimeout := r.CreateTimeout(ctx, state.Timeouts) - _, err = waitDataLakeCreated(ctx, conn, state.ID.ValueString(), createTimeout) + createTimeout := r.CreateTimeout(ctx, plan.Timeouts) + waitOut, err := waitDataLakeCreated(ctx, conn, plan.ARN.ValueString(), createTimeout) + fmt.Println(waitOut) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForCreation, ResNameDataLake, state.ID.ValueString(), err), + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForCreation, ResNameDataLake, plan.ARN.ValueString(), err), err.Error(), ) return } - - state.Configurations, _ = flattenDataLakeConfigurations(ctx, out.DataLakes) - - resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + state := plan + resp.Diagnostics.Append(state.refreshFromOutput(ctx, waitOut)...) + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
} func (r *resourceDataLake) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { conn := r.Meta().SecurityLakeClient(ctx) var state resourceDataLakeData + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) if resp.Diagnostics.HasError() { return @@ -227,91 +234,62 @@ func (r *resourceDataLake) Read(ctx context.Context, req resource.ReadRequest, r return } - state.ARN = flex.StringToFramework(ctx, out.DataLakes[0].DataLakeArn) - state.ID = flex.StringToFramework(ctx, out.DataLakes[0].DataLakeArn) - state.Configurations, _ = flattenDataLakeConfigurations(ctx, out.DataLakes) - - fmt.Println(state.ID.ValueString()) + resp.Diagnostics.Append(state.refreshFromOutput(ctx, out)...) resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) } func (r *resourceDataLake) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { - // conn := r.Meta().SecurityLakeClient(ctx) - - // // TIP: -- 2. Fetch the plan - // var plan, state resourceDataLakeData - // resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - // resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - // if resp.Diagnostics.HasError() { - // return - // } - - // // TIP: -- 3. Populate a modify input structure and check for changes - // if !plan.Name.Equal(state.Name) || - // !plan.Description.Equal(state.Description) || - // !plan.ComplexArgument.Equal(state.ComplexArgument) || - // !plan.Type.Equal(state.Type) { - - // in := &securitylake.UpdateDataLakeInput{ - // // TIP: Mandatory or fields that will always be present can be set when - // // you create the Input structure. (Replace these with real fields.) - // DataLakeId: aws.String(plan.ID.ValueString()), - // DataLakeName: aws.String(plan.Name.ValueString()), - // DataLakeType: aws.String(plan.Type.ValueString()), - // } - - // if !plan.Description.IsNull() { - // // TIP: Optional fields should be set based on whether or not they are - // // used. 
- // in.Description = aws.String(plan.Description.ValueString()) - // } - // if !plan.ComplexArgument.IsNull() { - // // TIP: Use an expander to assign a complex argument. The elements must be - // // deserialized into the appropriate struct before being passed to the expander. - // var tfList []complexArgumentData - // resp.Diagnostics.Append(plan.ComplexArgument.ElementsAs(ctx, &tfList, false)...) - // if resp.Diagnostics.HasError() { - // return - // } - - // in.ComplexArgument = expandComplexArgument(tfList) - // } - - // // TIP: -- 4. Call the AWS modify/update function - // out, err := conn.UpdateDataLake(ctx, in) - // if err != nil { - // resp.Diagnostics.AddError( - // create.ProblemStandardMessage(names.SecurityLake, create.ErrActionUpdating, ResNameDataLake, plan.ID.ValueString(), err), - // err.Error(), - // ) - // return - // } - // if out == nil || out.DataLake == nil { - // resp.Diagnostics.AddError( - // create.ProblemStandardMessage(names.SecurityLake, create.ErrActionUpdating, ResNameDataLake, plan.ID.ValueString(), nil), - // errors.New("empty output").Error(), - // ) - // return - // } - - // // TIP: Using the output from the update function, re-set any computed attributes - // plan.ARN = flex.StringToFramework(ctx, out.DataLake.Arn) - // plan.ID = flex.StringToFramework(ctx, out.DataLake.DataLakeId) - // } - - // // TIP: -- 5. Use a waiter to wait for update to complete - // updateTimeout := r.UpdateTimeout(ctx, plan.Timeouts) - // _, err := waitDataLakeUpdated(ctx, conn, plan.ID.ValueString(), updateTimeout) - // if err != nil { - // resp.Diagnostics.AddError( - // create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForUpdate, ResNameDataLake, plan.ID.ValueString(), err), - // err.Error(), - // ) - // return - // } - - // // TIP: -- 6. Save the request plan to response state - // resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) 
+ conn := r.Meta().SecurityLakeClient(ctx) + + var plan, state resourceDataLakeData + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + if !plan.Configurations.Equal(state.Configurations) { + + var configurations []dataLakeConfigurationsData + resp.Diagnostics.Append(plan.Configurations.ElementsAs(ctx, &configurations, false)...) + if resp.Diagnostics.HasError() { + return + } + + in := &securitylake.UpdateDataLakeInput{ + Configurations: expanddataLakeConfigurations(ctx, configurations), + } + + out, err := conn.UpdateDataLake(ctx, in) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionUpdating, ResNameDataLake, plan.ID.ValueString(), err), + err.Error(), + ) + return + } + if out == nil || out.DataLakes == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionUpdating, ResNameDataLake, plan.ID.ValueString(), nil), + errors.New("empty output").Error(), + ) + return + } + + resp.Diagnostics.Append(state.refreshFromOutput(ctx, &out.DataLakes[0])...) + } + + updateTimeout := r.UpdateTimeout(ctx, plan.Timeouts) + _, err := waitDataLakeUpdated(ctx, conn, plan.ID.ValueString(), updateTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForUpdate, ResNameDataLake, plan.ID.ValueString(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) 
} func (r *resourceDataLake) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { @@ -358,8 +336,7 @@ func (r *resourceDataLake) ImportState(ctx context.Context, req resource.ImportS resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) } -func waitDataLakeCreated(ctx context.Context, conn *securitylake.Client, id string, timeout time.Duration) (*securitylake.ListDataLakesOutput, error) { - fmt.Println(id) +func waitDataLakeCreated(ctx context.Context, conn *securitylake.Client, id string, timeout time.Duration) (*awstypes.DataLakeResource, error) { stateConf := &retry.StateChangeConf{ Pending: []string{string(awstypes.DataLakeStatusInitialized)}, Target: []string{string(awstypes.DataLakeStatusCompleted)}, @@ -370,7 +347,7 @@ func waitDataLakeCreated(ctx context.Context, conn *securitylake.Client, id stri } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*securitylake.ListDataLakesOutput); ok { + if out, ok := outputRaw.(*awstypes.DataLakeResource); ok { return out, err } @@ -396,7 +373,6 @@ func waitDataLakeUpdated(ctx context.Context, conn *securitylake.Client, id stri } func waitDataLakeDeleted(ctx context.Context, conn *securitylake.Client, id string, timeout time.Duration) (*securitylake.ListDataLakesOutput, error) { - fmt.Println(id) stateConf := &retry.StateChangeConf{ Pending: []string{string(awstypes.DataLakeStatusInitialized), string(awstypes.DataLakeStatusCompleted)}, Target: []string{}, @@ -414,17 +390,15 @@ func waitDataLakeDeleted(ctx context.Context, conn *securitylake.Client, id stri func createStatusDataLake(ctx context.Context, conn *securitylake.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - fmt.Println(id) out, err := FindDataLakeByID(ctx, conn, id) if tfresource.NotFound(err) { return nil, "", nil } - if err != nil { return nil, "", err } - return out, string(out.DataLakes[0].CreateStatus), nil + return out, 
string(out.CreateStatus), nil } } @@ -438,16 +412,16 @@ func updateStatusDataLake(ctx context.Context, conn *securitylake.Client, id str if err != nil { return nil, "", err } - return out, string(out.DataLakes[0].UpdateStatus.Status), nil + return out, string(out.UpdateStatus.Status), nil } } -func FindDataLakeByID(ctx context.Context, conn *securitylake.Client, id string) (*securitylake.ListDataLakesOutput, error) { +func FindDataLakeByID(ctx context.Context, conn *securitylake.Client, id string) (*awstypes.DataLakeResource, error) { region, err := extractRegionFromARN(id) if err != nil { return nil, err } - fmt.Printf("The region is %s\n", region) + in := &securitylake.ListDataLakesInput{ Regions: []string{region}, } @@ -465,29 +439,33 @@ func FindDataLakeByID(ctx context.Context, conn *securitylake.Client, id string) return nil, err } - if out == nil || out.DataLakes == nil { + if out == nil || len(out.DataLakes) < 1 { return nil, tfresource.NewEmptyResultError(in) } + datalakeResource := out.DataLakes[0] - return out, nil + return &datalakeResource, nil } -func flattenDataLakeConfigurations(ctx context.Context, apiObjects []awstypes.DataLakeResource) (types.List, diag.Diagnostics) { +func flattenDataLakeConfigurations(ctx context.Context, apiObjects []*awstypes.DataLakeResource) (types.Set, diag.Diagnostics) { var diags diag.Diagnostics elemType := types.ObjectType{AttrTypes: dataLakeConfigurations} if len(apiObjects) == 0 { - return types.ListNull(elemType), diags + return types.SetNull(elemType), diags } elems := []attr.Value{} for _, apiObject := range apiObjects { encryptionConfiguration, d := flattenEncryptionConfiguration(ctx, apiObject.EncryptionConfiguration) + fmt.Println(encryptionConfiguration) diags.Append(d...) lifecycleExpiration, d := flattenLifeCycleConfiguration(ctx, apiObject.LifecycleConfiguration) + fmt.Println(lifecycleExpiration) diags.Append(d...) 
replicationConfiguration, d := flattenReplicationConfiguration(ctx, apiObject.ReplicationConfiguration) + fmt.Println(replicationConfiguration) diags.Append(d...) obj := map[string]attr.Value{ @@ -502,10 +480,10 @@ func flattenDataLakeConfigurations(ctx context.Context, apiObjects []awstypes.Da elems = append(elems, objVal) } - listVal, d := types.ListValue(elemType, elems) + setVal, d := types.SetValue(elemType, elems) diags.Append(d...) - return listVal, diags + return setVal, diags } func flattenLifeCycleConfiguration(ctx context.Context, apiObject *awstypes.DataLakeLifecycleConfiguration) (types.List, diag.Diagnostics) { @@ -558,7 +536,7 @@ func flattenLifecycleExpiration(ctx context.Context, apiObject *awstypes.DataLak var diags diag.Diagnostics elemType := types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleExpirationTypes} - if apiObject == nil { + if apiObject == nil || apiObject.Days == nil { return types.ListNull(elemType), diags } @@ -575,12 +553,12 @@ func flattenLifecycleExpiration(ctx context.Context, apiObject *awstypes.DataLak return listVal, diags } -func flattenLifecycleTransitions(ctx context.Context, apiObjects []awstypes.DataLakeLifecycleTransition) (types.List, diag.Diagnostics) { +func flattenLifecycleTransitions(ctx context.Context, apiObjects []awstypes.DataLakeLifecycleTransition) (types.Set, diag.Diagnostics) { var diags diag.Diagnostics elemType := types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleTransitionsTypes} - if len(apiObjects) == 0 { - return types.ListNull(elemType), diags + if len(apiObjects) == 0 || (apiObjects[0].Days == nil && apiObjects[0].StorageClass == nil) { + return types.SetValueMust(elemType, []attr.Value{}), diags } elems := []attr.Value{} @@ -595,17 +573,17 @@ func flattenLifecycleTransitions(ctx context.Context, apiObjects []awstypes.Data elems = append(elems, objVal) } - listVal, d := types.ListValue(elemType, elems) + setVal, d := types.SetValue(elemType, elems) diags.Append(d...) 
- return listVal, diags + return setVal, diags } func flattenReplicationConfiguration(ctx context.Context, apiObject *awstypes.DataLakeReplicationConfiguration) (types.List, diag.Diagnostics) { var diags diag.Diagnostics elemType := types.ObjectType{AttrTypes: dataLakeConfigurationsReplicationConfigurationTypes} - if apiObject == nil { + if apiObject == nil || (apiObject.Regions == nil && apiObject.RoleArn == nil) { return types.ListNull(elemType), diags } @@ -709,8 +687,7 @@ func expandLifecycleExpiration(tfList []dataLakeConfigurationsLifecycleExpiratio apiObject := &awstypes.DataLakeLifecycleExpiration{} if !tfObj.Days.IsNull() { - int32Days := int32(tfObj.Days.ValueInt64()) - apiObject.Days = aws.Int32(int32Days) + apiObject.Days = aws.Int32(int32(tfObj.Days.ValueInt64())) } return apiObject @@ -727,8 +704,7 @@ func expandLifecycleTransitions(tfList []dataLakeConfigurationsLifecycleTransiti item := awstypes.DataLakeLifecycleTransition{} if !tfObj.Days.IsNull() { - int32Days := int32(tfObj.Days.ValueInt64()) - item.Days = aws.Int32(int32Days) + item.Days = aws.Int32(int32(tfObj.Days.ValueInt64())) } if !tfObj.StorageClass.IsNull() { @@ -762,10 +738,10 @@ func expandReplicationConfiguration(ctx context.Context, tfList []dataLakeConfig var ( dataLakeConfigurations = map[string]attr.Type{ - "encryption_configuration": types.SetType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsEncryptionTypes}}, - "lifecycle_configuration": types.SetType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleTypes}}, + "encryption_configuration": types.ListType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsEncryptionTypes}}, + "lifecycle_configuration": types.ListType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleTypes}}, "region": types.StringType, - "replication_configuration": types.SetType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsReplicationConfigurationTypes}}, + "replication_configuration": 
types.ListType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsReplicationConfigurationTypes}}, } dataLakeConfigurationsEncryptionTypes = map[string]attr.Type{ @@ -782,7 +758,7 @@ var ( } dataLakeConfigurationsLifecycleTypes = map[string]attr.Type{ - "expiration": types.SetType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleExpirationTypes}}, + "expiration": types.ListType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleExpirationTypes}}, "transitions": types.SetType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleTransitionsTypes}}, } @@ -792,21 +768,25 @@ var ( } ) +// func (r *resourceDataLake) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) { +// r.List(ctx, request, response) +// } + type resourceDataLakeData struct { - ARN types.String `tfsdk:"arn"` - ID types.String `tfsdk:"id"` - MetaStoreManagerRoleArn types.String `tfsdk:"meta_store_manager_role_arn"` - Configurations types.List `tfsdk:"configurations"` - Tags types.Map `tfsdk:"tags"` - TagsAll types.Map `tfsdk:"tags_all"` - Timeouts timeouts.Value `tfsdk:"timeouts"` + ARN types.String `tfsdk:"arn"` + ID types.String `tfsdk:"id"` + MetaStoreManagerRoleArn types.String `tfsdk:"meta_store_manager_role_arn"` + Configurations types.Set `tfsdk:"configurations"` + // Tags types.Map `tfsdk:"tags"` + // TagsAll types.Map `tfsdk:"tags_all"` + Timeouts timeouts.Value `tfsdk:"timeouts"` } type dataLakeConfigurationsData struct { - EncryptionConfiguration types.Set `tfsdk:"encryption_configuration"` - LifecycleConfiguration types.Set `tfsdk:"lifecycle_configuration"` + EncryptionConfiguration types.List `tfsdk:"encryption_configuration"` + LifecycleConfiguration types.List `tfsdk:"lifecycle_configuration"` Region types.String `tfsdk:"region"` - ReplicationConfiguration types.Set `tfsdk:"replication_configuration"` + ReplicationConfiguration types.List 
`tfsdk:"replication_configuration"` } type dataLakeConfigurationsEncryption struct { @@ -814,8 +794,8 @@ type dataLakeConfigurationsEncryption struct { } type dataLakeConfigurationsLifecycle struct { - Expiration types.Set `tfsdk:"expiration"` - Transitions types.Set `tfsdk:"transitions"` + Expiration types.List `tfsdk:"expiration"` + Transitions types.Set `tfsdk:"transitions"` } type dataLakeConfigurationsLifecycleExpiration struct { @@ -835,39 +815,23 @@ type dataLakeConfigurationsReplicationConfiguration struct { func extractRegionFromARN(arn string) (string, error) { parts := strings.Split(arn, ":") if len(parts) < 4 { - return "", fmt.Errorf("invalid ARN format") + return "", fmt.Errorf("invalid ARN: %s", arn) } return parts[3], nil } -// refreshFromOutput writes state data from an AWS response object -// func (rd *resourceDataLakeData) refreshFromOutput(ctx context.Context, out *awstypes.DataLakeResource) diag.Diagnostics { -// var diags diag.Diagnostics - -// if out == nil { -// return diags -// } - -// rd.ARN = flex.StringToFramework(ctx, out.DataLakeArn) -// rd.Configurations, d = flattenDataLakeConfigurations(ctx, out) -// if out.Framework != nil { -// rd.FrameworkID = flex.StringToFramework(ctx, out.Framework.Id) -// } -// rd.ID = flex.StringToFramework(ctx, metadata.Id) -// rd.Name = flex.StringToFramework(ctx, metadata.Name) -// rd.Status = flex.StringValueToFramework(ctx, metadata.Status) - -// reportsDestination, d := flattenAssessmentReportsDestination(ctx, metadata.AssessmentReportsDestination) -// diags.Append(d...) -// rd.AssessmentReportsDestination = reportsDestination -// roles, d := flattenAssessmentRoles(ctx, metadata.Roles) -// diags.Append(d...) -// rd.RolesAll = roles -// scope, d := flattenAssessmentScope(ctx, metadata.Scope) -// diags.Append(d...) 
-// rd.Scope = scope - -// setTagsOut(ctx, out.Tags) - -// return diags -// } +func (rd *resourceDataLakeData) refreshFromOutput(ctx context.Context, out *awstypes.DataLakeResource) diag.Diagnostics { + var diags diag.Diagnostics + + if out == nil { + return diags + } + + rd.ARN = flex.StringToFramework(ctx, out.DataLakeArn) + rd.ID = flex.StringToFramework(ctx, out.DataLakeArn) + configurations, d := flattenDataLakeConfigurations(ctx, []*awstypes.DataLakeResource{out}) + diags.Append(d...) + rd.Configurations = configurations + + return diags +} diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index 94452155d8f8..b7414005c424 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" tfsecuritylake "github.com/hashicorp/terraform-provider-aws/internal/service/securitylake" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -108,11 +109,12 @@ func testAccCheckDataLakeDestroy(ctx context.Context) resource.TestCheckFunc { } _, err := tfsecuritylake.FindDataLakeByID(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue + } + if err != nil { - var nfe *types.ResourceNotFoundException - if errors.As(err, &nfe) { - return nil - } return err } @@ -140,9 +142,7 @@ func testAccCheckDataLakeExists(ctx context.Context, name string, datalake *type return create.Error(names.SecurityLake, create.ErrActionCheckingExistence, tfsecuritylake.ResNameDataLake, rs.Primary.ID, err) } - dl := &resp.DataLakes[0] - - *datalake = *dl + *datalake = *resp return nil } @@ -154,8 +154,23 @@ func testAccDataLakeConfig_basic() string { meta_store_manager_role_arn = "arn:aws:iam::182198062889:role/service-role/AmazonSecurityLakeMetaStoreManager" 
configurations { - region = "eu-west-2" - } - } + + encryption_configuration { + kms_key_id = "S3_MANAGED_KEY" + } + + lifecycle_configuration { + transitions { + days = 31 + storage_class = "STANDARD_IA" + } + expiration { + days = 300 + } + } + + region = "eu-west-2" + } + } `) } diff --git a/internal/service/securitylake/generate.go b/internal/service/securitylake/generate.go index 6e045f9022e5..7989b98eea6a 100644 --- a/internal/service/securitylake/generate.go +++ b/internal/service/securitylake/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ServiceTagsMap -KVTValues -SkipTypesImp -ListTags -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ServiceTagsSlice -ListTags -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. diff --git a/internal/service/securitylake/service_package_gen.go b/internal/service/securitylake/service_package_gen.go index f5c759a7281f..7f35a30258e9 100644 --- a/internal/service/securitylake/service_package_gen.go +++ b/internal/service/securitylake/service_package_gen.go @@ -23,9 +23,6 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic { Factory: newResourceDataLake, Name: "Data Lake", - Tags: &types.ServicePackageResourceTags{ - IdentifierAttribute: "arn", - }, }, } } diff --git a/internal/service/securitylake/tags_gen.go b/internal/service/securitylake/tags_gen.go new file mode 100644 index 000000000000..05a42fb3fbcd --- /dev/null +++ b/internal/service/securitylake/tags_gen.go @@ -0,0 +1,146 @@ +// Code generated by internal/generate/tags/main.go; DO NOT EDIT. 
+package securitylake + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/securitylake" + awstypes "github.com/aws/aws-sdk-go-v2/service/securitylake/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/logging" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// listTags lists securitylake service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func listTags(ctx context.Context, conn *securitylake.Client, identifier string) (tftags.KeyValueTags, error) { + input := &securitylake.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(ctx, input) + + if err != nil { + return tftags.New(ctx, nil), err + } + + return KeyValueTags(ctx, output.Tags), nil +} + +// ListTags lists securitylake service tags and set them in Context. +// It is called from outside this package. +func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { + tags, err := listTags(ctx, meta.(*conns.AWSClient).SecurityLakeClient(ctx), identifier) + + if err != nil { + return err + } + + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = types.Some(tags) + } + + return nil +} + +// []*SERVICE.Tag handling + +// Tags returns securitylake service tags. 
+func Tags(tags tftags.KeyValueTags) []awstypes.Tag { + result := make([]awstypes.Tag, 0, len(tags)) + + for k, v := range tags.Map() { + tag := awstypes.Tag{ + Key: aws.String(k), + Value: aws.String(v), + } + + result = append(result, tag) + } + + return result +} + +// KeyValueTags creates tftags.KeyValueTags from securitylake service tags. +func KeyValueTags(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags { + m := make(map[string]*string, len(tags)) + + for _, tag := range tags { + m[aws.ToString(tag.Key)] = tag.Value + } + + return tftags.New(ctx, m) +} + +// getTagsIn returns securitylake service tags from Context. +// nil is returned if there are no input tags. +func getTagsIn(ctx context.Context) []awstypes.Tag { + if inContext, ok := tftags.FromContext(ctx); ok { + if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { + return tags + } + } + + return nil +} + +// setTagsOut sets securitylake service tags in Context. +func setTagsOut(ctx context.Context, tags []awstypes.Tag) { + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + } +} + +// updateTags updates securitylake service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func updateTags(ctx context.Context, conn *securitylake.Client, identifier string, oldTagsMap, newTagsMap any) error { + oldTags := tftags.New(ctx, oldTagsMap) + newTags := tftags.New(ctx, newTagsMap) + + ctx = tflog.SetField(ctx, logging.KeyResourceId, identifier) + + removedTags := oldTags.Removed(newTags) + removedTags = removedTags.IgnoreSystem(names.SecurityLake) + if len(removedTags) > 0 { + input := &securitylake.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: removedTags.Keys(), + } + + _, err := conn.UntagResource(ctx, input) + + if err != nil { + return fmt.Errorf("untagging resource (%s): %w", identifier, err) + } + } + + updatedTags := oldTags.Updated(newTags) + updatedTags = updatedTags.IgnoreSystem(names.SecurityLake) + if len(updatedTags) > 0 { + input := &securitylake.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: Tags(updatedTags), + } + + _, err := conn.TagResource(ctx, input) + + if err != nil { + return fmt.Errorf("tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// UpdateTags updates securitylake service tags. +// It is called from outside this package. 
+func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { + return updateTags(ctx, meta.(*conns.AWSClient).SecurityLakeClient(ctx), identifier, oldTags, newTags) +} From 8b100b9c42cb78ccd32fd721bbd63a8ad807b6a4 Mon Sep 17 00:00:00 2001 From: markos kandylis Date: Sun, 26 Nov 2023 22:37:38 +0000 Subject: [PATCH 05/45] Added tags and disapear tests --- internal/service/securitylake/data_lake.go | 27 ++++---- .../service/securitylake/data_lake_test.go | 63 ++++++++----------- internal/service/securitylake/exports_test.go | 9 +++ 3 files changed, 49 insertions(+), 50 deletions(-) create mode 100644 internal/service/securitylake/exports_test.go diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index 7ceb0f3f9ebd..8b17c546bfab 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" @@ -26,8 +27,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/framework" "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" - // tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" 
"github.com/hashicorp/terraform-provider-aws/names" ) @@ -68,8 +68,7 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques stringplanmodifier.RequiresReplace(), }, }, - // names.AttrTags: tftags.TagsAttribute(), - // names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + names.AttrTags: tftags.TagsAttribute(), }, Blocks: map[string]schema.Block{ "configurations": schema.SetNestedBlock{ @@ -180,7 +179,7 @@ func (r *resourceDataLake) Create(ctx context.Context, req resource.CreateReques in := &securitylake.CreateDataLakeInput{ Configurations: expanddataLakeConfigurations(ctx, configurations), MetaStoreManagerRoleArn: aws.String(plan.MetaStoreManagerRoleArn.ValueString()), - // Tags: getTagsIn(ctx), + Tags: getTagsIn(ctx), } out, err := conn.CreateDataLake(ctx, in) @@ -206,6 +205,9 @@ func (r *resourceDataLake) Create(ctx context.Context, req resource.CreateReques return } state := plan + fmt.Println(plan.Tags) + state.Tags = plan.Tags + state.MetaStoreManagerRoleArn = plan.MetaStoreManagerRoleArn resp.Diagnostics.Append(state.refreshFromOutput(ctx, waitOut)...) resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
} @@ -769,17 +771,16 @@ var ( ) // func (r *resourceDataLake) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) { -// r.List(ctx, request, response) +// r.SetTagsAll(ctx, request, response) // } type resourceDataLakeData struct { - ARN types.String `tfsdk:"arn"` - ID types.String `tfsdk:"id"` - MetaStoreManagerRoleArn types.String `tfsdk:"meta_store_manager_role_arn"` - Configurations types.Set `tfsdk:"configurations"` - // Tags types.Map `tfsdk:"tags"` - // TagsAll types.Map `tfsdk:"tags_all"` - Timeouts timeouts.Value `tfsdk:"timeouts"` + ARN types.String `tfsdk:"arn"` + ID types.String `tfsdk:"id"` + MetaStoreManagerRoleArn types.String `tfsdk:"meta_store_manager_role_arn"` + Configurations types.Set `tfsdk:"configurations"` + Tags types.Map `tfsdk:"tags"` + Timeouts timeouts.Value `tfsdk:"timeouts"` } type dataLakeConfigurationsData struct { diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index b7414005c424..d2504b5be413 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -61,43 +61,32 @@ func TestAccSecurityLakeDataLake_basic(t *testing.T) { }) } -// func TestAccSecurityLakeDataLake_disappears(t *testing.T) { -// ctx := acctest.Context(t) -// if testing.Short() { -// t.Skip("skipping long-running test in short mode") -// } - -// var datalake securitylake.DescribeDataLakeResponse -// rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) -// resourceName := "aws_securitylake_data_lake.test" - -// resource.ParallelTest(t, resource.TestCase{ -// PreCheck: func() { -// acctest.PreCheck(ctx, t) -// acctest.PreCheckPartitionHasService(t, names.SecurityLakeEndpointID) -// testAccPreCheck(t) -// }, -// ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeEndpointID), -// ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, -// CheckDestroy: testAccCheckDataLakeDestroy(ctx), 
-// Steps: []resource.TestStep{ -// { -// Config: testAccDataLakeConfig_basic(rName, testAccDataLakeVersionNewer), -// Check: resource.ComposeTestCheckFunc( -// testAccCheckDataLakeExists(ctx, resourceName, &datalake), -// // TIP: The Plugin-Framework disappears helper is similar to the Plugin-SDK version, -// // but expects a new resource factory function as the third argument. To expose this -// // private function to the testing package, you may need to add a line like the following -// // to exports_test.go: -// // -// // var ResourceDataLake = newResourceDataLake -// acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfsecuritylake.ResourceDataLake, resourceName), -// ), -// ExpectNonEmptyPlan: true, -// }, -// }, -// }) -// } +func TestAccSecurityLakeDataLake_disappears(t *testing.T) { + ctx := acctest.Context(t) + var datalake types.DataLakeResource + // rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_securitylake_data_lake.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SecurityLake) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLake), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataLakeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataLakeConfig_basic(), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataLakeExists(ctx, resourceName, &datalake), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfsecuritylake.ResourceDataLake, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} func testAccCheckDataLakeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { diff --git a/internal/service/securitylake/exports_test.go b/internal/service/securitylake/exports_test.go new file mode 100644 index 000000000000..6d239c0a19fc --- /dev/null +++ 
b/internal/service/securitylake/exports_test.go @@ -0,0 +1,9 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package securitylake + +// Exports for use in tests only. +var ( + ResourceDataLake = newResourceDataLake +) From 70f107c2ae151e88488dd76a49d56307d3a37a53 Mon Sep 17 00:00:00 2001 From: markos kandylis Date: Tue, 28 Nov 2023 14:08:30 +0000 Subject: [PATCH 06/45] Removed iam --- internal/service/securitylake/data_lake.go | 48 +++++++++------ .../service/securitylake/data_lake_test.go | 61 ++++++++++--------- 2 files changed, 59 insertions(+), 50 deletions(-) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index 8b17c546bfab..cde230bd017c 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -18,6 +18,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" @@ -60,8 +61,8 @@ func (r *resourceDataLake) Metadata(_ context.Context, req resource.MetadataRequ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ - "arn": framework.ARNAttributeComputedOnly(), - "id": framework.IDAttribute(), + "datalake_arn": framework.ARNAttributeComputedOnly(), + "id": framework.IDAttribute(), "meta_store_manager_role_arn": schema.StringAttribute{ Required: true, PlanModifiers: []planmodifier.String{ @@ -85,6 +86,7 @@ func (r *resourceDataLake) Schema(ctx 
context.Context, req resource.SchemaReques "encryption_configuration": schema.ListNestedBlock{ Validators: []validator.List{ listvalidator.SizeAtMost(1), + listvalidator.SizeAtLeast(1), }, NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ @@ -100,12 +102,18 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques Validators: []validator.List{ listvalidator.SizeAtMost(1), }, + PlanModifiers: []planmodifier.List{ + listplanmodifier.UseStateForUnknown(), + }, NestedObject: schema.NestedBlockObject{ Blocks: map[string]schema.Block{ "expiration": schema.ListNestedBlock{ Validators: []validator.List{ listvalidator.SizeAtMost(1), }, + PlanModifiers: []planmodifier.List{ + listplanmodifier.UseStateForUnknown(), + }, NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "days": schema.Int64Attribute{ @@ -171,6 +179,7 @@ func (r *resourceDataLake) Create(ctx context.Context, req resource.CreateReques } var configurations []dataLakeConfigurationsData + resp.Diagnostics.Append(plan.Configurations.ElementsAs(ctx, &configurations, false)...) 
if resp.Diagnostics.HasError() { return @@ -192,24 +201,20 @@ func (r *resourceDataLake) Create(ctx context.Context, req resource.CreateReques return } - plan.ARN = flex.StringToFramework(ctx, out.DataLakes[0].DataLakeArn) + plan.DataLakeArn = flex.StringToFramework(ctx, out.DataLakes[0].DataLakeArn) createTimeout := r.CreateTimeout(ctx, plan.Timeouts) - waitOut, err := waitDataLakeCreated(ctx, conn, plan.ARN.ValueString(), createTimeout) - fmt.Println(waitOut) + waitOut, err := waitDataLakeCreated(ctx, conn, plan.DataLakeArn.ValueString(), createTimeout) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForCreation, ResNameDataLake, plan.ARN.ValueString(), err), + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForCreation, ResNameDataLake, plan.DataLakeArn.ValueString(), err), err.Error(), ) return } - state := plan - fmt.Println(plan.Tags) - state.Tags = plan.Tags - state.MetaStoreManagerRoleArn = plan.MetaStoreManagerRoleArn - resp.Diagnostics.Append(state.refreshFromOutput(ctx, waitOut)...) - resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + + resp.Diagnostics.Append(plan.refreshFromOutput(ctx, waitOut)...) + resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) } func (r *resourceDataLake) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { @@ -480,6 +485,7 @@ func flattenDataLakeConfigurations(ctx context.Context, apiObjects []*awstypes.D diags.Append(d...) elems = append(elems, objVal) + fmt.Println("End of Flattener") } setVal, d := types.SetValue(elemType, elems) @@ -617,23 +623,28 @@ func expanddataLakeConfigurations(ctx context.Context, tfList []dataLakeConfigur diags.Append(tfObj.EncryptionConfiguration.ElementsAs(ctx, &encryptionConfiguration, false)...) diags.Append(tfObj.LifecycleConfiguration.ElementsAs(ctx, &lifecycleConfiguration, false)...) 
diags.Append(tfObj.ReplicationConfiguration.ElementsAs(ctx, &replicationConfiguration, false)...) + lifecycleConfiguration, d := expandLifecycleConfiguration(ctx, lifecycleConfiguration) + diags.Append(d...) item := awstypes.DataLakeConfiguration{ Region: aws.String(tfObj.Region.ValueString()), } if !tfObj.EncryptionConfiguration.IsNull() { + fmt.Println(tfObj.EncryptionConfiguration) item.EncryptionConfiguration = expandEncryptionConfiguration(encryptionConfiguration) } if !tfObj.LifecycleConfiguration.IsNull() { - item.LifecycleConfiguration, _ = expandLifecycleConfiguration(ctx, lifecycleConfiguration) + fmt.Println(tfObj.LifecycleConfiguration) + item.LifecycleConfiguration = lifecycleConfiguration } if !tfObj.ReplicationConfiguration.IsNull() { + fmt.Println(tfObj.ReplicationConfiguration) item.ReplicationConfiguration = expandReplicationConfiguration(ctx, replicationConfiguration) } - + fmt.Println("End of Expander") apiObject = append(apiObject, item) } @@ -641,13 +652,9 @@ func expanddataLakeConfigurations(ctx context.Context, tfList []dataLakeConfigur } func expandEncryptionConfiguration(tfList []dataLakeConfigurationsEncryption) *awstypes.DataLakeEncryptionConfiguration { - if len(tfList) == 0 { - return nil - } tfObj := tfList[0] apiObject := &awstypes.DataLakeEncryptionConfiguration{} - if !tfObj.KmsKeyID.IsNull() { apiObject.KmsKeyId = aws.String(tfObj.KmsKeyID.ValueString()) } @@ -775,7 +782,7 @@ var ( // } type resourceDataLakeData struct { - ARN types.String `tfsdk:"arn"` + DataLakeArn types.String `tfsdk:"datalake_arn"` ID types.String `tfsdk:"id"` MetaStoreManagerRoleArn types.String `tfsdk:"meta_store_manager_role_arn"` Configurations types.Set `tfsdk:"configurations"` @@ -828,10 +835,11 @@ func (rd *resourceDataLakeData) refreshFromOutput(ctx context.Context, out *awst return diags } - rd.ARN = flex.StringToFramework(ctx, out.DataLakeArn) + rd.DataLakeArn = flex.StringToFramework(ctx, out.DataLakeArn) rd.ID = flex.StringToFramework(ctx, 
out.DataLakeArn) configurations, d := flattenDataLakeConfigurations(ctx, []*awstypes.DataLakeResource{out}) diags.Append(d...) + fmt.Println(configurations) rd.Configurations = configurations return diags diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index d2504b5be413..a6f4af8a3cf5 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -40,15 +40,14 @@ func TestAccSecurityLakeDataLake_basic(t *testing.T) { Config: testAccDataLakeConfig_basic(), Check: resource.ComposeTestCheckFunc( testAccCheckDataLakeExists(ctx, resourceName, &datalake), - // resource.TestCheckResourceAttr(resourceName, "auto_minor_version_upgrade", "false"), - // resource.TestCheckResourceAttrSet(resourceName, "maintenance_window_start_time.0.day_of_week"), - // resource.TestCheckTypeSetElemNestedAttrs(resourceName, "user.*", map[string]string{ - // "console_access": "false", - // "groups.#": "0", - // "username": "Test", - // "password": "TestTest1234", - // }), - // acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "securitylake", regexp.MustCompile(`data-lake/:+.`)), + // resource.TestCheckResourceAttrPair(resourceName, "instance_id", "aws_instance.test", "id"), + // resource.TestCheckResourceAttr(resourceName, "configurations.#", "1"), + // resource.TestCheckResourceAttr(resourceName, "targets.0.key", "InstanceIds"), + // resource.TestCheckResourceAttr(resourceName, "targets.0.values.#", "1"), + // resource.TestCheckResourceAttrPair(resourceName, "targets.0.values.0", "aws_instance.test", "id"), + // resource.TestCheckResourceAttr(resourceName, "parameters.%", "0"), + // resource.TestCheckResourceAttr(resourceName, "document_version", "$DEFAULT"), + // resource.TestCheckResourceAttr(resourceName, "name", rName), ), }, { @@ -139,27 +138,29 @@ func testAccCheckDataLakeExists(ctx context.Context, name string, datalake *type func testAccDataLakeConfig_basic() 
string { return fmt.Sprintf(` - resource "aws_securitylake_data_lake" "test" { - meta_store_manager_role_arn = "arn:aws:iam::182198062889:role/service-role/AmazonSecurityLakeMetaStoreManager" - - configurations { - - encryption_configuration { - kms_key_id = "S3_MANAGED_KEY" - } - - lifecycle_configuration { - transitions { - days = 31 - storage_class = "STANDARD_IA" - } - expiration { - days = 300 - } - } - - region = "eu-west-2" - } +resource "aws_securitylake_data_lake" "test" { + meta_store_manager_role_arn = "arn:aws:iam::12345:role/service-role/AmazonSecurityLakeMetaStoreManager" + + configurations { + region = "eu-west-1" + encryption_configuration { + kms_key_id = "S3_MANAGED_KEY" + } + + lifecycle_configuration { + transitions { + days = 31 + storage_class = "STANDARD_IA" + } + expiration { + days = 300 + } + } + replication_configuration { + role_arn = "arn:aws:iam::123454:role/service-role/AmazonSecurityLakeS3ReplicationRole" + regions = ["ap-south-1"] } + } +} `) } From d68af9ead1e9192bb2e0c4656d29233580195f60 Mon Sep 17 00:00:00 2001 From: markos kandylis Date: Thu, 30 Nov 2023 11:33:16 +0000 Subject: [PATCH 07/45] All tests that include the encryption run succesffuly --- internal/service/securitylake/data_lake.go | 96 ++-- .../service/securitylake/data_lake_test.go | 473 +++++++++++++++++- 2 files changed, 490 insertions(+), 79 deletions(-) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index cde230bd017c..b15892443507 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package securitylake import ( @@ -18,7 +21,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" @@ -26,6 +28,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/framework" "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -75,6 +78,7 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques "configurations": schema.SetNestedBlock{ Validators: []validator.Set{ setvalidator.SizeAtLeast(1), + setvalidator.SizeAtMost(1), }, NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ @@ -84,10 +88,6 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques }, Blocks: map[string]schema.Block{ "encryption_configuration": schema.ListNestedBlock{ - Validators: []validator.List{ - listvalidator.SizeAtMost(1), - listvalidator.SizeAtLeast(1), - }, NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "kms_key_id": schema.StringAttribute{ @@ -102,18 +102,12 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques Validators: []validator.List{ listvalidator.SizeAtMost(1), }, - PlanModifiers: []planmodifier.List{ - 
listplanmodifier.UseStateForUnknown(), - }, NestedObject: schema.NestedBlockObject{ Blocks: map[string]schema.Block{ "expiration": schema.ListNestedBlock{ Validators: []validator.List{ listvalidator.SizeAtMost(1), }, - PlanModifiers: []planmodifier.List{ - listplanmodifier.UseStateForUnknown(), - }, NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "days": schema.Int64Attribute{ @@ -123,9 +117,6 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques }, }, "transitions": schema.SetNestedBlock{ - Validators: []validator.Set{ - setvalidator.SizeAtMost(1), - }, NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "days": schema.Int64Attribute{ @@ -169,15 +160,14 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques } func (r *resourceDataLake) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - conn := r.Meta().SecurityLakeClient(ctx) - var plan resourceDataLakeData - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) if resp.Diagnostics.HasError() { return } + conn := r.Meta().SecurityLakeClient(ctx) + var configurations []dataLakeConfigurationsData resp.Diagnostics.Append(plan.Configurations.ElementsAs(ctx, &configurations, false)...) @@ -186,7 +176,7 @@ func (r *resourceDataLake) Create(ctx context.Context, req resource.CreateReques } in := &securitylake.CreateDataLakeInput{ - Configurations: expanddataLakeConfigurations(ctx, configurations), + Configurations: expandDataLakeConfigurations(ctx, configurations), MetaStoreManagerRoleArn: aws.String(plan.MetaStoreManagerRoleArn.ValueString()), Tags: getTagsIn(ctx), } @@ -212,7 +202,6 @@ func (r *resourceDataLake) Create(ctx context.Context, req resource.CreateReques ) return } - resp.Diagnostics.Append(plan.refreshFromOutput(ctx, waitOut)...) resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) 
} @@ -256,7 +245,6 @@ func (r *resourceDataLake) Update(ctx context.Context, req resource.UpdateReques } if !plan.Configurations.Equal(state.Configurations) { - var configurations []dataLakeConfigurationsData resp.Diagnostics.Append(plan.Configurations.ElementsAs(ctx, &configurations, false)...) if resp.Diagnostics.HasError() { @@ -264,7 +252,7 @@ func (r *resourceDataLake) Update(ctx context.Context, req resource.UpdateReques } in := &securitylake.UpdateDataLakeInput{ - Configurations: expanddataLakeConfigurations(ctx, configurations), + Configurations: expandDataLakeConfigurations(ctx, configurations), } out, err := conn.UpdateDataLake(ctx, in) @@ -345,8 +333,8 @@ func (r *resourceDataLake) ImportState(ctx context.Context, req resource.ImportS func waitDataLakeCreated(ctx context.Context, conn *securitylake.Client, id string, timeout time.Duration) (*awstypes.DataLakeResource, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{string(awstypes.DataLakeStatusInitialized)}, - Target: []string{string(awstypes.DataLakeStatusCompleted)}, + Pending: enum.Slice(awstypes.DataLakeStatusInitialized), + Target: enum.Slice(awstypes.DataLakeStatusCompleted), Refresh: createStatusDataLake(ctx, conn, id), Timeout: timeout, NotFoundChecks: 20, @@ -363,8 +351,8 @@ func waitDataLakeCreated(ctx context.Context, conn *securitylake.Client, id stri func waitDataLakeUpdated(ctx context.Context, conn *securitylake.Client, id string, timeout time.Duration) (*securitylake.ListDataLakesOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{string(awstypes.DataLakeStatusPending)}, - Target: []string{string(awstypes.DataLakeStatusCompleted)}, + Pending: enum.Slice(awstypes.DataLakeStatusPending, awstypes.DataLakeStatusInitialized), + Target: enum.Slice(awstypes.DataLakeStatusCompleted), Refresh: updateStatusDataLake(ctx, conn, id), Timeout: timeout, NotFoundChecks: 20, @@ -381,7 +369,7 @@ func waitDataLakeUpdated(ctx context.Context, conn 
*securitylake.Client, id stri func waitDataLakeDeleted(ctx context.Context, conn *securitylake.Client, id string, timeout time.Duration) (*securitylake.ListDataLakesOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{string(awstypes.DataLakeStatusInitialized), string(awstypes.DataLakeStatusCompleted)}, + Pending: enum.Slice(awstypes.DataLakeStatusInitialized, awstypes.DataLakeStatusCompleted), Target: []string{}, Refresh: createStatusDataLake(ctx, conn, id), Timeout: timeout, @@ -464,15 +452,11 @@ func flattenDataLakeConfigurations(ctx context.Context, apiObjects []*awstypes.D elems := []attr.Value{} for _, apiObject := range apiObjects { - encryptionConfiguration, d := flattenEncryptionConfiguration(ctx, apiObject.EncryptionConfiguration) - fmt.Println(encryptionConfiguration) diags.Append(d...) lifecycleExpiration, d := flattenLifeCycleConfiguration(ctx, apiObject.LifecycleConfiguration) - fmt.Println(lifecycleExpiration) diags.Append(d...) replicationConfiguration, d := flattenReplicationConfiguration(ctx, apiObject.ReplicationConfiguration) - fmt.Println(replicationConfiguration) diags.Append(d...) obj := map[string]attr.Value{ @@ -485,7 +469,6 @@ func flattenDataLakeConfigurations(ctx context.Context, apiObjects []*awstypes.D diags.Append(d...) 
elems = append(elems, objVal) - fmt.Println("End of Flattener") } setVal, d := types.SetValue(elemType, elems) @@ -494,24 +477,18 @@ func flattenDataLakeConfigurations(ctx context.Context, apiObjects []*awstypes.D return setVal, diags } -func flattenLifeCycleConfiguration(ctx context.Context, apiObject *awstypes.DataLakeLifecycleConfiguration) (types.List, diag.Diagnostics) { +func flattenEncryptionConfiguration(ctx context.Context, apiObject *awstypes.DataLakeEncryptionConfiguration) (types.List, diag.Diagnostics) { var diags diag.Diagnostics - elemType := types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleTypes} + elemType := types.ObjectType{AttrTypes: dataLakeConfigurationsEncryptionTypes} if apiObject == nil { return types.ListNull(elemType), diags } - expiration, d := flattenLifecycleExpiration(ctx, apiObject.Expiration) - diags.Append(d...) - transitions, d := flattenLifecycleTransitions(ctx, apiObject.Transitions) - diags.Append(d...) - obj := map[string]attr.Value{ - "expiration": expiration, - "transitions": transitions, + "kms_key_id": flex.StringToFramework(ctx, apiObject.KmsKeyId), } - objVal, d := types.ObjectValue(dataLakeConfigurationsLifecycleTypes, obj) + objVal, d := types.ObjectValue(dataLakeConfigurationsEncryptionTypes, obj) diags.Append(d...) 
listVal, d := types.ListValue(elemType, []attr.Value{objVal}) @@ -520,18 +497,24 @@ func flattenLifeCycleConfiguration(ctx context.Context, apiObject *awstypes.Data return listVal, diags } -func flattenEncryptionConfiguration(ctx context.Context, apiObject *awstypes.DataLakeEncryptionConfiguration) (types.List, diag.Diagnostics) { +func flattenLifeCycleConfiguration(ctx context.Context, apiObject *awstypes.DataLakeLifecycleConfiguration) (types.List, diag.Diagnostics) { var diags diag.Diagnostics - elemType := types.ObjectType{AttrTypes: dataLakeConfigurationsEncryptionTypes} + elemType := types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleTypes} - if apiObject == nil { + if apiObject == nil || (apiObject.Expiration == nil && len(apiObject.Transitions) == 0) { return types.ListNull(elemType), diags } + expiration, d := flattenLifecycleExpiration(ctx, apiObject.Expiration) + diags.Append(d...) + transitions, d := flattenLifecycleTransitions(ctx, apiObject.Transitions) + diags.Append(d...) + obj := map[string]attr.Value{ - "kms_key_id": flex.StringToFramework(ctx, apiObject.KmsKeyId), + "expiration": expiration, + "transitions": transitions, } - objVal, d := types.ObjectValue(dataLakeConfigurationsEncryptionTypes, obj) + objVal, d := types.ObjectValue(dataLakeConfigurationsLifecycleTypes, obj) diags.Append(d...) 
listVal, d := types.ListValue(elemType, []attr.Value{objVal}) @@ -544,7 +527,7 @@ func flattenLifecycleExpiration(ctx context.Context, apiObject *awstypes.DataLak var diags diag.Diagnostics elemType := types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleExpirationTypes} - if apiObject == nil || apiObject.Days == nil { + if apiObject == nil { return types.ListNull(elemType), diags } @@ -565,7 +548,7 @@ func flattenLifecycleTransitions(ctx context.Context, apiObjects []awstypes.Data var diags diag.Diagnostics elemType := types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleTransitionsTypes} - if len(apiObjects) == 0 || (apiObjects[0].Days == nil && apiObjects[0].StorageClass == nil) { + if len(apiObjects) == 0 { return types.SetValueMust(elemType, []attr.Value{}), diags } @@ -608,7 +591,7 @@ func flattenReplicationConfiguration(ctx context.Context, apiObject *awstypes.Da return listVal, diags } -func expanddataLakeConfigurations(ctx context.Context, tfList []dataLakeConfigurationsData) []awstypes.DataLakeConfiguration { +func expandDataLakeConfigurations(ctx context.Context, tfList []dataLakeConfigurationsData) []awstypes.DataLakeConfiguration { var diags diag.Diagnostics if len(tfList) == 0 { return nil @@ -620,7 +603,6 @@ func expanddataLakeConfigurations(ctx context.Context, tfList []dataLakeConfigur var replicationConfiguration []dataLakeConfigurationsReplicationConfiguration for _, tfObj := range tfList { - diags.Append(tfObj.EncryptionConfiguration.ElementsAs(ctx, &encryptionConfiguration, false)...) diags.Append(tfObj.LifecycleConfiguration.ElementsAs(ctx, &lifecycleConfiguration, false)...) diags.Append(tfObj.ReplicationConfiguration.ElementsAs(ctx, &replicationConfiguration, false)...) 
lifecycleConfiguration, d := expandLifecycleConfiguration(ctx, lifecycleConfiguration) @@ -631,20 +613,17 @@ func expanddataLakeConfigurations(ctx context.Context, tfList []dataLakeConfigur } if !tfObj.EncryptionConfiguration.IsNull() { - fmt.Println(tfObj.EncryptionConfiguration) item.EncryptionConfiguration = expandEncryptionConfiguration(encryptionConfiguration) } if !tfObj.LifecycleConfiguration.IsNull() { - fmt.Println(tfObj.LifecycleConfiguration) item.LifecycleConfiguration = lifecycleConfiguration } if !tfObj.ReplicationConfiguration.IsNull() { - fmt.Println(tfObj.ReplicationConfiguration) item.ReplicationConfiguration = expandReplicationConfiguration(ctx, replicationConfiguration) } - fmt.Println("End of Expander") + apiObject = append(apiObject, item) } @@ -652,6 +631,9 @@ func expanddataLakeConfigurations(ctx context.Context, tfList []dataLakeConfigur } func expandEncryptionConfiguration(tfList []dataLakeConfigurationsEncryption) *awstypes.DataLakeEncryptionConfiguration { + if len(tfList) == 0 { + return nil + } tfObj := tfList[0] apiObject := &awstypes.DataLakeEncryptionConfiguration{} @@ -777,10 +759,6 @@ var ( } ) -// func (r *resourceDataLake) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) { -// r.SetTagsAll(ctx, request, response) -// } - type resourceDataLakeData struct { DataLakeArn types.String `tfsdk:"datalake_arn"` ID types.String `tfsdk:"id"` @@ -839,7 +817,7 @@ func (rd *resourceDataLakeData) refreshFromOutput(ctx context.Context, out *awst rd.ID = flex.StringToFramework(ctx, out.DataLakeArn) configurations, d := flattenDataLakeConfigurations(ctx, []*awstypes.DataLakeResource{out}) diags.Append(d...) 
- fmt.Println(configurations) + rd.Configurations = configurations return diags diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index a6f4af8a3cf5..340dcae41704 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package securitylake_test import ( @@ -7,6 +10,7 @@ import ( "testing" "github.com/aws/aws-sdk-go-v2/service/securitylake/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -24,7 +28,7 @@ func TestAccSecurityLakeDataLake_basic(t *testing.T) { } var datalake types.DataLakeResource - // rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_securitylake_data_lake.test" resource.ParallelTest(t, resource.TestCase{ @@ -37,24 +41,184 @@ func TestAccSecurityLakeDataLake_basic(t *testing.T) { CheckDestroy: testAccCheckDataLakeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataLakeConfig_basic(), + Config: testAccDataLakeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataLakeExists(ctx, resourceName, &datalake), - // resource.TestCheckResourceAttrPair(resourceName, "instance_id", "aws_instance.test", "id"), - // resource.TestCheckResourceAttr(resourceName, "configurations.#", "1"), - // resource.TestCheckResourceAttr(resourceName, "targets.0.key", "InstanceIds"), - // resource.TestCheckResourceAttr(resourceName, "targets.0.values.#", "1"), - // resource.TestCheckResourceAttrPair(resourceName, "targets.0.values.0", "aws_instance.test", "id"), - // resource.TestCheckResourceAttr(resourceName, "parameters.%", "0"), 
- // resource.TestCheckResourceAttr(resourceName, "document_version", "$DEFAULT"), - // resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckTypeSetElemAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), + resource.TestCheckResourceAttr(resourceName, "configurations.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), ), }, { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn"}, + ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn", "tags"}, + }, + }, + }) +} + +func TestAccSecurityLakeDataLake_lifeCycle(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var datalake types.DataLakeResource + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_securitylake_data_lake.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SecurityLake) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLake), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataLakeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataLakeConfig_lifeCycle(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataLakeExists(ctx, resourceName, &datalake), + resource.TestCheckTypeSetElemAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), + resource.TestCheckResourceAttr(resourceName, "configurations.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.#", "1"), + 
resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.#", "2"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.0.days", "31"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.0.storage_class", "STANDARD_IA"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.1.days", "80"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.1.storage_class", "ONEZONE_IA"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.expiration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.expiration.0.days", "300"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn", "tags"}, + }, + }, + }) +} + +func TestAccSecurityLakeDataLake_lifeCycleUpdate(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var datalake types.DataLakeResource + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_securitylake_data_lake.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SecurityLake) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLake), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataLakeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: 
testAccDataLakeConfig_lifeCycle(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataLakeExists(ctx, resourceName, &datalake), + resource.TestCheckTypeSetElemAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), + resource.TestCheckResourceAttr(resourceName, "configurations.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.#", "2"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.0.days", "31"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.0.storage_class", "STANDARD_IA"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.1.days", "80"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.1.storage_class", "ONEZONE_IA"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.expiration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.expiration.0.days", "300"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn", "tags"}, + }, + { + Config: testAccDataLakeConfig_lifeCycleUpdate(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataLakeExists(ctx, resourceName, &datalake), + resource.TestCheckTypeSetElemAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), + 
resource.TestCheckResourceAttr(resourceName, "configurations.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.0.days", "31"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.0.storage_class", "STANDARD_IA"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.expiration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.expiration.0.days", "300"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn", "tags"}, + }, + }, + }) +} + +func TestAccSecurityLakeDataLake_replication(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var datalake types.DataLakeResource + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_securitylake_data_lake.region_2" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SecurityLake) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLake), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataLakeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataLakeConfig_replication(rName), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckDataLakeExists(ctx, resourceName, &datalake), + resource.TestCheckTypeSetElemAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), + resource.TestCheckResourceAttr(resourceName, "configurations.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.0.days", "31"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.0.storage_class", "STANDARD_IA"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.expiration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.expiration.0.days", "300"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.replication_configuration.#", "1"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "configurations.0.replication_configuration.0.role_arn", "aws_iam_role.datalake_s3_replication", "arn"), + resource.TestCheckResourceAttr(resourceName, "configurations.0.replication_configuration.0.regions.#", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn", "tags"}, }, }, }) @@ -63,7 +227,7 @@ func TestAccSecurityLakeDataLake_basic(t *testing.T) { func TestAccSecurityLakeDataLake_disappears(t *testing.T) { ctx := acctest.Context(t) var datalake types.DataLakeResource - // rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName 
:= sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_securitylake_data_lake.test" resource.ParallelTest(t, resource.TestCase{ @@ -76,7 +240,7 @@ func TestAccSecurityLakeDataLake_disappears(t *testing.T) { CheckDestroy: testAccCheckDataLakeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataLakeConfig_basic(), + Config: testAccDataLakeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataLakeExists(ctx, resourceName, &datalake), acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfsecuritylake.ResourceDataLake, resourceName), @@ -136,13 +300,245 @@ func testAccCheckDataLakeExists(ctx context.Context, name string, datalake *type } } -func testAccDataLakeConfig_basic() string { +func testAccDataLakeConfigBaseConfig(rName string) string { + //lintignore:AWSAT003,AWSAT005 return fmt.Sprintf(` + + +data "aws_caller_identity" "current" {} + +resource "aws_iam_role" "meta_store_manager" { + name = "AmazonSecurityLakeMetaStoreManager" + path = "/service-role/" + assume_role_policy = < Date: Thu, 30 Nov 2023 14:16:47 +0000 Subject: [PATCH 08/45] Added Document and changelog --- .changelog/29376.txt | 3 + .../r/securitylake_data_lake.html.markdown | 100 +++++++++++++++--- .../r/securitylake_data_lake2.html.markdown | 60 ----------- ...ylake_exception_subscription.html.markdown | 60 ----------- ...ylake_securitylake_data_lake.html.markdown | 60 ----------- 5 files changed, 86 insertions(+), 197 deletions(-) create mode 100644 .changelog/29376.txt delete mode 100644 website/docs/r/securitylake_data_lake2.html.markdown delete mode 100644 website/docs/r/securitylake_exception_subscription.html.markdown delete mode 100644 website/docs/r/securitylake_securitylake_data_lake.html.markdown diff --git a/.changelog/29376.txt b/.changelog/29376.txt new file mode 100644 index 000000000000..e5eda46c5d05 --- /dev/null +++ b/.changelog/29376.txt @@ -0,0 +1,3 @@ +```release-note:new-resource 
+resource/aws_securitylake_data_lake +``` diff --git a/website/docs/r/securitylake_data_lake.html.markdown b/website/docs/r/securitylake_data_lake.html.markdown index b8b33966be3e..cf0962087754 100644 --- a/website/docs/r/securitylake_data_lake.html.markdown +++ b/website/docs/r/securitylake_data_lake.html.markdown @@ -5,24 +5,54 @@ page_title: "AWS: aws_securitylake_data_lake" description: |- Terraform resource for managing an AWS Security Lake Data Lake. --- -` + # Resource: aws_securitylake_data_lake Terraform resource for managing an AWS Security Lake Data Lake. ## Example Usage +```terraform +resource "aws_securitylake_data_lake" "example" { + meta_store_manager_role_arn = aws_iam_role.meta_store_manager.arn + + configurations { + region = "eu-west-1" + + encryption_configuration { + kms_key_id = "S3_MANAGED_KEY" + } + + lifecycle_configuration { + transitions { + days = 31 + storage_class = "STANDARD_IA" + } + transitions { + days = 80 + storage_class = "ONEZONE_IA" + } + expiration { + days = 300 + } + } + } +} +``` + ### Basic Usage ```terraform resource "aws_securitylake_data_lake" "example" { + meta_store_manager_role_arn = aws_iam_role.meta_store_manager.arn + + configurations { + region = "eu-west-1" + + encryption_configuration { + kms_key_id = "S3_MANAGED_KEY" + } + } } ``` @@ -30,18 +60,45 @@ resource "aws_securitylake_data_lake" "example" { The following arguments are required: -* `example_arg` - (Required) Concise argument description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. +* `meta_store_manager_role_arn` - (Required)The Amazon Resource Name (ARN) used to create and update the AWS Glue table. This table contains partitions generated by the ingestion and normalization of AWS log sources and custom sources. 
+* `configurations` - (Required) Specify the Region or Regions that will contribute data to the rollup region. + +Configurations support the following: + +* `region` - (Required) The AWS Regions where Security Lake is automatically enabled. +* `encryption_configuration` - (Optional) Provides encryption details of Amazon Security Lake object. +* `lifecycle_configuration` - (Optional) Provides lifecycle details of Amazon Security Lake object. +* `replication_configuration` - (Optional) Provides replication details of Amazon Security Lake object. + +Encryption Configuration support the following: + +* `kms_key_id` - (Optional) The id of KMS encryption key used by Amazon Security Lake to encrypt the Security Lake object. -The following arguments are optional: +Lifecycle Configuration support the following: -* `optional_arg` - (Optional) Concise argument description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. +* `expiration` - (Optional) Provides data expiration details of Amazon Security Lake object. +* `transitions` - (Optional) Provides data storage transition details of Amazon Security Lake object. -## Attributes Reference +Expiration Configuration support the following: -In addition to all arguments above, the following attributes are exported: +* `days` - (Optional) Number of days before data expires in the Amazon Security Lake object. + +Transitions support the following: + +* `days` - (Optional) Number of days before data transitions to a different S3 Storage Class in the Amazon Security Lake object. +* `storage_class` - (Optional) The range of storage classes that you can choose from based on the data access, resiliency, and cost requirements of your workloads. 
+ +Replication Configuration support the following: + +* `regions` - (Optional) Replication enables automatic, asynchronous copying of objects across Amazon S3 buckets. Amazon S3 buckets that are configured for object replication can be owned by the same AWS account or by different accounts. You can replicate objects to a single destination bucket or to multiple destination buckets. The destination buckets can be in different AWS Regions or within the same Region as the source bucket. +* `role_arn` - (Optional) Replication settings for the Amazon S3 buckets. This parameter uses the AWS Identity and Access Management (IAM) role you created that is managed by Security Lake, to ensure the replication setting is correct. + + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: * `arn` - ARN of the Data Lake. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. -* `example_attribute` - Concise description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. ## Timeouts @@ -53,8 +110,17 @@ In addition to all arguments above, the following attributes are exported: ## Import -Security Lake Data Lake can be imported using the `example_id_arg`, e.g., +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Security Hub standards subscriptions using the standards subscription ARN. 
For example: +```terraform +import { + to = aws_securitylake_data_lake.example + id = "arn:aws:securitylake:eu-west-1:123456789012:data-lake/default" +} ``` -$ terraform import aws_securitylake_data_lake.example rft-8012925589 -``` + +Using `terraform import`, import Security Hub standards subscriptions using the standards subscription ARN. For example: + +```console +% terraform import aws_securitylake_data_lake.example arn:aws:securitylake:eu-west-1:123456789012:data-lake/default +``` \ No newline at end of file diff --git a/website/docs/r/securitylake_data_lake2.html.markdown b/website/docs/r/securitylake_data_lake2.html.markdown deleted file mode 100644 index d66644bb3fa8..000000000000 --- a/website/docs/r/securitylake_data_lake2.html.markdown +++ /dev/null @@ -1,60 +0,0 @@ ---- -subcategory: "Security Lake" -layout: "aws" -page_title: "AWS: aws_securitylake_data_lake2" -description: |- - Terraform resource for managing an AWS Security Lake Data Lake2. ---- -` -# Resource: aws_securitylake_data_lake2 - -Terraform resource for managing an AWS Security Lake Data Lake2. - -## Example Usage - -### Basic Usage - -```terraform -resource "aws_securitylake_data_lake2" "example" { -} -``` - -## Argument Reference - -The following arguments are required: - -* `example_arg` - (Required) Concise argument description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. - -The following arguments are optional: - -* `optional_arg` - (Optional) Concise argument description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. 
- -## Attributes Reference - -In addition to all arguments above, the following attributes are exported: - -* `arn` - ARN of the Data Lake2. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. -* `example_attribute` - Concise description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. - -## Timeouts - -[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): - -* `create` - (Default `60m`) -* `update` - (Default `180m`) -* `delete` - (Default `90m`) - -## Import - -Security Lake Data Lake2 can be imported using the `example_id_arg`, e.g., - -``` -$ terraform import aws_securitylake_data_lake2.example rft-8012925589 -``` diff --git a/website/docs/r/securitylake_exception_subscription.html.markdown b/website/docs/r/securitylake_exception_subscription.html.markdown deleted file mode 100644 index b506af5ae935..000000000000 --- a/website/docs/r/securitylake_exception_subscription.html.markdown +++ /dev/null @@ -1,60 +0,0 @@ ---- -subcategory: "Security Lake" -layout: "aws" -page_title: "AWS: aws_securitylake_exception_subscription" -description: |- - Terraform resource for managing an AWS Security Lake Exception Subscription. ---- -` -# Resource: aws_securitylake_exception_subscription - -Terraform resource for managing an AWS Security Lake Exception Subscription. - -## Example Usage - -### Basic Usage - -```terraform -resource "aws_securitylake_exception_subscription" "example" { -} -``` - -## Argument Reference - -The following arguments are required: - -* `example_arg` - (Required) Concise argument description. 
Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. - -The following arguments are optional: - -* `optional_arg` - (Optional) Concise argument description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. - -## Attributes Reference - -In addition to all arguments above, the following attributes are exported: - -* `arn` - ARN of the Exception Subscription. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. -* `example_attribute` - Concise description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. 
- -## Timeouts - -[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): - -* `create` - (Default `60m`) -* `update` - (Default `180m`) -* `delete` - (Default `90m`) - -## Import - -Security Lake Exception Subscription can be imported using the `example_id_arg`, e.g., - -``` -$ terraform import aws_securitylake_exception_subscription.example rft-8012925589 -``` diff --git a/website/docs/r/securitylake_securitylake_data_lake.html.markdown b/website/docs/r/securitylake_securitylake_data_lake.html.markdown deleted file mode 100644 index f235dac65434..000000000000 --- a/website/docs/r/securitylake_securitylake_data_lake.html.markdown +++ /dev/null @@ -1,60 +0,0 @@ ---- -subcategory: "Security Lake" -layout: "aws" -page_title: "AWS: aws_securitylake_securitylake_data_lake" -description: |- - Terraform resource for managing an AWS Security Lake Securitylake Data Lake. ---- -` -# Resource: aws_securitylake_securitylake_data_lake - -Terraform resource for managing an AWS Security Lake Securitylake Data Lake. - -## Example Usage - -### Basic Usage - -```terraform -resource "aws_securitylake_securitylake_data_lake" "example" { -} -``` - -## Argument Reference - -The following arguments are required: - -* `example_arg` - (Required) Concise argument description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. - -The following arguments are optional: - -* `optional_arg` - (Optional) Concise argument description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. 
- -## Attributes Reference - -In addition to all arguments above, the following attributes are exported: - -* `arn` - ARN of the Securitylake Data Lake. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. -* `example_attribute` - Concise description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. - -## Timeouts - -[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): - -* `create` - (Default `60m`) -* `update` - (Default `180m`) -* `delete` - (Default `90m`) - -## Import - -Security Lake Securitylake Data Lake can be imported using the `example_id_arg`, e.g., - -``` -$ terraform import aws_securitylake_securitylake_data_lake.example rft-8012925589 -``` From 13a160a089a07c701b8907b2cbb25a6f5d7664da Mon Sep 17 00:00:00 2001 From: markos kandylis Date: Thu, 30 Nov 2023 15:40:49 +0000 Subject: [PATCH 09/45] Fixed the document errors --- website/docs/r/securitylake_data_lake.html.markdown | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/website/docs/r/securitylake_data_lake.html.markdown b/website/docs/r/securitylake_data_lake.html.markdown index cf0962087754..7fd91fa42400 100644 --- a/website/docs/r/securitylake_data_lake.html.markdown +++ b/website/docs/r/securitylake_data_lake.html.markdown @@ -93,7 +93,6 @@ Replication Configuration support the following: * `regions` - (Optional) Replication enables automatic, asynchronous copying of objects across Amazon S3 buckets. Amazon S3 buckets that are configured for object replication can be owned by the same AWS account or by different accounts. 
You can replicate objects to a single destination bucket or to multiple destination buckets. The destination buckets can be in different AWS Regions or within the same Region as the source bucket. * `role_arn` - (Optional) Replication settings for the Amazon S3 buckets. This parameter uses the AWS Identity and Access Management (IAM) role you created that is managed by Security Lake, to ensure the replication setting is correct. - ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -123,4 +122,4 @@ Using `terraform import`, import Security Hub standards subscriptions using the ```console % terraform import aws_securitylake_data_lake.example arn:aws:securitylake:eu-west-1:123456789012:data-lake/default -``` \ No newline at end of file +``` From ae47528c49897600d2a7fe66ee16467b04814793 Mon Sep 17 00:00:00 2001 From: markos kandylis Date: Thu, 30 Nov 2023 16:16:49 +0000 Subject: [PATCH 10/45] Minor fixes --- internal/service/securitylake/data_lake.go | 61 ++++++++++++---------- 1 file changed, 33 insertions(+), 28 deletions(-) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index b15892443507..08620d3a4769 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -65,7 +65,6 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques resp.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ "datalake_arn": framework.ARNAttributeComputedOnly(), - "id": framework.IDAttribute(), "meta_store_manager_role_arn": schema.StringAttribute{ Required: true, PlanModifiers: []planmodifier.String{ @@ -73,6 +72,7 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques }, }, names.AttrTags: tftags.TagsAttribute(), + names.AttrID: framework.IDAttribute(), }, Blocks: map[string]schema.Block{ "configurations": schema.SetNestedBlock{ @@ -137,13 +137,13 @@ func (r 
*resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques }, NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ - "role_arn": schema.StringAttribute{ - Optional: true, - }, "regions": schema.ListAttribute{ ElementType: types.StringType, Optional: true, }, + "role_arn": schema.StringAttribute{ + Optional: true, + }, }, }, }, @@ -160,7 +160,7 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques } func (r *resourceDataLake) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - var plan resourceDataLakeData + var plan datalakeResourceModel resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) if resp.Diagnostics.HasError() { return @@ -192,12 +192,13 @@ func (r *resourceDataLake) Create(ctx context.Context, req resource.CreateReques } plan.DataLakeArn = flex.StringToFramework(ctx, out.DataLakes[0].DataLakeArn) + plan.setID() createTimeout := r.CreateTimeout(ctx, plan.Timeouts) - waitOut, err := waitDataLakeCreated(ctx, conn, plan.DataLakeArn.ValueString(), createTimeout) + waitOut, err := waitDataLakeCreated(ctx, conn, plan.ID.ValueString(), createTimeout) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForCreation, ResNameDataLake, plan.DataLakeArn.ValueString(), err), + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForCreation, ResNameDataLake, plan.ID.ValueString(), err), err.Error(), ) return @@ -209,7 +210,7 @@ func (r *resourceDataLake) Create(ctx context.Context, req resource.CreateReques func (r *resourceDataLake) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { conn := r.Meta().SecurityLakeClient(ctx) - var state resourceDataLakeData + var state datalakeResourceModel resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
if resp.Diagnostics.HasError() { @@ -237,7 +238,7 @@ func (r *resourceDataLake) Read(ctx context.Context, req resource.ReadRequest, r func (r *resourceDataLake) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { conn := r.Meta().SecurityLakeClient(ctx) - var plan, state resourceDataLakeData + var plan, state datalakeResourceModel resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) resp.Diagnostics.Append(req.State.Get(ctx, &state)...) if resp.Diagnostics.HasError() { @@ -290,7 +291,7 @@ func (r *resourceDataLake) Update(ctx context.Context, req resource.UpdateReques func (r *resourceDataLake) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { conn := r.Meta().SecurityLakeClient(ctx) - var state resourceDataLakeData + var state datalakeResourceModel resp.Diagnostics.Append(req.State.Get(ctx, &state)...) if resp.Diagnostics.HasError() { return @@ -759,7 +760,7 @@ var ( } ) -type resourceDataLakeData struct { +type datalakeResourceModel struct { DataLakeArn types.String `tfsdk:"datalake_arn"` ID types.String `tfsdk:"id"` MetaStoreManagerRoleArn types.String `tfsdk:"meta_store_manager_role_arn"` @@ -768,6 +769,27 @@ type resourceDataLakeData struct { Timeouts timeouts.Value `tfsdk:"timeouts"` } +func (model *datalakeResourceModel) setID() { + model.ID = model.DataLakeArn +} + +func (model *datalakeResourceModel) refreshFromOutput(ctx context.Context, out *awstypes.DataLakeResource) diag.Diagnostics { + var diags diag.Diagnostics + + if out == nil { + return diags + } + + model.DataLakeArn = flex.StringToFramework(ctx, out.DataLakeArn) + model.setID() + configurations, d := flattenDataLakeConfigurations(ctx, []*awstypes.DataLakeResource{out}) + diags.Append(d...) 
+ + model.Configurations = configurations + + return diags +} + type dataLakeConfigurationsData struct { EncryptionConfiguration types.List `tfsdk:"encryption_configuration"` LifecycleConfiguration types.List `tfsdk:"lifecycle_configuration"` @@ -805,20 +827,3 @@ func extractRegionFromARN(arn string) (string, error) { } return parts[3], nil } - -func (rd *resourceDataLakeData) refreshFromOutput(ctx context.Context, out *awstypes.DataLakeResource) diag.Diagnostics { - var diags diag.Diagnostics - - if out == nil { - return diags - } - - rd.DataLakeArn = flex.StringToFramework(ctx, out.DataLakeArn) - rd.ID = flex.StringToFramework(ctx, out.DataLakeArn) - configurations, d := flattenDataLakeConfigurations(ctx, []*awstypes.DataLakeResource{out}) - diags.Append(d...) - - rd.Configurations = configurations - - return diags -} From fadedce0b71d79d94506585bb8bc9463b0646344 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 1 Dec 2023 14:26:43 -0500 Subject: [PATCH 11/45] r/aws_securitylake_data_lake: Transparent tagging. 
--- internal/service/securitylake/data_lake.go | 49 ++++++++++--------- internal/service/securitylake/exports_test.go | 2 +- .../securitylake/service_package_gen.go | 5 +- .../r/securitylake_data_lake.html.markdown | 6 ++- 4 files changed, 34 insertions(+), 28 deletions(-) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index 08620d3a4769..3dd0c6c12bb9 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -18,7 +18,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" @@ -36,10 +35,10 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// Function annotations are used for resource registration to the Provider. DO NOT EDIT. 
// @FrameworkResource(name="Data Lake") -func newResourceDataLake(_ context.Context) (resource.ResourceWithConfigure, error) { - r := &resourceDataLake{} +// @Tags(identifierAttribute="arn") +func newDataLakeResource(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &dataLakeResource{} r.SetDefaultCreateTimeout(30 * time.Minute) r.SetDefaultUpdateTimeout(30 * time.Minute) @@ -52,27 +51,29 @@ const ( ResNameDataLake = "Data Lake" ) -type resourceDataLake struct { +type dataLakeResource struct { framework.ResourceWithConfigure + framework.WithImportByID framework.WithTimeouts } -func (r *resourceDataLake) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { +func (r *dataLakeResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { resp.TypeName = "aws_securitylake_data_lake" } -func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { +func (r *dataLakeResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ - "datalake_arn": framework.ARNAttributeComputedOnly(), + "arn": framework.ARNAttributeComputedOnly(), + names.AttrID: framework.IDAttribute(), "meta_store_manager_role_arn": schema.StringAttribute{ Required: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.RequiresReplace(), }, }, - names.AttrTags: tftags.TagsAttribute(), - names.AttrID: framework.IDAttribute(), + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), }, Blocks: map[string]schema.Block{ "configurations": schema.SetNestedBlock{ @@ -159,8 +160,8 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques } } -func (r *resourceDataLake) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - var plan 
datalakeResourceModel +func (r *dataLakeResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var plan dataLakeResourceModel resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) if resp.Diagnostics.HasError() { return @@ -207,10 +208,10 @@ func (r *resourceDataLake) Create(ctx context.Context, req resource.CreateReques resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) } -func (r *resourceDataLake) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { +func (r *dataLakeResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { conn := r.Meta().SecurityLakeClient(ctx) - var state datalakeResourceModel + var state dataLakeResourceModel resp.Diagnostics.Append(req.State.Get(ctx, &state)...) if resp.Diagnostics.HasError() { @@ -235,10 +236,10 @@ func (r *resourceDataLake) Read(ctx context.Context, req resource.ReadRequest, r resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) } -func (r *resourceDataLake) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { +func (r *dataLakeResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { conn := r.Meta().SecurityLakeClient(ctx) - var plan, state datalakeResourceModel + var plan, state dataLakeResourceModel resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) resp.Diagnostics.Append(req.State.Get(ctx, &state)...) if resp.Diagnostics.HasError() { @@ -288,10 +289,10 @@ func (r *resourceDataLake) Update(ctx context.Context, req resource.UpdateReques resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) 
} -func (r *resourceDataLake) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { +func (r *dataLakeResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { conn := r.Meta().SecurityLakeClient(ctx) - var state datalakeResourceModel + var state dataLakeResourceModel resp.Diagnostics.Append(req.State.Get(ctx, &state)...) if resp.Diagnostics.HasError() { return @@ -328,8 +329,8 @@ func (r *resourceDataLake) Delete(ctx context.Context, req resource.DeleteReques } } -func (r *resourceDataLake) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { - resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +func (r *dataLakeResource) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) { + r.SetTagsAll(ctx, request, response) } func waitDataLakeCreated(ctx context.Context, conn *securitylake.Client, id string, timeout time.Duration) (*awstypes.DataLakeResource, error) { @@ -760,8 +761,8 @@ var ( } ) -type datalakeResourceModel struct { - DataLakeArn types.String `tfsdk:"datalake_arn"` +type dataLakeResourceModel struct { + DataLakeArn types.String `tfsdk:"arn"` ID types.String `tfsdk:"id"` MetaStoreManagerRoleArn types.String `tfsdk:"meta_store_manager_role_arn"` Configurations types.Set `tfsdk:"configurations"` @@ -769,11 +770,11 @@ type datalakeResourceModel struct { Timeouts timeouts.Value `tfsdk:"timeouts"` } -func (model *datalakeResourceModel) setID() { +func (model *dataLakeResourceModel) setID() { model.ID = model.DataLakeArn } -func (model *datalakeResourceModel) refreshFromOutput(ctx context.Context, out *awstypes.DataLakeResource) diag.Diagnostics { +func (model *dataLakeResourceModel) refreshFromOutput(ctx context.Context, out *awstypes.DataLakeResource) diag.Diagnostics { var diags diag.Diagnostics if out == nil { diff --git 
a/internal/service/securitylake/exports_test.go b/internal/service/securitylake/exports_test.go index 6d239c0a19fc..0b40c6af84c2 100644 --- a/internal/service/securitylake/exports_test.go +++ b/internal/service/securitylake/exports_test.go @@ -5,5 +5,5 @@ package securitylake // Exports for use in tests only. var ( - ResourceDataLake = newResourceDataLake + ResourceDataLake = newDataLakeResource ) diff --git a/internal/service/securitylake/service_package_gen.go b/internal/service/securitylake/service_package_gen.go index 7f35a30258e9..6168650626f3 100644 --- a/internal/service/securitylake/service_package_gen.go +++ b/internal/service/securitylake/service_package_gen.go @@ -21,8 +21,11 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.Serv func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { return []*types.ServicePackageFrameworkResource{ { - Factory: newResourceDataLake, + Factory: newDataLakeResource, Name: "Data Lake", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: "arn", + }, }, } } diff --git a/website/docs/r/securitylake_data_lake.html.markdown b/website/docs/r/securitylake_data_lake.html.markdown index 7fd91fa42400..a9ef1bfb1a5d 100644 --- a/website/docs/r/securitylake_data_lake.html.markdown +++ b/website/docs/r/securitylake_data_lake.html.markdown @@ -60,8 +60,9 @@ resource "aws_securitylake_data_lake" "example" { The following arguments are required: -* `meta_store_manager_role_arn` - (Required)The Amazon Resource Name (ARN) used to create and update the AWS Glue table. This table contains partitions generated by the ingestion and normalization of AWS log sources and custom sources. +* `meta_store_manager_role_arn` - (Required) The Amazon Resource Name (ARN) used to create and update the AWS Glue table. This table contains partitions generated by the ingestion and normalization of AWS log sources and custom sources. 
* `configurations` - (Required) Specify the Region or Regions that will contribute data to the rollup region. +* `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. Configurations support the following: @@ -97,7 +98,8 @@ Replication Configuration support the following: This resource exports the following attributes in addition to the arguments above: -* `arn` - ARN of the Data Lake. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. +* `arn` - ARN of the Data Lake. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Timeouts From a54f34219876fa716ab78a9536946bf5406efb37 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 1 Dec 2023 14:34:30 -0500 Subject: [PATCH 12/45] r/aws_securitylake_data_lake: 'configurations' -> 'configuration' and 'transitions' -> 'transition'. 
--- internal/service/securitylake/data_lake.go | 23 ++-- .../service/securitylake/data_lake_test.go | 108 +++++++++--------- .../r/securitylake_data_lake.html.markdown | 14 +-- 3 files changed, 72 insertions(+), 73 deletions(-) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index 3dd0c6c12bb9..04b3856f3b8a 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -15,7 +15,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/securitylake/types" "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" - "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/resource" @@ -76,10 +75,10 @@ func (r *dataLakeResource) Schema(ctx context.Context, req resource.SchemaReques names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), }, Blocks: map[string]schema.Block{ - "configurations": schema.SetNestedBlock{ - Validators: []validator.Set{ - setvalidator.SizeAtLeast(1), - setvalidator.SizeAtMost(1), + "configuration": schema.ListNestedBlock{ + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + listvalidator.SizeAtMost(1), }, NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ @@ -117,7 +116,7 @@ func (r *dataLakeResource) Schema(ctx context.Context, req resource.SchemaReques }, }, }, - "transitions": schema.SetNestedBlock{ + "transition": schema.SetNestedBlock{ NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "days": schema.Int64Attribute{ @@ -513,8 +512,8 @@ func flattenLifeCycleConfiguration(ctx context.Context, apiObject *awstypes.Data diags.Append(d...) 
obj := map[string]attr.Value{ - "expiration": expiration, - "transitions": transitions, + "expiration": expiration, + "transition": transitions, } objVal, d := types.ObjectValue(dataLakeConfigurationsLifecycleTypes, obj) diags.Append(d...) @@ -751,8 +750,8 @@ var ( } dataLakeConfigurationsLifecycleTypes = map[string]attr.Type{ - "expiration": types.ListType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleExpirationTypes}}, - "transitions": types.SetType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleTransitionsTypes}}, + "expiration": types.ListType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleExpirationTypes}}, + "transition": types.SetType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleTransitionsTypes}}, } dataLakeConfigurationsReplicationConfigurationTypes = map[string]attr.Type{ @@ -765,7 +764,7 @@ type dataLakeResourceModel struct { DataLakeArn types.String `tfsdk:"arn"` ID types.String `tfsdk:"id"` MetaStoreManagerRoleArn types.String `tfsdk:"meta_store_manager_role_arn"` - Configurations types.Set `tfsdk:"configurations"` + Configurations types.Set `tfsdk:"configuration"` Tags types.Map `tfsdk:"tags"` Timeouts timeouts.Value `tfsdk:"timeouts"` } @@ -804,7 +803,7 @@ type dataLakeConfigurationsEncryption struct { type dataLakeConfigurationsLifecycle struct { Expiration types.List `tfsdk:"expiration"` - Transitions types.Set `tfsdk:"transitions"` + Transitions types.Set `tfsdk:"transition"` } type dataLakeConfigurationsLifecycleExpiration struct { diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index 340dcae41704..0d913b9f580a 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -45,9 +45,9 @@ func TestAccSecurityLakeDataLake_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckDataLakeExists(ctx, resourceName, &datalake), 
resource.TestCheckTypeSetElemAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), - resource.TestCheckResourceAttr(resourceName, "configurations.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), + resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), ), }, { @@ -84,17 +84,17 @@ func TestAccSecurityLakeDataLake_lifeCycle(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckDataLakeExists(ctx, resourceName, &datalake), resource.TestCheckTypeSetElemAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), - resource.TestCheckResourceAttr(resourceName, "configurations.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.#", "2"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.0.days", "31"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.0.storage_class", "STANDARD_IA"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.1.days", "80"), - resource.TestCheckResourceAttr(resourceName, 
"configurations.0.lifecycle_configuration.0.transitions.1.storage_class", "ONEZONE_IA"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.expiration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.expiration.0.days", "300"), + resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.#", "2"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.days", "31"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.storage_class", "STANDARD_IA"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.1.days", "80"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.1.storage_class", "ONEZONE_IA"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.0.days", "300"), ), }, { @@ -131,17 +131,17 @@ func TestAccSecurityLakeDataLake_lifeCycleUpdate(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckDataLakeExists(ctx, resourceName, &datalake), resource.TestCheckTypeSetElemAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), - resource.TestCheckResourceAttr(resourceName, "configurations.#", "1"), - resource.TestCheckResourceAttr(resourceName, 
"configurations.0.encryption_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.#", "2"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.0.days", "31"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.0.storage_class", "STANDARD_IA"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.1.days", "80"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.1.storage_class", "ONEZONE_IA"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.expiration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.expiration.0.days", "300"), + resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.#", "2"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.days", "31"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.storage_class", "STANDARD_IA"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.1.days", 
"80"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.1.storage_class", "ONEZONE_IA"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.0.days", "300"), ), }, { @@ -155,15 +155,15 @@ func TestAccSecurityLakeDataLake_lifeCycleUpdate(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckDataLakeExists(ctx, resourceName, &datalake), resource.TestCheckTypeSetElemAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), - resource.TestCheckResourceAttr(resourceName, "configurations.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.0.days", "31"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.0.storage_class", "STANDARD_IA"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.expiration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.expiration.0.days", "300"), + resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), + 
resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.days", "31"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.storage_class", "STANDARD_IA"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.0.days", "300"), ), }, { @@ -200,18 +200,18 @@ func TestAccSecurityLakeDataLake_replication(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckDataLakeExists(ctx, resourceName, &datalake), resource.TestCheckTypeSetElemAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), - resource.TestCheckResourceAttr(resourceName, "configurations.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.0.days", "31"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.0.storage_class", "STANDARD_IA"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.expiration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.expiration.0.days", "300"), - 
resource.TestCheckResourceAttr(resourceName, "configurations.0.replication_configuration.#", "1"), - resource.TestCheckTypeSetElemAttrPair(resourceName, "configurations.0.replication_configuration.0.role_arn", "aws_iam_role.datalake_s3_replication", "arn"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.replication_configuration.0.regions.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.days", "31"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.storage_class", "STANDARD_IA"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.0.days", "300"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.replication_configuration.#", "1"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "configuration.0.replication_configuration.0.role_arn", "aws_iam_role.datalake_s3_replication", "arn"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.replication_configuration.0.regions.#", "1"), ), }, { @@ -477,7 +477,7 @@ func testAccDataLakeConfig_basic(rName string) string { resource "aws_securitylake_data_lake" "test" { meta_store_manager_role_arn = aws_iam_role.meta_store_manager.arn - configurations { + configuration { region = "eu-west-1" 
encryption_configuration { @@ -500,7 +500,7 @@ func testAccDataLakeConfig_lifeCycle(rName string) string { resource "aws_securitylake_data_lake" "test" { meta_store_manager_role_arn = aws_iam_role.meta_store_manager.arn - configurations { + configuration { region = "eu-west-1" encryption_configuration { @@ -508,11 +508,11 @@ resource "aws_securitylake_data_lake" "test" { } lifecycle_configuration { - transitions { + transition { days = 31 storage_class = "STANDARD_IA" } - transitions { + transition { days = 80 storage_class = "ONEZONE_IA" } @@ -536,7 +536,7 @@ func testAccDataLakeConfig_lifeCycleUpdate(rName string) string { resource "aws_securitylake_data_lake" "test" { meta_store_manager_role_arn = aws_iam_role.meta_store_manager.arn - configurations { + configuration { region = "eu-west-1" encryption_configuration { @@ -544,7 +544,7 @@ resource "aws_securitylake_data_lake" "test" { } lifecycle_configuration { - transitions { + transition { days = 31 storage_class = "STANDARD_IA" } @@ -569,7 +569,7 @@ func testAccDataLakeConfig_replication(rName string) string { resource "aws_securitylake_data_lake" "region_2" { meta_store_manager_role_arn = aws_iam_role.meta_store_manager.arn - configurations { + configuration { region = "eu-west-2" encryption_configuration { @@ -577,7 +577,7 @@ resource "aws_securitylake_data_lake" "region_2" { } lifecycle_configuration { - transitions { + transition { days = 31 storage_class = "STANDARD_IA" } diff --git a/website/docs/r/securitylake_data_lake.html.markdown b/website/docs/r/securitylake_data_lake.html.markdown index a9ef1bfb1a5d..2844ce563112 100644 --- a/website/docs/r/securitylake_data_lake.html.markdown +++ b/website/docs/r/securitylake_data_lake.html.markdown @@ -16,7 +16,7 @@ Terraform resource for managing an AWS Security Lake Data Lake. 
resource "aws_securitylake_data_lake" "example" { meta_store_manager_role_arn = aws_iam_role.meta_store_manager.arn - configurations { + configuration { region = "eu-west-1" encryption_configuration { @@ -24,11 +24,11 @@ resource "aws_securitylake_data_lake" "example" { } lifecycle_configuration { - transitions { + transition { days = 31 storage_class = "STANDARD_IA" } - transitions { + transition { days = 80 storage_class = "ONEZONE_IA" } @@ -46,7 +46,7 @@ resource "aws_securitylake_data_lake" "example" { resource "aws_securitylake_data_lake" "example" { meta_store_manager_role_arn = aws_iam_role.meta_store_manager.arn - configurations { + configuration { region = "eu-west-1" encryption_configuration { @@ -61,7 +61,7 @@ resource "aws_securitylake_data_lake" "example" { The following arguments are required: * `meta_store_manager_role_arn` - (Required) The Amazon Resource Name (ARN) used to create and update the AWS Glue table. This table contains partitions generated by the ingestion and normalization of AWS log sources and custom sources. -* `configurations` - (Required) Specify the Region or Regions that will contribute data to the rollup region. +* `configuration` - (Required) Specify the Region or Regions that will contribute data to the rollup region. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. Configurations support the following: @@ -78,7 +78,7 @@ Encryption Configuration support the following: Lifecycle Configuration support the following: * `expiration` - (Optional) Provides data expiration details of Amazon Security Lake object. -* `transitions` - (Optional) Provides data storage transition details of Amazon Security Lake object. 
+* `transition` - (Optional) Provides data storage transition details of Amazon Security Lake object. Expiration Configuration support the following: @@ -86,7 +86,7 @@ Expiration Configuration support the following: Transitions support the following: -* `days` - (Optional) Number of days before data transitions to a different S3 Storage Class in the Amazon Security Lake object. +* `days` - (Optional) Number of days before data transition to a different S3 Storage Class in the Amazon Security Lake object. * `storage_class` - (Optional) The range of storage classes that you can choose from based on the data access, resiliency, and cost requirements of your workloads. Replication Configuration support the following: From 8928b5d9c968c500822e15a8c3170a1aa2eaa18a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 1 Dec 2023 14:53:50 -0500 Subject: [PATCH 13/45] r/aws_securitylake_data_lake: Start to think about AutoFlEx. --- internal/service/securitylake/data_lake.go | 87 ++++++++++++---------- 1 file changed, 47 insertions(+), 40 deletions(-) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index 04b3856f3b8a..7da02b647e71 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -29,6 +29,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/framework" "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -66,7 +67,8 @@ func (r *dataLakeResource) Schema(ctx context.Context, req resource.SchemaReques "arn": framework.ARNAttributeComputedOnly(), names.AttrID: framework.IDAttribute(), "meta_store_manager_role_arn": 
schema.StringAttribute{ - Required: true, + CustomType: fwtypes.ARNType, + Required: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.RequiresReplace(), }, @@ -88,6 +90,9 @@ func (r *dataLakeResource) Schema(ctx context.Context, req resource.SchemaReques }, Blocks: map[string]schema.Block{ "encryption_configuration": schema.ListNestedBlock{ + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "kms_key_id": schema.StringAttribute{ @@ -137,12 +142,13 @@ func (r *dataLakeResource) Schema(ctx context.Context, req resource.SchemaReques }, NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ - "regions": schema.ListAttribute{ + "regions": schema.SetAttribute{ ElementType: types.StringType, Optional: true, }, "role_arn": schema.StringAttribute{ - Optional: true, + CustomType: fwtypes.ARNType, + Optional: true, }, }, }, @@ -168,7 +174,7 @@ func (r *dataLakeResource) Create(ctx context.Context, req resource.CreateReques conn := r.Meta().SecurityLakeClient(ctx) - var configurations []dataLakeConfigurationsData + var configurations []dataLakeConfigurationModel resp.Diagnostics.Append(plan.Configurations.ElementsAs(ctx, &configurations, false)...) 
if resp.Diagnostics.HasError() { @@ -177,7 +183,7 @@ func (r *dataLakeResource) Create(ctx context.Context, req resource.CreateReques in := &securitylake.CreateDataLakeInput{ Configurations: expandDataLakeConfigurations(ctx, configurations), - MetaStoreManagerRoleArn: aws.String(plan.MetaStoreManagerRoleArn.ValueString()), + MetaStoreManagerRoleArn: aws.String(plan.MetaStoreManagerRoleARN.ValueString()), Tags: getTagsIn(ctx), } @@ -191,7 +197,7 @@ func (r *dataLakeResource) Create(ctx context.Context, req resource.CreateReques return } - plan.DataLakeArn = flex.StringToFramework(ctx, out.DataLakes[0].DataLakeArn) + plan.DataLakeARN = flex.StringToFramework(ctx, out.DataLakes[0].DataLakeArn) plan.setID() createTimeout := r.CreateTimeout(ctx, plan.Timeouts) @@ -246,7 +252,7 @@ func (r *dataLakeResource) Update(ctx context.Context, req resource.UpdateReques } if !plan.Configurations.Equal(state.Configurations) { - var configurations []dataLakeConfigurationsData + var configurations []dataLakeConfigurationModel resp.Diagnostics.Append(plan.Configurations.ElementsAs(ctx, &configurations, false)...) 
if resp.Diagnostics.HasError() { return @@ -592,16 +598,16 @@ func flattenReplicationConfiguration(ctx context.Context, apiObject *awstypes.Da return listVal, diags } -func expandDataLakeConfigurations(ctx context.Context, tfList []dataLakeConfigurationsData) []awstypes.DataLakeConfiguration { +func expandDataLakeConfigurations(ctx context.Context, tfList []dataLakeConfigurationModel) []awstypes.DataLakeConfiguration { var diags diag.Diagnostics if len(tfList) == 0 { return nil } var apiObject []awstypes.DataLakeConfiguration - var encryptionConfiguration []dataLakeConfigurationsEncryption - var lifecycleConfiguration []dataLakeConfigurationsLifecycle - var replicationConfiguration []dataLakeConfigurationsReplicationConfiguration + var encryptionConfiguration []dataLakeEncryptionConfigurationModel + var lifecycleConfiguration []dataLakeLifecycleConfigurationModel + var replicationConfiguration []dataLakeReplicationConfigurationModel for _, tfObj := range tfList { diags.Append(tfObj.LifecycleConfiguration.ElementsAs(ctx, &lifecycleConfiguration, false)...) 
@@ -631,7 +637,7 @@ func expandDataLakeConfigurations(ctx context.Context, tfList []dataLakeConfigur return apiObject } -func expandEncryptionConfiguration(tfList []dataLakeConfigurationsEncryption) *awstypes.DataLakeEncryptionConfiguration { +func expandEncryptionConfiguration(tfList []dataLakeEncryptionConfigurationModel) *awstypes.DataLakeEncryptionConfiguration { if len(tfList) == 0 { return nil } @@ -645,7 +651,7 @@ func expandEncryptionConfiguration(tfList []dataLakeConfigurationsEncryption) *a return apiObject } -func expandLifecycleConfiguration(ctx context.Context, tfList []dataLakeConfigurationsLifecycle) (*awstypes.DataLakeLifecycleConfiguration, diag.Diagnostics) { +func expandLifecycleConfiguration(ctx context.Context, tfList []dataLakeLifecycleConfigurationModel) (*awstypes.DataLakeLifecycleConfiguration, diag.Diagnostics) { var diags diag.Diagnostics if len(tfList) == 0 { @@ -653,9 +659,9 @@ func expandLifecycleConfiguration(ctx context.Context, tfList []dataLakeConfigur } tfObj := tfList[0] - var transitions []dataLakeConfigurationsLifecycleTransitions + var transitions []dataLakeLifecycleTransitionModel diags.Append(tfObj.Transitions.ElementsAs(ctx, &transitions, false)...) - var expiration []dataLakeConfigurationsLifecycleExpiration + var expiration []dataLakeLifecycleExpirationModel diags.Append(tfObj.Expiration.ElementsAs(ctx, &expiration, false)...) 
apiObject := &awstypes.DataLakeLifecycleConfiguration{} @@ -670,7 +676,7 @@ func expandLifecycleConfiguration(ctx context.Context, tfList []dataLakeConfigur return apiObject, diags } -func expandLifecycleExpiration(tfList []dataLakeConfigurationsLifecycleExpiration) *awstypes.DataLakeLifecycleExpiration { +func expandLifecycleExpiration(tfList []dataLakeLifecycleExpirationModel) *awstypes.DataLakeLifecycleExpiration { if len(tfList) == 0 { return nil } @@ -685,7 +691,7 @@ func expandLifecycleExpiration(tfList []dataLakeConfigurationsLifecycleExpiratio return apiObject } -func expandLifecycleTransitions(tfList []dataLakeConfigurationsLifecycleTransitions) []awstypes.DataLakeLifecycleTransition { +func expandLifecycleTransitions(tfList []dataLakeLifecycleTransitionModel) []awstypes.DataLakeLifecycleTransition { if len(tfList) == 0 { return nil } @@ -709,7 +715,7 @@ func expandLifecycleTransitions(tfList []dataLakeConfigurationsLifecycleTransiti return apiObject } -func expandReplicationConfiguration(ctx context.Context, tfList []dataLakeConfigurationsReplicationConfiguration) *awstypes.DataLakeReplicationConfiguration { +func expandReplicationConfiguration(ctx context.Context, tfList []dataLakeReplicationConfigurationModel) *awstypes.DataLakeReplicationConfiguration { if len(tfList) == 0 { return nil } @@ -717,12 +723,12 @@ func expandReplicationConfiguration(ctx context.Context, tfList []dataLakeConfig tfObj := tfList[0] apiObject := &awstypes.DataLakeReplicationConfiguration{} - if !tfObj.RoleArn.IsNull() { - apiObject.RoleArn = aws.String(tfObj.RoleArn.ValueString()) + if !tfObj.RoleARN.IsNull() { + apiObject.RoleArn = aws.String(tfObj.RoleARN.ValueString()) } if !tfObj.Regions.IsNull() { - apiObject.Regions = flex.ExpandFrameworkStringValueList(ctx, tfObj.Regions) + apiObject.Regions = flex.ExpandFrameworkStringValueSet(ctx, tfObj.Regions) } return apiObject @@ -761,16 +767,17 @@ var ( ) type dataLakeResourceModel struct { - DataLakeArn types.String `tfsdk:"arn"` 
- ID types.String `tfsdk:"id"` - MetaStoreManagerRoleArn types.String `tfsdk:"meta_store_manager_role_arn"` Configurations types.Set `tfsdk:"configuration"` + DataLakeARN types.String `tfsdk:"arn"` + ID types.String `tfsdk:"id"` + MetaStoreManagerRoleARN fwtypes.ARN `tfsdk:"meta_store_manager_role_arn"` Tags types.Map `tfsdk:"tags"` + TagsAll types.Map `tfsdk:"tags_all"` Timeouts timeouts.Value `tfsdk:"timeouts"` } func (model *dataLakeResourceModel) setID() { - model.ID = model.DataLakeArn + model.ID = model.DataLakeARN } func (model *dataLakeResourceModel) refreshFromOutput(ctx context.Context, out *awstypes.DataLakeResource) diag.Diagnostics { @@ -780,7 +787,7 @@ func (model *dataLakeResourceModel) refreshFromOutput(ctx context.Context, out * return diags } - model.DataLakeArn = flex.StringToFramework(ctx, out.DataLakeArn) + model.DataLakeARN = flex.StringToFramework(ctx, out.DataLakeArn) model.setID() configurations, d := flattenDataLakeConfigurations(ctx, []*awstypes.DataLakeResource{out}) diags.Append(d...) 
@@ -790,34 +797,34 @@ func (model *dataLakeResourceModel) refreshFromOutput(ctx context.Context, out * return diags } -type dataLakeConfigurationsData struct { - EncryptionConfiguration types.List `tfsdk:"encryption_configuration"` - LifecycleConfiguration types.List `tfsdk:"lifecycle_configuration"` - Region types.String `tfsdk:"region"` - ReplicationConfiguration types.List `tfsdk:"replication_configuration"` +type dataLakeConfigurationModel struct { + EncryptionConfiguration fwtypes.ListNestedObjectValueOf[dataLakeEncryptionConfigurationModel] `tfsdk:"encryption_configuration"` + LifecycleConfiguration fwtypes.ListNestedObjectValueOf[dataLakeLifecycleConfigurationModel] `tfsdk:"lifecycle_configuration"` + Region types.String `tfsdk:"region"` + ReplicationConfiguration fwtypes.ListNestedObjectValueOf[dataLakeReplicationConfigurationModel] `tfsdk:"replication_configuration"` } -type dataLakeConfigurationsEncryption struct { +type dataLakeEncryptionConfigurationModel struct { KmsKeyID types.String `tfsdk:"kms_key_id"` } -type dataLakeConfigurationsLifecycle struct { - Expiration types.List `tfsdk:"expiration"` - Transitions types.Set `tfsdk:"transition"` +type dataLakeLifecycleConfigurationModel struct { + Expiration fwtypes.ListNestedObjectValueOf[dataLakeLifecycleExpirationModel] `tfsdk:"expiration"` + Transitions fwtypes.SetNestedObjectValueOf[dataLakeLifecycleTransitionModel] `tfsdk:"transition"` } -type dataLakeConfigurationsLifecycleExpiration struct { +type dataLakeLifecycleExpirationModel struct { Days types.Int64 `tfsdk:"days"` } -type dataLakeConfigurationsLifecycleTransitions struct { +type dataLakeLifecycleTransitionModel struct { Days types.Int64 `tfsdk:"days"` StorageClass types.String `tfsdk:"storage_class"` } -type dataLakeConfigurationsReplicationConfiguration struct { - RoleArn types.String `tfsdk:"role_arn"` - Regions types.List `tfsdk:"regions"` +type dataLakeReplicationConfigurationModel struct { + Regions types.Set `tfsdk:"regions"` + RoleARN 
fwtypes.ARN `tfsdk:"role_arn"` } func extractRegionFromARN(arn string) (string, error) { From c987f4f74ea29dbde5bc27ebcb11722eea74cc93 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 1 Dec 2023 15:28:02 -0500 Subject: [PATCH 14/45] r/aws_securitylake_data_lake: Add 'findDataLakes' and friends. --- internal/service/securitylake/data_lake.go | 132 ++++++++++++++++----- 1 file changed, 104 insertions(+), 28 deletions(-) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index 7da02b647e71..892a01e32d5b 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -11,6 +11,7 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/securitylake" awstypes "github.com/aws/aws-sdk-go-v2/service/securitylake/types" "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" @@ -27,9 +28,11 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/framework" "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -338,75 +341,89 @@ func (r *dataLakeResource) ModifyPlan(ctx context.Context, request resource.Modi r.SetTagsAll(ctx, request, response) } -func waitDataLakeCreated(ctx context.Context, conn *securitylake.Client, id string, timeout time.Duration) (*awstypes.DataLakeResource, 
error) { +func waitDataLakeCreated(ctx context.Context, conn *securitylake.Client, arn string, timeout time.Duration) (*awstypes.DataLakeResource, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.DataLakeStatusInitialized), - Target: enum.Slice(awstypes.DataLakeStatusCompleted), - Refresh: createStatusDataLake(ctx, conn, id), - Timeout: timeout, - NotFoundChecks: 20, - ContinuousTargetOccurence: 2, + Pending: enum.Slice(awstypes.DataLakeStatusInitialized), + Target: enum.Slice(awstypes.DataLakeStatusCompleted), + Refresh: statusDataLakeCreate(ctx, conn, arn), + Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*awstypes.DataLakeResource); ok { - return out, err + + if output, ok := outputRaw.(*awstypes.DataLakeResource); ok { + return output, err } return nil, err } -func waitDataLakeUpdated(ctx context.Context, conn *securitylake.Client, id string, timeout time.Duration) (*securitylake.ListDataLakesOutput, error) { +func waitDataLakeUpdated(ctx context.Context, conn *securitylake.Client, arn string, timeout time.Duration) (*awstypes.DataLakeResource, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.DataLakeStatusPending, awstypes.DataLakeStatusInitialized), - Target: enum.Slice(awstypes.DataLakeStatusCompleted), - Refresh: updateStatusDataLake(ctx, conn, id), - Timeout: timeout, - NotFoundChecks: 20, - ContinuousTargetOccurence: 2, + Pending: enum.Slice(awstypes.DataLakeStatusPending, awstypes.DataLakeStatusInitialized), + Target: enum.Slice(awstypes.DataLakeStatusCompleted), + Refresh: statusDataLakeUpdate(ctx, conn, arn), + Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*securitylake.ListDataLakesOutput); ok { - return out, err + + if output, ok := outputRaw.(*awstypes.DataLakeResource); ok { + if v := output.UpdateStatus; v != nil { + if v := v.Exception; v != nil { + tfresource.SetLastError(err, 
fmt.Errorf("%s: %s", aws.ToString(v.Code), aws.ToString(v.Reason))) + } + } + + return output, err } return nil, err } -func waitDataLakeDeleted(ctx context.Context, conn *securitylake.Client, id string, timeout time.Duration) (*securitylake.ListDataLakesOutput, error) { +func waitDataLakeDeleted(ctx context.Context, conn *securitylake.Client, arn string, timeout time.Duration) (*awstypes.DataLakeResource, error) { stateConf := &retry.StateChangeConf{ Pending: enum.Slice(awstypes.DataLakeStatusInitialized, awstypes.DataLakeStatusCompleted), Target: []string{}, - Refresh: createStatusDataLake(ctx, conn, id), + Refresh: statusDataLakeUpdate(ctx, conn, arn), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*securitylake.ListDataLakesOutput); ok { - return out, err + + if output, ok := outputRaw.(*awstypes.DataLakeResource); ok { + if v := output.UpdateStatus; v != nil { + if v := v.Exception; v != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(v.Code), aws.ToString(v.Reason))) + } + } + + return output, err } return nil, err } -func createStatusDataLake(ctx context.Context, conn *securitylake.Client, id string) retry.StateRefreshFunc { +func statusDataLakeCreate(ctx context.Context, conn *securitylake.Client, arn string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - out, err := FindDataLakeByID(ctx, conn, id) + output, err := findDataLakeByARN(ctx, conn, arn) + if tfresource.NotFound(err) { return nil, "", nil } + if err != nil { return nil, "", err } - return out, string(out.CreateStatus), nil + return output, string(output.CreateStatus), nil } } -func updateStatusDataLake(ctx context.Context, conn *securitylake.Client, id string) retry.StateRefreshFunc { +func statusDataLakeUpdate(ctx context.Context, conn *securitylake.Client, arn string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - out, err := FindDataLakeByID(ctx, conn, id) + output, err 
:= findDataLakeByARN(ctx, conn, arn) + if tfresource.NotFound(err) { return nil, "", nil } @@ -414,7 +431,12 @@ func updateStatusDataLake(ctx context.Context, conn *securitylake.Client, id str if err != nil { return nil, "", err } - return out, string(out.UpdateStatus.Status), nil + + if output.UpdateStatus == nil { + return nil, "", nil + } + + return output, string(output.UpdateStatus.Status), nil } } @@ -834,3 +856,57 @@ func extractRegionFromARN(arn string) (string, error) { } return parts[3], nil } + +func findDataLakeByARN(ctx context.Context, conn *securitylake.Client, arn string) (*awstypes.DataLakeResource, error) { + input := &securitylake.ListDataLakesInput{ + Regions: []string{errs.Must(regionFromARNString(arn))}, + } + + return findDataLake(ctx, conn, input, func(v *awstypes.DataLakeResource) bool { + return aws.ToString(v.DataLakeArn) == arn + }) +} + +func findDataLake(ctx context.Context, conn *securitylake.Client, input *securitylake.ListDataLakesInput, filter tfslices.Predicate[*awstypes.DataLakeResource]) (*awstypes.DataLakeResource, error) { + output, err := findDataLakes(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func findDataLakes(ctx context.Context, conn *securitylake.Client, input *securitylake.ListDataLakesInput, filter tfslices.Predicate[*awstypes.DataLakeResource]) ([]*awstypes.DataLakeResource, error) { + var dataLakes []*awstypes.DataLakeResource + + output, err := conn.ListDataLakes(ctx, input) + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + for _, v := range output.DataLakes { + v := v + if v := &v; filter(v) { + dataLakes = append(dataLakes, v) + } + } + + return dataLakes, nil +} + +// regionFromARNString return the AWS Region from the specified ARN string. 
+func regionFromARNString(s string) (string, error) { + v, err := arn.Parse(s) + + if err != nil { + return "", err + } + + return v.Region, nil +} From 20dacb9cbcbcebf989eeaa1c390d304b31801ca6 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 1 Dec 2023 15:39:09 -0500 Subject: [PATCH 15/45] r/aws_securitylake_data_lake: Tidy up Delete. --- internal/service/securitylake/data_lake.go | 28 ++++++++++------------ 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index 892a01e32d5b..cd10572fc547 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -300,37 +300,33 @@ func (r *dataLakeResource) Update(ctx context.Context, req resource.UpdateReques func (r *dataLakeResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { conn := r.Meta().SecurityLakeClient(ctx) - var state dataLakeResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + var data dataLakeResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return } - region, _ := extractRegionFromARN(state.ID.ValueString()) + _, err := conn.DeleteDataLake(ctx, &securitylake.DeleteDataLakeInput{ + Regions: []string{errs.Must(regionFromARNString(data.ID.ValueString()))}, + }) - in := &securitylake.DeleteDataLakeInput{ - Regions: []string{region}, + // No data lake: + // "An error occurred (AccessDeniedException) when calling the DeleteDataLake operation: User: ... 
is not authorized to perform: securitylake:DeleteDataLake" + if errs.IsAErrorMessageContains[*awstypes.AccessDeniedException](err, "is not authorized to perform") { + return } - _, err := conn.DeleteDataLake(ctx, in) - if err != nil { - var nfe *awstypes.ResourceNotFoundException - if errors.As(err, &nfe) { - return - } resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.SecurityLake, create.ErrActionDeleting, ResNameDataLake, state.ID.String(), err), + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionDeleting, ResNameDataLake, data.ID.String(), err), err.Error(), ) return } - deleteTimeout := r.DeleteTimeout(ctx, state.Timeouts) - _, err = waitDataLakeDeleted(ctx, conn, state.ID.ValueString(), deleteTimeout) - if err != nil { + if _, err = waitDataLakeDeleted(ctx, conn, data.ID.ValueString(), r.DeleteTimeout(ctx, data.Timeouts)); err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForDeletion, ResNameDataLake, state.ID.String(), err), + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForDeletion, ResNameDataLake, data.ID.String(), err), err.Error(), ) return From 57ab21b4a60ebd642e97bc80d3d184fe33eb10cd Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 1 Dec 2023 16:15:06 -0500 Subject: [PATCH 16/45] r/aws_securitylake_data_lake: Use AutoFlEx for Create, Read and Update. 
--- internal/service/securitylake/data_lake.go | 476 ++------------------- 1 file changed, 43 insertions(+), 433 deletions(-) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index cd10572fc547..11c334d1d454 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -5,9 +5,7 @@ package securitylake import ( "context" - "errors" "fmt" - "strings" "time" "github.com/aws/aws-sdk-go-v2/aws" @@ -16,8 +14,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/securitylake/types" "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" - "github.com/hashicorp/terraform-plugin-framework/attr" - "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" @@ -169,132 +165,119 @@ func (r *dataLakeResource) Schema(ctx context.Context, req resource.SchemaReques } func (r *dataLakeResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - var plan dataLakeResourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + var data dataLakeResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return } conn := r.Meta().SecurityLakeClient(ctx) - var configurations []dataLakeConfigurationModel - - resp.Diagnostics.Append(plan.Configurations.ElementsAs(ctx, &configurations, false)...) + input := &securitylake.CreateDataLakeInput{} + resp.Diagnostics.Append(flex.Expand(ctx, data, input)...) 
if resp.Diagnostics.HasError() { return } - in := &securitylake.CreateDataLakeInput{ - Configurations: expandDataLakeConfigurations(ctx, configurations), - MetaStoreManagerRoleArn: aws.String(plan.MetaStoreManagerRoleARN.ValueString()), - Tags: getTagsIn(ctx), - } + input.Tags = getTagsIn(ctx) - out, err := conn.CreateDataLake(ctx, in) + output, err := conn.CreateDataLake(ctx, input) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.SecurityLake, create.ErrActionCreating, ResNameDataLake, plan.ID.ValueString(), err), + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionCreating, ResNameDataLake, data.ID.ValueString(), err), err.Error(), ) return } - plan.DataLakeARN = flex.StringToFramework(ctx, out.DataLakes[0].DataLakeArn) - plan.setID() + // Set values for unknowns. + data.DataLakeARN = flex.StringToFramework(ctx, output.DataLakes[0].DataLakeArn) + data.setID() - createTimeout := r.CreateTimeout(ctx, plan.Timeouts) - waitOut, err := waitDataLakeCreated(ctx, conn, plan.ID.ValueString(), createTimeout) - if err != nil { + if _, err := waitDataLakeCreated(ctx, conn, data.ID.ValueString(), r.CreateTimeout(ctx, data.Timeouts)); err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForCreation, ResNameDataLake, plan.ID.ValueString(), err), + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForCreation, ResNameDataLake, data.ID.ValueString(), err), err.Error(), ) return } - resp.Diagnostics.Append(plan.refreshFromOutput(ctx, waitOut)...) - resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) + + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) } func (r *dataLakeResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { conn := r.Meta().SecurityLakeClient(ctx) - var state dataLakeResourceModel - - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ var data dataLakeResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return } - out, err := FindDataLakeByID(ctx, conn, state.ID.ValueString()) + dataLake, err := findDataLakeByARN(ctx, conn, data.ID.ValueString()) if tfresource.NotFound(err) { resp.State.RemoveResource(ctx) return } + if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.SecurityLake, create.ErrActionSetting, ResNameDataLake, state.ID.String(), err), + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionSetting, ResNameDataLake, data.ID.String(), err), err.Error(), ) return } - resp.Diagnostics.Append(state.refreshFromOutput(ctx, out)...) - resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + resp.Diagnostics.Append(flex.Flatten(ctx, dataLake, &data)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) } func (r *dataLakeResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { - conn := r.Meta().SecurityLakeClient(ctx) - - var plan, state dataLakeResourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + var old, new dataLakeResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &new)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(req.State.Get(ctx, &old)...) if resp.Diagnostics.HasError() { return } - if !plan.Configurations.Equal(state.Configurations) { - var configurations []dataLakeConfigurationModel - resp.Diagnostics.Append(plan.Configurations.ElementsAs(ctx, &configurations, false)...) + conn := r.Meta().SecurityLakeClient(ctx) + + if !new.Configurations.Equal(old.Configurations) { + input := &securitylake.UpdateDataLakeInput{} + resp.Diagnostics.Append(flex.Expand(ctx, new, input)...) 
if resp.Diagnostics.HasError() { return } - in := &securitylake.UpdateDataLakeInput{ - Configurations: expandDataLakeConfigurations(ctx, configurations), - } + _, err := conn.UpdateDataLake(ctx, input) - out, err := conn.UpdateDataLake(ctx, in) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.SecurityLake, create.ErrActionUpdating, ResNameDataLake, plan.ID.ValueString(), err), + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionUpdating, ResNameDataLake, new.ID.ValueString(), err), err.Error(), ) return } - if out == nil || out.DataLakes == nil { + + if _, err := waitDataLakeUpdated(ctx, conn, new.ID.ValueString(), r.UpdateTimeout(ctx, new.Timeouts)); err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.SecurityLake, create.ErrActionUpdating, ResNameDataLake, plan.ID.ValueString(), nil), - errors.New("empty output").Error(), + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForUpdate, ResNameDataLake, new.ID.ValueString(), err), + err.Error(), ) return } - - resp.Diagnostics.Append(state.refreshFromOutput(ctx, &out.DataLakes[0])...) } - updateTimeout := r.UpdateTimeout(ctx, plan.Timeouts) - _, err := waitDataLakeUpdated(ctx, conn, plan.ID.ValueString(), updateTimeout) - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForUpdate, ResNameDataLake, plan.ID.ValueString(), err), - err.Error(), - ) - return - } - - resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) + resp.Diagnostics.Append(resp.State.Set(ctx, &new)...) 
} func (r *dataLakeResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { @@ -436,354 +419,6 @@ func statusDataLakeUpdate(ctx context.Context, conn *securitylake.Client, arn st } } -func FindDataLakeByID(ctx context.Context, conn *securitylake.Client, id string) (*awstypes.DataLakeResource, error) { - region, err := extractRegionFromARN(id) - if err != nil { - return nil, err - } - - in := &securitylake.ListDataLakesInput{ - Regions: []string{region}, - } - - out, err := conn.ListDataLakes(ctx, in) - if err != nil { - var nfe *awstypes.ResourceNotFoundException - if errors.As(err, &nfe) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: in, - } - } - - return nil, err - } - - if out == nil || len(out.DataLakes) < 1 { - return nil, tfresource.NewEmptyResultError(in) - } - datalakeResource := out.DataLakes[0] - - return &datalakeResource, nil -} - -func flattenDataLakeConfigurations(ctx context.Context, apiObjects []*awstypes.DataLakeResource) (types.Set, diag.Diagnostics) { - var diags diag.Diagnostics - elemType := types.ObjectType{AttrTypes: dataLakeConfigurations} - - if len(apiObjects) == 0 { - return types.SetNull(elemType), diags - } - - elems := []attr.Value{} - for _, apiObject := range apiObjects { - encryptionConfiguration, d := flattenEncryptionConfiguration(ctx, apiObject.EncryptionConfiguration) - diags.Append(d...) - lifecycleExpiration, d := flattenLifeCycleConfiguration(ctx, apiObject.LifecycleConfiguration) - diags.Append(d...) - replicationConfiguration, d := flattenReplicationConfiguration(ctx, apiObject.ReplicationConfiguration) - diags.Append(d...) 
- - obj := map[string]attr.Value{ - "encryption_configuration": encryptionConfiguration, - "lifecycle_configuration": lifecycleExpiration, - "region": flex.StringToFramework(ctx, apiObject.Region), - "replication_configuration": replicationConfiguration, - } - objVal, d := types.ObjectValue(dataLakeConfigurations, obj) - diags.Append(d...) - - elems = append(elems, objVal) - } - - setVal, d := types.SetValue(elemType, elems) - diags.Append(d...) - - return setVal, diags -} - -func flattenEncryptionConfiguration(ctx context.Context, apiObject *awstypes.DataLakeEncryptionConfiguration) (types.List, diag.Diagnostics) { - var diags diag.Diagnostics - elemType := types.ObjectType{AttrTypes: dataLakeConfigurationsEncryptionTypes} - - if apiObject == nil { - return types.ListNull(elemType), diags - } - - obj := map[string]attr.Value{ - "kms_key_id": flex.StringToFramework(ctx, apiObject.KmsKeyId), - } - objVal, d := types.ObjectValue(dataLakeConfigurationsEncryptionTypes, obj) - diags.Append(d...) - - listVal, d := types.ListValue(elemType, []attr.Value{objVal}) - diags.Append(d...) - - return listVal, diags -} - -func flattenLifeCycleConfiguration(ctx context.Context, apiObject *awstypes.DataLakeLifecycleConfiguration) (types.List, diag.Diagnostics) { - var diags diag.Diagnostics - elemType := types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleTypes} - - if apiObject == nil || (apiObject.Expiration == nil && len(apiObject.Transitions) == 0) { - return types.ListNull(elemType), diags - } - - expiration, d := flattenLifecycleExpiration(ctx, apiObject.Expiration) - diags.Append(d...) - transitions, d := flattenLifecycleTransitions(ctx, apiObject.Transitions) - diags.Append(d...) - - obj := map[string]attr.Value{ - "expiration": expiration, - "transition": transitions, - } - objVal, d := types.ObjectValue(dataLakeConfigurationsLifecycleTypes, obj) - diags.Append(d...) - - listVal, d := types.ListValue(elemType, []attr.Value{objVal}) - diags.Append(d...) 
- - return listVal, diags -} - -func flattenLifecycleExpiration(ctx context.Context, apiObject *awstypes.DataLakeLifecycleExpiration) (types.List, diag.Diagnostics) { - var diags diag.Diagnostics - elemType := types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleExpirationTypes} - - if apiObject == nil { - return types.ListNull(elemType), diags - } - - obj := map[string]attr.Value{ - "days": flex.Int32ToFramework(ctx, apiObject.Days), - } - - objVal, d := types.ObjectValue(dataLakeConfigurationsLifecycleExpirationTypes, obj) - diags.Append(d...) - - listVal, d := types.ListValue(elemType, []attr.Value{objVal}) - diags.Append(d...) - - return listVal, diags -} - -func flattenLifecycleTransitions(ctx context.Context, apiObjects []awstypes.DataLakeLifecycleTransition) (types.Set, diag.Diagnostics) { - var diags diag.Diagnostics - elemType := types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleTransitionsTypes} - - if len(apiObjects) == 0 { - return types.SetValueMust(elemType, []attr.Value{}), diags - } - - elems := []attr.Value{} - for _, apiObject := range apiObjects { - obj := map[string]attr.Value{ - "days": flex.Int32ToFramework(ctx, apiObject.Days), - "storage_class": flex.StringToFramework(ctx, apiObject.StorageClass), - } - objVal, d := types.ObjectValue(dataLakeConfigurationsLifecycleTransitionsTypes, obj) - diags.Append(d...) - - elems = append(elems, objVal) - } - - setVal, d := types.SetValue(elemType, elems) - diags.Append(d...) 
- - return setVal, diags -} - -func flattenReplicationConfiguration(ctx context.Context, apiObject *awstypes.DataLakeReplicationConfiguration) (types.List, diag.Diagnostics) { - var diags diag.Diagnostics - elemType := types.ObjectType{AttrTypes: dataLakeConfigurationsReplicationConfigurationTypes} - - if apiObject == nil || (apiObject.Regions == nil && apiObject.RoleArn == nil) { - return types.ListNull(elemType), diags - } - - obj := map[string]attr.Value{ - "role_arn": flex.StringToFramework(ctx, apiObject.RoleArn), - "regions": flex.FlattenFrameworkStringValueList(ctx, apiObject.Regions), - } - objVal, d := types.ObjectValue(dataLakeConfigurationsReplicationConfigurationTypes, obj) - diags.Append(d...) - - listVal, d := types.ListValue(elemType, []attr.Value{objVal}) - diags.Append(d...) - - return listVal, diags -} - -func expandDataLakeConfigurations(ctx context.Context, tfList []dataLakeConfigurationModel) []awstypes.DataLakeConfiguration { - var diags diag.Diagnostics - if len(tfList) == 0 { - return nil - } - - var apiObject []awstypes.DataLakeConfiguration - var encryptionConfiguration []dataLakeEncryptionConfigurationModel - var lifecycleConfiguration []dataLakeLifecycleConfigurationModel - var replicationConfiguration []dataLakeReplicationConfigurationModel - - for _, tfObj := range tfList { - diags.Append(tfObj.LifecycleConfiguration.ElementsAs(ctx, &lifecycleConfiguration, false)...) - diags.Append(tfObj.ReplicationConfiguration.ElementsAs(ctx, &replicationConfiguration, false)...) - lifecycleConfiguration, d := expandLifecycleConfiguration(ctx, lifecycleConfiguration) - diags.Append(d...) 
- - item := awstypes.DataLakeConfiguration{ - Region: aws.String(tfObj.Region.ValueString()), - } - - if !tfObj.EncryptionConfiguration.IsNull() { - item.EncryptionConfiguration = expandEncryptionConfiguration(encryptionConfiguration) - } - - if !tfObj.LifecycleConfiguration.IsNull() { - item.LifecycleConfiguration = lifecycleConfiguration - } - - if !tfObj.ReplicationConfiguration.IsNull() { - item.ReplicationConfiguration = expandReplicationConfiguration(ctx, replicationConfiguration) - } - - apiObject = append(apiObject, item) - } - - return apiObject -} - -func expandEncryptionConfiguration(tfList []dataLakeEncryptionConfigurationModel) *awstypes.DataLakeEncryptionConfiguration { - if len(tfList) == 0 { - return nil - } - - tfObj := tfList[0] - apiObject := &awstypes.DataLakeEncryptionConfiguration{} - if !tfObj.KmsKeyID.IsNull() { - apiObject.KmsKeyId = aws.String(tfObj.KmsKeyID.ValueString()) - } - - return apiObject -} - -func expandLifecycleConfiguration(ctx context.Context, tfList []dataLakeLifecycleConfigurationModel) (*awstypes.DataLakeLifecycleConfiguration, diag.Diagnostics) { - var diags diag.Diagnostics - - if len(tfList) == 0 { - return nil, diags - } - - tfObj := tfList[0] - var transitions []dataLakeLifecycleTransitionModel - diags.Append(tfObj.Transitions.ElementsAs(ctx, &transitions, false)...) - var expiration []dataLakeLifecycleExpirationModel - diags.Append(tfObj.Expiration.ElementsAs(ctx, &expiration, false)...) 
- apiObject := &awstypes.DataLakeLifecycleConfiguration{} - - if !tfObj.Expiration.IsNull() { - apiObject.Expiration = expandLifecycleExpiration(expiration) - } - - if !tfObj.Transitions.IsNull() { - apiObject.Transitions = expandLifecycleTransitions(transitions) - } - - return apiObject, diags -} - -func expandLifecycleExpiration(tfList []dataLakeLifecycleExpirationModel) *awstypes.DataLakeLifecycleExpiration { - if len(tfList) == 0 { - return nil - } - - tfObj := tfList[0] - apiObject := &awstypes.DataLakeLifecycleExpiration{} - - if !tfObj.Days.IsNull() { - apiObject.Days = aws.Int32(int32(tfObj.Days.ValueInt64())) - } - - return apiObject -} - -func expandLifecycleTransitions(tfList []dataLakeLifecycleTransitionModel) []awstypes.DataLakeLifecycleTransition { - if len(tfList) == 0 { - return nil - } - - var apiObject []awstypes.DataLakeLifecycleTransition - - for _, tfObj := range tfList { - item := awstypes.DataLakeLifecycleTransition{} - - if !tfObj.Days.IsNull() { - item.Days = aws.Int32(int32(tfObj.Days.ValueInt64())) - } - - if !tfObj.StorageClass.IsNull() { - item.StorageClass = aws.String(tfObj.StorageClass.ValueString()) - } - - apiObject = append(apiObject, item) - } - - return apiObject -} - -func expandReplicationConfiguration(ctx context.Context, tfList []dataLakeReplicationConfigurationModel) *awstypes.DataLakeReplicationConfiguration { - if len(tfList) == 0 { - return nil - } - - tfObj := tfList[0] - apiObject := &awstypes.DataLakeReplicationConfiguration{} - - if !tfObj.RoleARN.IsNull() { - apiObject.RoleArn = aws.String(tfObj.RoleARN.ValueString()) - } - - if !tfObj.Regions.IsNull() { - apiObject.Regions = flex.ExpandFrameworkStringValueSet(ctx, tfObj.Regions) - } - - return apiObject -} - -var ( - dataLakeConfigurations = map[string]attr.Type{ - "encryption_configuration": types.ListType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsEncryptionTypes}}, - "lifecycle_configuration": types.ListType{ElemType: types.ObjectType{AttrTypes: 
dataLakeConfigurationsLifecycleTypes}}, - "region": types.StringType, - "replication_configuration": types.ListType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsReplicationConfigurationTypes}}, - } - - dataLakeConfigurationsEncryptionTypes = map[string]attr.Type{ - "kms_key_id": types.StringType, - } - - dataLakeConfigurationsLifecycleExpirationTypes = map[string]attr.Type{ - "days": types.Int64Type, - } - - dataLakeConfigurationsLifecycleTransitionsTypes = map[string]attr.Type{ - "days": types.Int64Type, - "storage_class": types.StringType, - } - - dataLakeConfigurationsLifecycleTypes = map[string]attr.Type{ - "expiration": types.ListType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleExpirationTypes}}, - "transition": types.SetType{ElemType: types.ObjectType{AttrTypes: dataLakeConfigurationsLifecycleTransitionsTypes}}, - } - - dataLakeConfigurationsReplicationConfigurationTypes = map[string]attr.Type{ - "role_arn": types.StringType, - "regions": types.ListType{ElemType: types.StringType}, - } -) - type dataLakeResourceModel struct { Configurations types.Set `tfsdk:"configuration"` DataLakeARN types.String `tfsdk:"arn"` @@ -798,23 +433,6 @@ func (model *dataLakeResourceModel) setID() { model.ID = model.DataLakeARN } -func (model *dataLakeResourceModel) refreshFromOutput(ctx context.Context, out *awstypes.DataLakeResource) diag.Diagnostics { - var diags diag.Diagnostics - - if out == nil { - return diags - } - - model.DataLakeARN = flex.StringToFramework(ctx, out.DataLakeArn) - model.setID() - configurations, d := flattenDataLakeConfigurations(ctx, []*awstypes.DataLakeResource{out}) - diags.Append(d...) 
- - model.Configurations = configurations - - return diags -} - type dataLakeConfigurationModel struct { EncryptionConfiguration fwtypes.ListNestedObjectValueOf[dataLakeEncryptionConfigurationModel] `tfsdk:"encryption_configuration"` LifecycleConfiguration fwtypes.ListNestedObjectValueOf[dataLakeLifecycleConfigurationModel] `tfsdk:"lifecycle_configuration"` @@ -845,14 +463,6 @@ type dataLakeReplicationConfigurationModel struct { RoleARN fwtypes.ARN `tfsdk:"role_arn"` } -func extractRegionFromARN(arn string) (string, error) { - parts := strings.Split(arn, ":") - if len(parts) < 4 { - return "", fmt.Errorf("invalid ARN: %s", arn) - } - return parts[3], nil -} - func findDataLakeByARN(ctx context.Context, conn *securitylake.Client, arn string) (*awstypes.DataLakeResource, error) { input := &securitylake.ListDataLakesInput{ Regions: []string{errs.Must(regionFromARNString(arn))}, From 35404273e60c9b4118de6f14133802b9d371a468 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 1 Dec 2023 16:16:20 -0500 Subject: [PATCH 17/45] r/aws_securitylake_data_lake: Tidy up acceptance tests. 
--- internal/service/securitylake/data_lake_test.go | 4 ++-- internal/service/securitylake/exports_test.go | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index 0d913b9f580a..e73790172023 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -260,7 +260,7 @@ func testAccCheckDataLakeDestroy(ctx context.Context) resource.TestCheckFunc { continue } - _, err := tfsecuritylake.FindDataLakeByID(ctx, conn, rs.Primary.ID) + _, err := tfsecuritylake.FindDataLakeByARN(ctx, conn, rs.Primary.ID) if tfresource.NotFound(err) { continue @@ -289,7 +289,7 @@ func testAccCheckDataLakeExists(ctx context.Context, name string, datalake *type } conn := acctest.Provider.Meta().(*conns.AWSClient).SecurityLakeClient(ctx) - resp, err := tfsecuritylake.FindDataLakeByID(ctx, conn, rs.Primary.ID) + resp, err := tfsecuritylake.FindDataLakeByARN(ctx, conn, rs.Primary.ID) if err != nil { return create.Error(names.SecurityLake, create.ErrActionCheckingExistence, tfsecuritylake.ResNameDataLake, rs.Primary.ID, err) } diff --git a/internal/service/securitylake/exports_test.go b/internal/service/securitylake/exports_test.go index 0b40c6af84c2..b9b764848c1d 100644 --- a/internal/service/securitylake/exports_test.go +++ b/internal/service/securitylake/exports_test.go @@ -6,4 +6,6 @@ package securitylake // Exports for use in tests only. var ( ResourceDataLake = newDataLakeResource + + FindDataLakeByARN = findDataLakeByARN ) From e2796edbf25bd9fded81e300a0b500a0cc53ddd4 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 1 Dec 2023 16:31:12 -0500 Subject: [PATCH 18/45] r/aws_securitylake_data_lake: AutoFlEx fixes. 
--- internal/service/securitylake/data_lake.go | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index 11c334d1d454..043b4b78bc21 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -77,6 +77,7 @@ func (r *dataLakeResource) Schema(ctx context.Context, req resource.SchemaReques }, Blocks: map[string]schema.Block{ "configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[dataLakeConfigurationModel](ctx), Validators: []validator.List{ listvalidator.SizeAtLeast(1), listvalidator.SizeAtMost(1), @@ -89,6 +90,7 @@ func (r *dataLakeResource) Schema(ctx context.Context, req resource.SchemaReques }, Blocks: map[string]schema.Block{ "encryption_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[dataLakeEncryptionConfigurationModel](ctx), Validators: []validator.List{ listvalidator.SizeAtMost(1), }, @@ -103,12 +105,14 @@ func (r *dataLakeResource) Schema(ctx context.Context, req resource.SchemaReques }, }, "lifecycle_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[dataLakeLifecycleConfigurationModel](ctx), Validators: []validator.List{ listvalidator.SizeAtMost(1), }, NestedObject: schema.NestedBlockObject{ Blocks: map[string]schema.Block{ "expiration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[dataLakeLifecycleExpirationModel](ctx), Validators: []validator.List{ listvalidator.SizeAtMost(1), }, @@ -121,6 +125,7 @@ func (r *dataLakeResource) Schema(ctx context.Context, req resource.SchemaReques }, }, "transition": schema.SetNestedBlock{ + CustomType: fwtypes.NewSetNestedObjectTypeOf[dataLakeLifecycleTransitionModel](ctx), NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "days": schema.Int64Attribute{ @@ -136,6 +141,7 @@ func (r 
*dataLakeResource) Schema(ctx context.Context, req resource.SchemaReques }, }, "replication_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[dataLakeReplicationConfigurationModel](ctx), Validators: []validator.List{ listvalidator.SizeAtMost(1), }, @@ -420,13 +426,13 @@ func statusDataLakeUpdate(ctx context.Context, conn *securitylake.Client, arn st } type dataLakeResourceModel struct { - Configurations types.Set `tfsdk:"configuration"` - DataLakeARN types.String `tfsdk:"arn"` - ID types.String `tfsdk:"id"` - MetaStoreManagerRoleARN fwtypes.ARN `tfsdk:"meta_store_manager_role_arn"` - Tags types.Map `tfsdk:"tags"` - TagsAll types.Map `tfsdk:"tags_all"` - Timeouts timeouts.Value `tfsdk:"timeouts"` + Configurations fwtypes.ListNestedObjectValueOf[dataLakeConfigurationModel] `tfsdk:"configuration"` + DataLakeARN types.String `tfsdk:"arn"` + ID types.String `tfsdk:"id"` + MetaStoreManagerRoleARN fwtypes.ARN `tfsdk:"meta_store_manager_role_arn"` + Tags types.Map `tfsdk:"tags"` + TagsAll types.Map `tfsdk:"tags_all"` + Timeouts timeouts.Value `tfsdk:"timeouts"` } func (model *dataLakeResourceModel) setID() { From 276b23fccffa323035d4c9b040d86147e0806ea5 Mon Sep 17 00:00:00 2001 From: markos kandylis Date: Mon, 4 Dec 2023 22:51:52 +0000 Subject: [PATCH 19/45] Fixed the computed block issue --- internal/service/securitylake/data_lake.go | 23 +++++++-------- .../service/securitylake/data_lake_test.go | 28 ++++++++++++++++--- 2 files changed, 36 insertions(+), 15 deletions(-) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index 08620d3a4769..5f9a8e64230f 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -21,8 +21,8 @@ import ( "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + 
"github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" @@ -85,19 +85,20 @@ func (r *resourceDataLake) Schema(ctx context.Context, req resource.SchemaReques "region": schema.StringAttribute{ Required: true, }, - }, - Blocks: map[string]schema.Block{ - "encryption_configuration": schema.ListNestedBlock{ - NestedObject: schema.NestedBlockObject{ - Attributes: map[string]schema.Attribute{ - "kms_key_id": schema.StringAttribute{ - Optional: true, - Computed: true, - Default: stringdefault.StaticString("S3_MANAGED_KEY"), - }, + "encryption_configuration": schema.ListAttribute{ + Computed: true, + Optional: true, + ElementType: types.ObjectType{ + AttrTypes: map[string]attr.Type{ + "kms_key_id": types.StringType, }, }, + PlanModifiers: []planmodifier.List{ + listplanmodifier.UseStateForUnknown(), + }, }, + }, + Blocks: map[string]schema.Block{ "lifecycle_configuration": schema.ListNestedBlock{ Validators: []validator.List{ listvalidator.SizeAtMost(1), diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index 340dcae41704..95a4d164eb96 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -305,6 +305,8 @@ func testAccDataLakeConfigBaseConfig(rName string) string { return fmt.Sprintf(` + + data "aws_caller_identity" "current" {} resource "aws_iam_role" "meta_store_manager" { @@ -467,6 +469,28 @@ resource "aws_iam_role_policy" "datalake_s3_replication" { } EOF } + +resource "aws_kms_key" "test" { + description = %[1]q + + policy = jsonencode({ + Version = 
"2012-10-17" + Id = %[1]q + + Statement = [{ + Sid = "Enable IAM User Permissions" + Effect = "Allow" + Action = "kms:*" + Resource = "*" + + Principal = { + AWS = "*" + } + }] + }) +} + + `, rName) } @@ -479,10 +503,6 @@ resource "aws_securitylake_data_lake" "test" { configurations { region = "eu-west-1" - - encryption_configuration { - kms_key_id = "S3_MANAGED_KEY" - } } tags = { Name = %[1]q From 9984e3d93343e1cd902c389501fef31be5dc7726 Mon Sep 17 00:00:00 2001 From: markos kandylis Date: Tue, 5 Dec 2023 14:13:12 +0000 Subject: [PATCH 20/45] Fixed the last errors --- internal/service/securitylake/data_lake.go | 1 + .../service/securitylake/data_lake_test.go | 56 +++++++++---------- 2 files changed, 26 insertions(+), 31 deletions(-) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index 5f9a8e64230f..87b06f53fcc1 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -605,6 +605,7 @@ func expandDataLakeConfigurations(ctx context.Context, tfList []dataLakeConfigur var replicationConfiguration []dataLakeConfigurationsReplicationConfiguration for _, tfObj := range tfList { + diags.Append(tfObj.EncryptionConfiguration.ElementsAs(ctx, &encryptionConfiguration, false)...) diags.Append(tfObj.LifecycleConfiguration.ElementsAs(ctx, &lifecycleConfiguration, false)...) diags.Append(tfObj.ReplicationConfiguration.ElementsAs(ctx, &replicationConfiguration, false)...) 
lifecycleConfiguration, d := expandLifecycleConfiguration(ctx, lifecycleConfiguration) diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index 95a4d164eb96..e98e26b2dad6 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -46,8 +46,6 @@ func TestAccSecurityLakeDataLake_basic(t *testing.T) { testAccCheckDataLakeExists(ctx, resourceName, &datalake), resource.TestCheckTypeSetElemAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), resource.TestCheckResourceAttr(resourceName, "configurations.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), ), }, { @@ -86,7 +84,7 @@ func TestAccSecurityLakeDataLake_lifeCycle(t *testing.T) { resource.TestCheckTypeSetElemAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), resource.TestCheckResourceAttr(resourceName, "configurations.#", "1"), resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "aws_kms_key.test", "id"), resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.#", "2"), resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.0.days", "31"), @@ -133,7 +131,7 @@ func TestAccSecurityLakeDataLake_lifeCycleUpdate(t *testing.T) { 
resource.TestCheckTypeSetElemAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), resource.TestCheckResourceAttr(resourceName, "configurations.#", "1"), resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "aws_kms_key.test", "id"), resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.#", "2"), resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.0.days", "31"), @@ -157,7 +155,7 @@ func TestAccSecurityLakeDataLake_lifeCycleUpdate(t *testing.T) { resource.TestCheckTypeSetElemAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), resource.TestCheckResourceAttr(resourceName, "configurations.#", "1"), resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "aws_kms_key.test", "id"), resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.#", "1"), resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.0.days", "31"), @@ -202,7 +200,6 @@ func TestAccSecurityLakeDataLake_replication(t *testing.T) { resource.TestCheckTypeSetElemAttrPair(resourceName, 
"meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), resource.TestCheckResourceAttr(resourceName, "configurations.#", "1"), resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.#", "1"), resource.TestCheckResourceAttr(resourceName, "configurations.0.lifecycle_configuration.0.transitions.0.days", "31"), @@ -471,26 +468,26 @@ resource "aws_iam_role_policy" "datalake_s3_replication" { } resource "aws_kms_key" "test" { - description = %[1]q - - policy = jsonencode({ - Version = "2012-10-17" - Id = %[1]q - - Statement = [{ - Sid = "Enable IAM User Permissions" - Effect = "Allow" - Action = "kms:*" - Resource = "*" - - Principal = { - AWS = "*" - } - }] - }) + deletion_window_in_days = 7 + + policy = < Date: Tue, 5 Dec 2023 14:21:00 +0000 Subject: [PATCH 21/45] Fixed the formating error --- .../service/securitylake/data_lake_test.go | 20 +++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index e98e26b2dad6..62b5ab4109ac 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -304,6 +304,10 @@ func testAccDataLakeConfigBaseConfig(rName string) string { + + + + data "aws_caller_identity" "current" {} resource "aws_iam_role" "meta_store_manager" { @@ -468,9 +472,9 @@ resource "aws_iam_role_policy" "datalake_s3_replication" { } resource "aws_kms_key" "test" { - deletion_window_in_days = 7 - - policy = < Date: Tue, 5 Dec 2023 13:59:26 -0500 Subject: [PATCH 22/45] Tweak comments. 
--- internal/framework/types/list_nested_objectof.go | 2 +- internal/framework/types/set_nested_objectof.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/framework/types/list_nested_objectof.go b/internal/framework/types/list_nested_objectof.go index 44f3972c60df..9a60f67f93e4 100644 --- a/internal/framework/types/list_nested_objectof.go +++ b/internal/framework/types/list_nested_objectof.go @@ -142,7 +142,7 @@ func nestedObjectTypeNewObjectSlice[T any](_ context.Context, len, cap int) ([]* return make([]*T, len, cap), diags } -// ListNestedObjectValueOf represents a Terraform Plugin Framework List value whose elements are of type ObjectTypeOf. +// ListNestedObjectValueOf represents a Terraform Plugin Framework List value whose elements are of type `ObjectTypeOf[T]`. type ListNestedObjectValueOf[T any] struct { basetypes.ListValue } diff --git a/internal/framework/types/set_nested_objectof.go b/internal/framework/types/set_nested_objectof.go index f3abf1505089..f2a1cdbb7566 100644 --- a/internal/framework/types/set_nested_objectof.go +++ b/internal/framework/types/set_nested_objectof.go @@ -130,7 +130,7 @@ func (t setNestedObjectTypeOf[T]) ValueFromObjectSlice(ctx context.Context, slic return nil, diags } -// SetNestedObjectValueOf represents a Terraform Plugin Framework Set value whose elements are of type ObjectTypeOf. +// SetNestedObjectValueOf represents a Terraform Plugin Framework Set value whose elements are of type `ObjectTypeOf[T]`. type SetNestedObjectValueOf[T any] struct { basetypes.SetValue } From f4e22aafaa9f21b18d1cc358a2f5c01991360def Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 5 Dec 2023 15:04:14 -0500 Subject: [PATCH 23/45] Add 'flex.SetValueOf' etc. 
--- internal/framework/types/setof.go | 138 ++++++++++++++++++++++++++++++ 1 file changed, 138 insertions(+) create mode 100644 internal/framework/types/setof.go diff --git a/internal/framework/types/setof.go b/internal/framework/types/setof.go new file mode 100644 index 000000000000..25b294cf7e04 --- /dev/null +++ b/internal/framework/types/setof.go @@ -0,0 +1,138 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package types + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" +) + +// setTypeOf is the attribute type of a SetValueOf. +type setTypeOf[T attr.Value] struct { + basetypes.SetType +} + +var ( + _ basetypes.SetTypable = (*setTypeOf[basetypes.StringValue])(nil) + _ basetypes.SetValuable = (*SetValueOf[basetypes.StringValue])(nil) +) + +func newAttrTypeOf[T attr.Value](ctx context.Context) attr.Type { + var zero T + return zero.Type(ctx) +} + +func NewSetTypeOf[T attr.Value](ctx context.Context) setTypeOf[T] { + return setTypeOf[T]{basetypes.SetType{ElemType: newAttrTypeOf[T](ctx)}} +} + +func (t setTypeOf[T]) Equal(o attr.Type) bool { + other, ok := o.(setTypeOf[T]) + + if !ok { + return false + } + + return t.SetType.Equal(other.SetType) +} + +func (t setTypeOf[T]) String() string { + var zero T + return fmt.Sprintf("SetTypeOf[%T]", zero) +} + +func (t setTypeOf[T]) ValueFromSet(ctx context.Context, in basetypes.SetValue) (basetypes.SetValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + if in.IsNull() { + return NewSetValueOfNull[T](ctx), diags + } + if in.IsUnknown() { + return NewSetValueOfUnknown[T](ctx), diags + } + + setValue, d := basetypes.NewSetValue(newAttrTypeOf[T](ctx), in.Elements()) + diags.Append(d...) 
+ if diags.HasError() { + return NewSetValueOfUnknown[T](ctx), diags + } + + value := SetValueOf[T]{ + SetValue: setValue, + } + + return value, diags +} + +func (t setTypeOf[T]) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + attrValue, err := t.SetType.ValueFromTerraform(ctx, in) + + if err != nil { + return nil, err + } + + setValue, ok := attrValue.(basetypes.SetValue) + + if !ok { + return nil, fmt.Errorf("unexpected value type of %T", attrValue) + } + + setValuable, diags := t.ValueFromSet(ctx, setValue) + + if diags.HasError() { + return nil, fmt.Errorf("unexpected error converting SetValue to SetValuable: %v", diags) + } + + return setValuable, nil +} + +func (t setTypeOf[T]) ValueType(ctx context.Context) attr.Value { + return SetValueOf[T]{} +} + +// SetValueOf represents a Terraform Plugin Framework Set value whose elements are of type `T`. +type SetValueOf[T attr.Value] struct { + basetypes.SetValue +} + +func (v SetValueOf[T]) Equal(o attr.Value) bool { + other, ok := o.(SetValueOf[T]) + + if !ok { + return false + } + + return v.SetValue.Equal(other.SetValue) +} + +func (v SetValueOf[T]) Type(ctx context.Context) attr.Type { + return NewSetTypeOf[T](ctx) +} + +func NewSetValueOfNull[T attr.Value](ctx context.Context) SetValueOf[T] { + return SetValueOf[T]{SetValue: basetypes.NewSetNull(newAttrTypeOf[T](ctx))} +} + +func NewSetValueOfUnknown[T attr.Value](ctx context.Context) SetValueOf[T] { + return SetValueOf[T]{SetValue: basetypes.NewSetUnknown(newAttrTypeOf[T](ctx))} +} + +func NewSetValueOf[T attr.Value](ctx context.Context, elements []attr.Value) (SetValueOf[T], diag.Diagnostics) { + v, diags := basetypes.NewSetValue(newAttrTypeOf[T](ctx), elements) + if diags.HasError() { + return NewSetValueOfUnknown[T](ctx), diags + } + + return SetValueOf[T]{SetValue: v}, diags +} + +func NewSetValueOfMust[T attr.Value](ctx context.Context, elements []attr.Value) SetValueOf[T] { + return fwdiag.Must(NewSetValueOf[T](ctx, 
elements)) +} From 0100044d538ddabcc0801e2bee79312b2ff32e2b Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 5 Dec 2023 15:08:15 -0500 Subject: [PATCH 24/45] Add 'flex.SetOfStringType'. --- internal/framework/types/setof.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/framework/types/setof.go b/internal/framework/types/setof.go index 25b294cf7e04..b8463aa4ad84 100644 --- a/internal/framework/types/setof.go +++ b/internal/framework/types/setof.go @@ -19,6 +19,10 @@ type setTypeOf[T attr.Value] struct { basetypes.SetType } +var ( + SetOfStringType = setTypeOf[basetypes.StringValue]{basetypes.SetType{ElemType: basetypes.StringType{}}} +) + var ( _ basetypes.SetTypable = (*setTypeOf[basetypes.StringValue])(nil) _ basetypes.SetValuable = (*SetValueOf[basetypes.StringValue])(nil) From b975bd7a1c74c195a33199d528132191bb14c842 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 5 Dec 2023 15:19:53 -0500 Subject: [PATCH 25/45] Add 'TestSetOfStringFromTerraform'. --- internal/framework/types/setof_test.go | 57 ++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 internal/framework/types/setof_test.go diff --git a/internal/framework/types/setof_test.go b/internal/framework/types/setof_test.go new file mode 100644 index 000000000000..017a587170f5 --- /dev/null +++ b/internal/framework/types/setof_test.go @@ -0,0 +1,57 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package types_test + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-go/tftypes" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" +) + +func TestSetOfStringFromTerraform(t *testing.T) { + t.Parallel() + + ctx := context.Background() + tests := map[string]struct { + val tftypes.Value + expected attr.Value + }{ + "values": { + val: tftypes.NewValue(tftypes.Set{ + ElementType: tftypes.String, + }, []tftypes.Value{ + tftypes.NewValue(tftypes.String, "red"), + tftypes.NewValue(tftypes.String, "blue"), + tftypes.NewValue(tftypes.String, "green"), + }), + expected: fwtypes.NewSetValueOfMust[types.String](ctx, []attr.Value{ + types.StringValue("red"), + types.StringValue("blue"), + types.StringValue("green"), + }), + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + val, err := fwtypes.SetOfStringType.ValueFromTerraform(ctx, test.val) + + if err != nil { + t.Fatalf("got unexpected error: %s", err) + } + + if diff := cmp.Diff(val, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} From 03a96b9d5b4085979e7243b993f22547c4271d40 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 5 Dec 2023 15:37:34 -0500 Subject: [PATCH 26/45] Handle empty ID in 'ProblemStandardMessage'. 
--- internal/create/errors.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/internal/create/errors.go b/internal/create/errors.go index 4a9c5d8f0ed2..0df1a206a27d 100644 --- a/internal/create/errors.go +++ b/internal/create/errors.go @@ -41,9 +41,15 @@ func ProblemStandardMessage(service, action, resource, id string, gotError error } if gotError == nil { + if id == "" { + return fmt.Sprintf("%s %s %s", action, hf, resource) + } return fmt.Sprintf("%s %s %s (%s)", action, hf, resource, id) } + if id == "" { + return fmt.Sprintf("%s %s %s: %s", action, hf, resource, gotError) + } return fmt.Sprintf("%s %s %s (%s): %s", action, hf, resource, id, gotError) } From 439fe34467c1eeb9d19ad51ebce2e68bcd0fb055 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 5 Dec 2023 15:40:55 -0500 Subject: [PATCH 27/45] r/aws_securitylake_data_lake: Use 'SetOf' for 'replication_configuration.regions'. --- internal/service/securitylake/data_lake.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index 043b4b78bc21..2e45b088c9a9 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -148,6 +148,7 @@ func (r *dataLakeResource) Schema(ctx context.Context, req resource.SchemaReques NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "regions": schema.SetAttribute{ + CustomType: fwtypes.SetOfStringType, ElementType: types.StringType, Optional: true, }, @@ -465,8 +466,8 @@ type dataLakeLifecycleTransitionModel struct { } type dataLakeReplicationConfigurationModel struct { - Regions types.Set `tfsdk:"regions"` - RoleARN fwtypes.ARN `tfsdk:"role_arn"` + Regions fwtypes.SetValueOf[types.String] `tfsdk:"regions"` + RoleARN fwtypes.ARN `tfsdk:"role_arn"` } func findDataLakeByARN(ctx context.Context, conn *securitylake.Client, arn string) (*awstypes.DataLakeResource, error) { From 
d5881f03a8028c34140cd1d8e5c2fe8d324351f1 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 6 Dec 2023 08:44:26 -0500 Subject: [PATCH 28/45] r/aws_securitylake_data_lake: Correct 'waitDataLakeDeleted'. --- internal/service/securitylake/data_lake.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index 2e45b088c9a9..f7ecd12b57d5 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -338,6 +338,12 @@ func waitDataLakeCreated(ctx context.Context, conn *securitylake.Client, arn str outputRaw, err := stateConf.WaitForStateContext(ctx) if output, ok := outputRaw.(*awstypes.DataLakeResource); ok { + if v := output.UpdateStatus; v != nil { + if v := v.Exception; v != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(v.Code), aws.ToString(v.Reason))) + } + } + return output, err } @@ -371,7 +377,7 @@ func waitDataLakeDeleted(ctx context.Context, conn *securitylake.Client, arn str stateConf := &retry.StateChangeConf{ Pending: enum.Slice(awstypes.DataLakeStatusInitialized, awstypes.DataLakeStatusCompleted), Target: []string{}, - Refresh: statusDataLakeUpdate(ctx, conn, arn), + Refresh: statusDataLakeCreate(ctx, conn, arn), Timeout: timeout, } From bfd0e280e2f877d29fe4219f111803edbbd0a887 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 6 Dec 2023 08:44:57 -0500 Subject: [PATCH 29/45] r/aws_securitylake_data_lake: Tidy up acceptance tests. 
--- .../service/securitylake/data_lake_test.go | 77 ++++++++----------- 1 file changed, 33 insertions(+), 44 deletions(-) diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index e73790172023..1a28ee5da236 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -190,6 +190,7 @@ func TestAccSecurityLakeDataLake_replication(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) + acctest.PreCheckMultipleRegion(t, 2) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLake), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -300,12 +301,10 @@ func testAccCheckDataLakeExists(ctx context.Context, name string, datalake *type } } -func testAccDataLakeConfigBaseConfig(rName string) string { - //lintignore:AWSAT003,AWSAT005 - return fmt.Sprintf(` - - +func testAccDataLakeConfigConfig_base(rName string) string { + return ` data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} resource "aws_iam_role" "meta_store_manager" { name = "AmazonSecurityLakeMetaStoreManager" @@ -327,9 +326,6 @@ resource "aws_iam_role" "meta_store_manager" { ] } POLICY - tags = { - Name = %[1]q - } } resource "aws_iam_role_policy" "meta_store_manager" { @@ -348,7 +344,7 @@ resource "aws_iam_role_policy" "meta_store_manager" { "logs:PutLogEvents" ], "Resource": [ - "arn:aws:logs:*:${data.aws_caller_identity.current.account_id}:log-group:/aws/lambda/SecurityLake_Glue_Partition_Updater_Lambda*" + "arn:${data.aws_partition.current.partition}:logs:*:${data.aws_caller_identity.current.account_id}:log-group:/aws/lambda/SecurityLake_Glue_Partition_Updater_Lambda*" ] }, { @@ -358,7 +354,7 @@ resource "aws_iam_role_policy" "meta_store_manager" { "logs:CreateLogGroup" ], "Resource": [ - 
"arn:aws:logs:*:${data.aws_caller_identity.current.account_id}:/aws/lambda/SecurityLake_Glue_Partition_Updater_Lambda*" + "arn:${data.aws_partition.current.partition}:logs:*:${data.aws_caller_identity.current.account_id}:/aws/lambda/SecurityLake_Glue_Partition_Updater_Lambda*" ] }, { @@ -369,9 +365,9 @@ resource "aws_iam_role_policy" "meta_store_manager" { "glue:BatchCreatePartition" ], "Resource": [ - "arn:aws:glue:*:*:table/amazon_security_lake_glue_db*/*", - "arn:aws:glue:*:*:database/amazon_security_lake_glue_db*", - "arn:aws:glue:*:*:catalog" + "arn:${data.aws_partition.current.partition}:glue:*:*:table/amazon_security_lake_glue_db*/*", + "arn:${data.aws_partition.current.partition}:glue:*:*:database/amazon_security_lake_glue_db*", + "arn:${data.aws_partition.current.partition}:glue:*:*:catalog" ] }, { @@ -383,7 +379,7 @@ resource "aws_iam_role_policy" "meta_store_manager" { "sqs:GetQueueAttributes" ], "Resource": [ - "arn:aws:sqs:*:${data.aws_caller_identity.current.account_id}:SecurityLake*" + "arn:${data.aws_partition.current.partition}:sqs:*:${data.aws_caller_identity.current.account_id}:SecurityLake*" ] } ] @@ -406,9 +402,6 @@ resource "aws_iam_role" "datalake_s3_replication" { }] } POLICY - tags = { - Name = %[1]q - } } resource "aws_iam_role_policy" "datalake_s3_replication" { @@ -433,8 +426,8 @@ resource "aws_iam_role_policy" "datalake_s3_replication" { ], "Effect": "Allow", "Resource": [ - "arn:aws:s3:::aws-security-data-lake*", - "arn:aws:s3:::aws-security-data-lake*/*" + "arn:${data.aws_partition.current.partition}:s3:::aws-security-data-lake*", + "arn:${data.aws_partition.current.partition}:s3:::aws-security-data-lake*/*" ], "Condition": { "StringEquals": { @@ -453,7 +446,7 @@ resource "aws_iam_role_policy" "datalake_s3_replication" { ], "Effect": "Allow", "Resource": [ - "arn:aws:s3:::aws-security-data-lake*/*" + "arn:${data.aws_partition.current.partition}:s3:::aws-security-data-lake*/*" ], "Condition": { "StringEquals": { @@ -465,20 +458,18 @@ 
resource "aws_iam_role_policy" "datalake_s3_replication" { } ] } - EOF +EOF } -`, rName) +` } func testAccDataLakeConfig_basic(rName string) string { - //lintignore:AWSAT003,AWSAT005 - return acctest.ConfigCompose(testAccDataLakeConfigBaseConfig(rName), fmt.Sprintf(` - + return acctest.ConfigCompose(testAccDataLakeConfigConfig_base(rName), fmt.Sprintf(` resource "aws_securitylake_data_lake" "test" { meta_store_manager_role_arn = aws_iam_role.meta_store_manager.arn configuration { - region = "eu-west-1" + region = %[2]q encryption_configuration { kms_key_id = "S3_MANAGED_KEY" @@ -489,19 +480,16 @@ resource "aws_securitylake_data_lake" "test" { } depends_on = [aws_iam_role.meta_store_manager] } -`, rName)) +`, rName, acctest.Region())) } func testAccDataLakeConfig_lifeCycle(rName string) string { - //lintignore:AWSAT003,AWSAT005 - return acctest.ConfigCompose(testAccDataLakeConfigBaseConfig(rName), fmt.Sprintf(` - - + return acctest.ConfigCompose(testAccDataLakeConfigConfig_base(rName), fmt.Sprintf(` resource "aws_securitylake_data_lake" "test" { meta_store_manager_role_arn = aws_iam_role.meta_store_manager.arn configuration { - region = "eu-west-1" + region = %[2]q encryption_configuration { kms_key_id = "S3_MANAGED_KEY" @@ -521,23 +509,23 @@ resource "aws_securitylake_data_lake" "test" { } } } + tags = { Name = %[1]q } + depends_on = [aws_iam_role.meta_store_manager] } -`, rName)) +`, rName, acctest.Region())) } func testAccDataLakeConfig_lifeCycleUpdate(rName string) string { - //lintignore:AWSAT003,AWSAT005 - return acctest.ConfigCompose(testAccDataLakeConfigBaseConfig(rName), fmt.Sprintf(` - + return acctest.ConfigCompose(testAccDataLakeConfigConfig_base(rName), fmt.Sprintf(` resource "aws_securitylake_data_lake" "test" { meta_store_manager_role_arn = aws_iam_role.meta_store_manager.arn configuration { - region = "eu-west-1" + region = %[2]q encryption_configuration { kms_key_id = "S3_MANAGED_KEY" @@ -553,24 +541,23 @@ resource "aws_securitylake_data_lake" 
"test" { } } } + tags = { Name = %[1]q } + depends_on = [aws_iam_role.meta_store_manager] } -`, rName)) +`, rName, acctest.Region())) } func testAccDataLakeConfig_replication(rName string) string { - //lintignore:AWSAT003,AWSAT005 return acctest.ConfigCompose(testAccDataLakeConfig_basic(rName), fmt.Sprintf(` - - resource "aws_securitylake_data_lake" "region_2" { meta_store_manager_role_arn = aws_iam_role.meta_store_manager.arn configuration { - region = "eu-west-2" + region = %[3]q encryption_configuration { kms_key_id = "S3_MANAGED_KEY" @@ -587,13 +574,15 @@ resource "aws_securitylake_data_lake" "region_2" { } replication_configuration { role_arn = aws_iam_role.datalake_s3_replication.arn - regions = ["eu-west-1"] + regions = [%[2]q] } } + tags = { Name = %[1]q } + depends_on = [aws_iam_role.meta_store_manager, aws_iam_role.datalake_s3_replication, aws_securitylake_data_lake.test] } -`, rName)) +`, rName, acctest.Region(), acctest.AlternateRegion())) } From 3b0136adbf5ba01ad2a59f258d9d28c0de4e5ba7 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 6 Dec 2023 08:55:48 -0500 Subject: [PATCH 30/45] Add 'TestAccSecurityLakeDataLake_tags'. 
--- .../service/securitylake/data_lake_test.go | 184 ++++++++++++++---- 1 file changed, 143 insertions(+), 41 deletions(-) diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index 1a28ee5da236..4aef733f651e 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -42,19 +42,104 @@ func TestAccSecurityLakeDataLake_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccDataLakeConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckDataLakeExists(ctx, resourceName, &datalake), - resource.TestCheckTypeSetElemAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.region", acctest.Region()), + resource.TestCheckResourceAttr(resourceName, "configuration.0.replication_configuration.#", "0"), + resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn", "tags"}, + ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn"}, + }, + }, + }) +} + +func TestAccSecurityLakeDataLake_disappears(t *testing.T) { + ctx := acctest.Context(t) + var datalake types.DataLakeResource + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_securitylake_data_lake.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SecurityLake) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLake), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataLakeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataLakeConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataLakeExists(ctx, resourceName, &datalake), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfsecuritylake.ResourceDataLake, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccSecurityLakeDataLake_tags(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var datalake types.DataLakeResource + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_securitylake_data_lake.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SecurityLake) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLake), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataLakeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataLakeConfig_tags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataLakeExists(ctx, resourceName, &datalake), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn"}, + }, + { + Config: testAccDataLakeConfig_tags2(rName, "key1", 
"value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataLakeExists(ctx, resourceName, &datalake), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccDataLakeConfig_tags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataLakeExists(ctx, resourceName, &datalake), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), }, }, }) @@ -83,7 +168,7 @@ func TestAccSecurityLakeDataLake_lifeCycle(t *testing.T) { Config: testAccDataLakeConfig_lifeCycle(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataLakeExists(ctx, resourceName, &datalake), - resource.TestCheckTypeSetElemAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), @@ -101,7 +186,7 @@ func TestAccSecurityLakeDataLake_lifeCycle(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn", "tags"}, + ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn"}, }, }, }) @@ -130,7 +215,7 @@ func TestAccSecurityLakeDataLake_lifeCycleUpdate(t *testing.T) { Config: testAccDataLakeConfig_lifeCycle(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataLakeExists(ctx, resourceName, &datalake), - 
resource.TestCheckTypeSetElemAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), @@ -148,13 +233,13 @@ func TestAccSecurityLakeDataLake_lifeCycleUpdate(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn", "tags"}, + ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn"}, }, { Config: testAccDataLakeConfig_lifeCycleUpdate(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataLakeExists(ctx, resourceName, &datalake), - resource.TestCheckTypeSetElemAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), @@ -170,7 +255,7 @@ func TestAccSecurityLakeDataLake_lifeCycleUpdate(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn", "tags"}, + ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn"}, }, }, }) @@ -200,7 +285,7 @@ func TestAccSecurityLakeDataLake_replication(t *testing.T) { Config: testAccDataLakeConfig_replication(rName), Check: 
resource.ComposeTestCheckFunc( testAccCheckDataLakeExists(ctx, resourceName, &datalake), - resource.TestCheckTypeSetElemAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), @@ -211,42 +296,16 @@ func TestAccSecurityLakeDataLake_replication(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.0.days", "300"), resource.TestCheckResourceAttr(resourceName, "configuration.0.replication_configuration.#", "1"), - resource.TestCheckTypeSetElemAttrPair(resourceName, "configuration.0.replication_configuration.0.role_arn", "aws_iam_role.datalake_s3_replication", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "configuration.0.replication_configuration.0.role_arn", "aws_iam_role.datalake_s3_replication", "arn"), resource.TestCheckResourceAttr(resourceName, "configuration.0.replication_configuration.0.regions.#", "1"), + resource.TestCheckTypeSetElemAttr(resourceName, "configuration.0.replication_configuration.0.regions.*", acctest.AlternateRegion()), ), }, { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn", "tags"}, - }, - }, - }) -} - -func TestAccSecurityLakeDataLake_disappears(t *testing.T) { - ctx := acctest.Context(t) - var datalake types.DataLakeResource - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := 
"aws_securitylake_data_lake.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.SecurityLake) - }, - ErrorCheck: acctest.ErrorCheck(t, names.SecurityLake), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDataLakeDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccDataLakeConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDataLakeExists(ctx, resourceName, &datalake), - acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfsecuritylake.ResourceDataLake, resourceName), - ), - ExpectNonEmptyPlan: true, + ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn"}, }, }, }) @@ -475,12 +534,55 @@ resource "aws_securitylake_data_lake" "test" { kms_key_id = "S3_MANAGED_KEY" } } + + depends_on = [aws_iam_role.meta_store_manager] +} +`, rName, acctest.Region())) +} + +func testAccDataLakeConfig_tags1(rName, tag1Key, tag1Value string) string { + return acctest.ConfigCompose(testAccDataLakeConfigConfig_base(rName), fmt.Sprintf(` +resource "aws_securitylake_data_lake" "test" { + meta_store_manager_role_arn = aws_iam_role.meta_store_manager.arn + + configuration { + region = %[4]q + + encryption_configuration { + kms_key_id = "S3_MANAGED_KEY" + } + } + tags = { - Name = %[1]q + %[2]q = %[3]q } + depends_on = [aws_iam_role.meta_store_manager] } -`, rName, acctest.Region())) +`, rName, tag1Key, tag1Value, acctest.Region())) +} + +func testAccDataLakeConfig_tags2(rName, tag1Key, tag1Value, tag2Key, tag2Value string) string { + return acctest.ConfigCompose(testAccDataLakeConfigConfig_base(rName), fmt.Sprintf(` +resource "aws_securitylake_data_lake" "test" { + meta_store_manager_role_arn = aws_iam_role.meta_store_manager.arn + + configuration { + region = %[6]q + + encryption_configuration { + kms_key_id = "S3_MANAGED_KEY" + } + } + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } + + depends_on 
= [aws_iam_role.meta_store_manager] +} +`, rName, tag1Key, tag1Value, tag2Key, tag2Value, acctest.Region())) } func testAccDataLakeConfig_lifeCycle(rName string) string { From 148a44fd20c03a4b57bc08fac4061258904cfd66 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 6 Dec 2023 09:00:31 -0500 Subject: [PATCH 31/45] securitylake: Serialize acceptance tests. --- .../service/securitylake/data_lake_test.go | 24 ++++++++--------- .../service/securitylake/securitylake_test.go | 27 +++++++++++++++++++ 2 files changed, 39 insertions(+), 12 deletions(-) create mode 100644 internal/service/securitylake/securitylake_test.go diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index 4aef733f651e..067f2143674f 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -21,7 +21,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccSecurityLakeDataLake_basic(t *testing.T) { +func testAccDataLake_basic(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -31,7 +31,7 @@ func TestAccSecurityLakeDataLake_basic(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_securitylake_data_lake.test" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) @@ -64,13 +64,13 @@ func TestAccSecurityLakeDataLake_basic(t *testing.T) { }) } -func TestAccSecurityLakeDataLake_disappears(t *testing.T) { +func testAccDataLake_disappears(t *testing.T) { ctx := acctest.Context(t) var datalake types.DataLakeResource rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_securitylake_data_lake.test" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) @@ -91,7 +91,7 @@ func TestAccSecurityLakeDataLake_disappears(t *testing.T) { }) } -func TestAccSecurityLakeDataLake_tags(t *testing.T) { +func testAccDataLake_tags(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -101,7 +101,7 @@ func TestAccSecurityLakeDataLake_tags(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_securitylake_data_lake.test" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) @@ -145,7 +145,7 @@ func TestAccSecurityLakeDataLake_tags(t *testing.T) { }) } -func TestAccSecurityLakeDataLake_lifeCycle(t *testing.T) { +func testAccDataLake_lifeCycle(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -155,7 +155,7 @@ func TestAccSecurityLakeDataLake_lifeCycle(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_securitylake_data_lake.test" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) @@ -192,7 +192,7 @@ func TestAccSecurityLakeDataLake_lifeCycle(t *testing.T) { }) } -func TestAccSecurityLakeDataLake_lifeCycleUpdate(t *testing.T) { +func testAccDataLake_lifeCycleUpdate(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -202,7 +202,7 @@ func TestAccSecurityLakeDataLake_lifeCycleUpdate(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_securitylake_data_lake.test" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) @@ -261,7 +261,7 @@ func TestAccSecurityLakeDataLake_lifeCycleUpdate(t *testing.T) { }) } -func TestAccSecurityLakeDataLake_replication(t *testing.T) { +func testAccDataLake_replication(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -271,7 +271,7 @@ func TestAccSecurityLakeDataLake_replication(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_securitylake_data_lake.region_2" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) diff --git a/internal/service/securitylake/securitylake_test.go b/internal/service/securitylake/securitylake_test.go new file mode 100644 index 000000000000..b54dd0f083f0 --- /dev/null +++ b/internal/service/securitylake/securitylake_test.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package securitylake_test + +import ( + "testing" + + "github.com/hashicorp/terraform-provider-aws/internal/acctest" +) + +func TestAccSecurityLake_serial(t *testing.T) { + t.Parallel() + + testCases := map[string]map[string]func(t *testing.T){ + "DataLake": { + "basic": testAccDataLake_basic, + "disappears": testAccDataLake_disappears, + "tags": testAccDataLake_tags, + "lifecycle": testAccDataLake_lifeCycle, + "lifecycleUpdate": testAccDataLake_lifeCycleUpdate, + "replication": testAccDataLake_replication, + }, + } + + acctest.RunSerialTests2Levels(t, testCases, 0) +} From 881a173a4cc925e282fa6581f8051ba0d84765c7 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 6 Dec 2023 10:41:05 -0500 Subject: [PATCH 32/45] r/aws_securitylake_data_lake: Fix Read. 
--- internal/service/securitylake/data_lake.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index f7ecd12b57d5..f5ece90e5487 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -222,6 +222,11 @@ func (r *dataLakeResource) Read(ctx context.Context, req resource.ReadRequest, r return } + if err := data.InitFromID(); err != nil { + resp.Diagnostics.AddError("parsing resource ID", err.Error()) + return + } + dataLake, err := findDataLakeByARN(ctx, conn, data.ID.ValueString()) if tfresource.NotFound(err) { @@ -237,11 +242,14 @@ func (r *dataLakeResource) Read(ctx context.Context, req resource.ReadRequest, r return } - resp.Diagnostics.Append(flex.Flatten(ctx, dataLake, &data)...) + var configuration dataLakeConfigurationModel + resp.Diagnostics.Append(flex.Flatten(ctx, dataLake, &configuration)...) if resp.Diagnostics.HasError() { return } + data.Configurations = fwtypes.NewListNestedObjectValueOfPtr(ctx, &configuration) + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) } @@ -442,6 +450,12 @@ type dataLakeResourceModel struct { Timeouts timeouts.Value `tfsdk:"timeouts"` } +func (model *dataLakeResourceModel) InitFromID() error { + model.DataLakeARN = model.ID + + return nil +} + func (model *dataLakeResourceModel) setID() { model.ID = model.DataLakeARN } From 7f0fed3f468aae49f562c24066ef834eaa58150d Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 6 Dec 2023 10:44:03 -0500 Subject: [PATCH 33/45] r/aws_securitylake_data_lake: Ensure standalone (non-Org) account for acceptance testing. 
--- internal/service/securitylake/data_lake_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index 067f2143674f..c1ba4b3e6117 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -35,6 +35,7 @@ func testAccDataLake_basic(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) + acctest.PreCheckOrganizationsAccount(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLake), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -74,6 +75,7 @@ func testAccDataLake_disappears(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) + acctest.PreCheckOrganizationsAccount(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLake), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -105,6 +107,7 @@ func testAccDataLake_tags(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) + acctest.PreCheckOrganizationsAccount(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLake), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -159,6 +162,7 @@ func testAccDataLake_lifeCycle(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) + acctest.PreCheckOrganizationsAccount(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLake), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -206,6 +210,7 @@ func testAccDataLake_lifeCycleUpdate(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) + acctest.PreCheckOrganizationsAccount(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLake), ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, @@ -275,6 +280,7 @@ func testAccDataLake_replication(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) + acctest.PreCheckOrganizationsAccount(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLake), From 3d015736299266227ebfe18360db4f13ee59c073 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 6 Dec 2023 10:46:28 -0500 Subject: [PATCH 34/45] Add 'names.SecurityLakeEndpointID'. --- internal/service/securitylake/data_lake_test.go | 12 ++++++------ names/names.go | 1 + 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index c1ba4b3e6117..5ddd0aed46fa 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -37,7 +37,7 @@ func testAccDataLake_basic(t *testing.T) { acctest.PreCheckPartitionHasService(t, names.SecurityLake) acctest.PreCheckOrganizationsAccount(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.SecurityLake), + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataLakeDestroy(ctx), Steps: []resource.TestStep{ @@ -77,7 +77,7 @@ func testAccDataLake_disappears(t *testing.T) { acctest.PreCheckPartitionHasService(t, names.SecurityLake) acctest.PreCheckOrganizationsAccount(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.SecurityLake), + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataLakeDestroy(ctx), Steps: []resource.TestStep{ @@ -109,7 +109,7 @@ func testAccDataLake_tags(t *testing.T) { acctest.PreCheckPartitionHasService(t, names.SecurityLake) acctest.PreCheckOrganizationsAccount(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, 
names.SecurityLake), + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataLakeDestroy(ctx), Steps: []resource.TestStep{ @@ -164,7 +164,7 @@ func testAccDataLake_lifeCycle(t *testing.T) { acctest.PreCheckPartitionHasService(t, names.SecurityLake) acctest.PreCheckOrganizationsAccount(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.SecurityLake), + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataLakeDestroy(ctx), Steps: []resource.TestStep{ @@ -212,7 +212,7 @@ func testAccDataLake_lifeCycleUpdate(t *testing.T) { acctest.PreCheckPartitionHasService(t, names.SecurityLake) acctest.PreCheckOrganizationsAccount(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.SecurityLake), + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataLakeDestroy(ctx), Steps: []resource.TestStep{ @@ -283,7 +283,7 @@ func testAccDataLake_replication(t *testing.T) { acctest.PreCheckOrganizationsAccount(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, names.SecurityLake), + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataLakeDestroy(ctx), Steps: []resource.TestStep{ diff --git a/names/names.go b/names/names.go index 8b73a9860687..9184878d4992 100644 --- a/names/names.go +++ b/names/names.go @@ -73,6 +73,7 @@ const ( RolesAnywhereEndpointID = "rolesanywhere" Route53DomainsEndpointID = "route53domains" SchedulerEndpointID = "scheduler" + SecurityLakeEndpointID = "securitylake" ServiceQuotasEndpointID = "servicequotas" S3EndpointID = "s3" S3ControlEndpointID = "s3-control" From 14c7954f6b59dc84b2f78baec21a2665738b1802 Mon Sep 17 00:00:00 2001 
From: Kit Ewbank Date: Wed, 6 Dec 2023 10:51:54 -0500 Subject: [PATCH 35/45] r/aws_securitylake_data_lake: Add 's3_bucket_arn' attribute. --- internal/service/securitylake/data_lake.go | 12 ++++++++++-- internal/service/securitylake/data_lake_test.go | 1 + website/docs/r/securitylake_data_lake.html.markdown | 1 + 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index f5ece90e5487..843b666bc2d2 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -72,6 +72,7 @@ func (r *dataLakeResource) Schema(ctx context.Context, req resource.SchemaReques stringplanmodifier.RequiresReplace(), }, }, + "s3_bucket_arn": framework.ARNAttributeComputedOnly(), names.AttrTags: tftags.TagsAttribute(), names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), }, @@ -199,10 +200,13 @@ func (r *dataLakeResource) Create(ctx context.Context, req resource.CreateReques } // Set values for unknowns. - data.DataLakeARN = flex.StringToFramework(ctx, output.DataLakes[0].DataLakeArn) + dataLake := &output.DataLakes[0] + data.DataLakeARN = flex.StringToFramework(ctx, dataLake.DataLakeArn) data.setID() - if _, err := waitDataLakeCreated(ctx, conn, data.ID.ValueString(), r.CreateTimeout(ctx, data.Timeouts)); err != nil { + dataLake, err = waitDataLakeCreated(ctx, conn, data.ID.ValueString(), r.CreateTimeout(ctx, data.Timeouts)) + + if err != nil { resp.Diagnostics.AddError( create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForCreation, ResNameDataLake, data.ID.ValueString(), err), err.Error(), @@ -210,6 +214,8 @@ func (r *dataLakeResource) Create(ctx context.Context, req resource.CreateReques return } + data.S3BucketARN = flex.StringToFramework(ctx, dataLake.S3BucketArn) + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
} @@ -249,6 +255,7 @@ func (r *dataLakeResource) Read(ctx context.Context, req resource.ReadRequest, r } data.Configurations = fwtypes.NewListNestedObjectValueOfPtr(ctx, &configuration) + data.S3BucketARN = flex.StringToFramework(ctx, dataLake.S3BucketArn) resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) } @@ -445,6 +452,7 @@ type dataLakeResourceModel struct { DataLakeARN types.String `tfsdk:"arn"` ID types.String `tfsdk:"id"` MetaStoreManagerRoleARN fwtypes.ARN `tfsdk:"meta_store_manager_role_arn"` + S3BucketARN types.String `tfsdk:"s3_bucket_arn"` Tags types.Map `tfsdk:"tags"` TagsAll types.Map `tfsdk:"tags_all"` Timeouts timeouts.Value `tfsdk:"timeouts"` diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index 5ddd0aed46fa..49ec7879c64d 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -52,6 +52,7 @@ func testAccDataLake_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "configuration.0.region", acctest.Region()), resource.TestCheckResourceAttr(resourceName, "configuration.0.replication_configuration.#", "0"), resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), + resource.TestCheckResourceAttrSet(resourceName, "s3_bucket_arn"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, diff --git a/website/docs/r/securitylake_data_lake.html.markdown b/website/docs/r/securitylake_data_lake.html.markdown index 2844ce563112..e983b587be76 100644 --- a/website/docs/r/securitylake_data_lake.html.markdown +++ b/website/docs/r/securitylake_data_lake.html.markdown @@ -99,6 +99,7 @@ Replication Configuration support the following: This resource exports the following attributes in addition to the arguments above: * `arn` - ARN of the Data Lake. +* `s3_bucket_arn` - The ARN for the Amazon Security Lake Amazon S3 bucket. 
* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Timeouts From dd040ce20b9c3104b8290f926bd701f2d32ea874 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 6 Dec 2023 12:03:28 -0500 Subject: [PATCH 36/45] Fix 'testAccDataLake_replication'. --- internal/service/securitylake/data_lake_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index 49ec7879c64d..4f93279e3a63 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -305,7 +305,7 @@ func testAccDataLake_replication(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "configuration.0.replication_configuration.#", "1"), resource.TestCheckResourceAttrPair(resourceName, "configuration.0.replication_configuration.0.role_arn", "aws_iam_role.datalake_s3_replication", "arn"), resource.TestCheckResourceAttr(resourceName, "configuration.0.replication_configuration.0.regions.#", "1"), - resource.TestCheckTypeSetElemAttr(resourceName, "configuration.0.replication_configuration.0.regions.*", acctest.AlternateRegion()), + resource.TestCheckTypeSetElemAttr(resourceName, "configuration.0.replication_configuration.0.regions.*", acctest.Region()), ), }, { From a21ecaf37d3785f837e261b2e7dc53396568c9d6 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 6 Dec 2023 12:16:41 -0500 Subject: [PATCH 37/45] r/aws_securitylake_data_lake: Handle tag retrieval for resource's 'region' != configured AWS Region. 
--- internal/service/securitylake/data_lake.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index 843b666bc2d2..5d93f199a405 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -257,6 +257,14 @@ func (r *dataLakeResource) Read(ctx context.Context, req resource.ReadRequest, r data.Configurations = fwtypes.NewListNestedObjectValueOfPtr(ctx, &configuration) data.S3BucketARN = flex.StringToFramework(ctx, dataLake.S3BucketArn) + // Transparent tagging fails with "ResourceNotFoundException: The request failed because the specified resource doesn't exist." + // if the data lake's AWS Region isn't the configured one. + if region := configuration.Region.ValueString(); region != r.Meta().Region { + if tags, err := listTags(ctx, conn, data.ID.ValueString()); err == nil { + setTagsOut(ctx, Tags(tags)) + } + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) } From 14c5da5428aae395ac7f2b769b16ae7396d282e6 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 6 Dec 2023 15:19:25 -0500 Subject: [PATCH 38/45] Merge 276b23fccffa323035d4c9b040d86147e0806ea5, 9984e3d93343e1cd902c389501fef31be5dc7726 and 9560d75a2e7e5d54f2920fda67eb7f9296e40e87. 
--- internal/service/securitylake/data_lake.go | 2 - .../service/securitylake/data_lake_test.go | 264 +++++++++--------- 2 files changed, 129 insertions(+), 137 deletions(-) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index 5d93f199a405..5fddac4946b1 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -17,7 +17,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" @@ -100,7 +99,6 @@ func (r *dataLakeResource) Schema(ctx context.Context, req resource.SchemaReques "kms_key_id": schema.StringAttribute{ Optional: true, Computed: true, - Default: stringdefault.StaticString("S3_MANAGED_KEY"), }, }, }, diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index 4f93279e3a63..71d49e08ee3c 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -47,7 +47,6 @@ func testAccDataLake_basic(t *testing.T) { testAccCheckDataLakeExists(ctx, resourceName, &datalake), resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "0"), resource.TestCheckResourceAttr(resourceName, 
"configuration.0.region", acctest.Region()), resource.TestCheckResourceAttr(resourceName, "configuration.0.replication_configuration.#", "0"), @@ -176,7 +175,7 @@ func testAccDataLake_lifeCycle(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), + resource.TestCheckResourceAttrPair(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "aws_kms_key.test", "id"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.#", "2"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.days", "31"), @@ -224,7 +223,7 @@ func testAccDataLake_lifeCycleUpdate(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), + resource.TestCheckResourceAttrPair(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "aws_kms_key.test", "id"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.#", "2"), resource.TestCheckResourceAttr(resourceName, 
"configuration.0.lifecycle_configuration.0.transition.0.days", "31"), @@ -248,7 +247,7 @@ func testAccDataLake_lifeCycleUpdate(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), + resource.TestCheckResourceAttrPair(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "aws_kms_key.test", "id"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.days", "31"), @@ -377,19 +376,17 @@ resource "aws_iam_role" "meta_store_manager" { path = "/service-role/" assume_role_policy = < Date: Wed, 6 Dec 2023 15:21:30 -0500 Subject: [PATCH 39/45] Run 'go generate ./internal/service/securitylake'. --- internal/service/securitylake/tags_gen.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/service/securitylake/tags_gen.go b/internal/service/securitylake/tags_gen.go index 05a42fb3fbcd..9373d7c9c84c 100644 --- a/internal/service/securitylake/tags_gen.go +++ b/internal/service/securitylake/tags_gen.go @@ -19,12 +19,12 @@ import ( // listTags lists securitylake service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func listTags(ctx context.Context, conn *securitylake.Client, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *securitylake.Client, identifier string, optFns ...func(*securitylake.Options)) (tftags.KeyValueTags, error) { input := &securitylake.ListTagsForResourceInput{ ResourceArn: aws.String(identifier), } - output, err := conn.ListTagsForResource(ctx, input) + output, err := conn.ListTagsForResource(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err @@ -100,7 +100,7 @@ func setTagsOut(ctx context.Context, tags []awstypes.Tag) { // updateTags updates securitylake service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func updateTags(ctx context.Context, conn *securitylake.Client, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *securitylake.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*securitylake.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *securitylake.Client, identifier strin TagKeys: removedTags.Keys(), } - _, err := conn.UntagResource(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -129,7 +129,7 @@ func updateTags(ctx context.Context, conn *securitylake.Client, identifier strin Tags: Tags(updatedTags), } - _, err := conn.TagResource(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) From 93f77009c81e3a6ca570b5677af6768d2c56a3fb Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 6 Dec 2023 15:23:02 -0500 Subject: [PATCH 40/45] r/aws_securitylake_data_lake: Pass Region to 'listTags'. 
--- internal/service/securitylake/data_lake.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index 5fddac4946b1..9b7ed9cfcb0a 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -258,7 +258,7 @@ func (r *dataLakeResource) Read(ctx context.Context, req resource.ReadRequest, r // Transparent tagging fails with "ResourceNotFoundException: The request failed because the specified resource doesn't exist." // if the data lake's AWS Region isn't the configured one. if region := configuration.Region.ValueString(); region != r.Meta().Region { - if tags, err := listTags(ctx, conn, data.ID.ValueString()); err == nil { + if tags, err := listTags(ctx, conn, data.ID.ValueString(), func(o *securitylake.Options) { o.Region = region }); err == nil { setTagsOut(ctx, Tags(tags)) } } From 854a5e0c0783221192904ea01f24cc4a8d3eacbd Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 6 Dec 2023 15:26:01 -0500 Subject: [PATCH 41/45] Fix 'testAccDataLake_basic'. 
--- internal/service/securitylake/data_lake_test.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index 71d49e08ee3c..2167747bac48 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -46,7 +46,7 @@ func testAccDataLake_basic(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckDataLakeExists(ctx, resourceName, &datalake), resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "0"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "0"), resource.TestCheckResourceAttr(resourceName, "configuration.0.region", acctest.Region()), resource.TestCheckResourceAttr(resourceName, "configuration.0.replication_configuration.#", "0"), @@ -293,8 +293,7 @@ func testAccDataLake_replication(t *testing.T) { testAccCheckDataLakeExists(ctx, resourceName, &datalake), resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "0"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.#", "1"), resource.TestCheckResourceAttr(resourceName, 
"configuration.0.lifecycle_configuration.0.transition.0.days", "31"), From 7b24991bef55eba6a68e6bcda80f4d81392bd161 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 6 Dec 2023 16:29:44 -0500 Subject: [PATCH 42/45] r/aws_securitylake_data_lake: encryption_configuration is a nested attribute. --- internal/service/securitylake/data_lake.go | 36 ++++++++++++------- .../service/securitylake/data_lake_test.go | 4 ++- 2 files changed, 27 insertions(+), 13 deletions(-) diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index 9b7ed9cfcb0a..9549da2770fa 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -14,8 +14,10 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/securitylake/types" "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" @@ -84,25 +86,27 @@ func (r *dataLakeResource) Schema(ctx context.Context, req resource.SchemaReques }, NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ - "region": schema.StringAttribute{ - Required: true, - }, - }, - Blocks: map[string]schema.Block{ - "encryption_configuration": schema.ListNestedBlock{ + "encryption_configuration": schema.ListAttribute{ CustomType: fwtypes.NewListNestedObjectTypeOf[dataLakeEncryptionConfigurationModel](ctx), + Optional: true, + Computed: true, Validators: []validator.List{ 
listvalidator.SizeAtMost(1), }, - NestedObject: schema.NestedBlockObject{ - Attributes: map[string]schema.Attribute{ - "kms_key_id": schema.StringAttribute{ - Optional: true, - Computed: true, - }, + ElementType: types.ObjectType{ + AttrTypes: map[string]attr.Type{ + "kms_key_id": types.StringType, }, }, + PlanModifiers: []planmodifier.List{ + listplanmodifier.UseStateForUnknown(), + }, }, + "region": schema.StringAttribute{ + Required: true, + }, + }, + Blocks: map[string]schema.Block{ "lifecycle_configuration": schema.ListNestedBlock{ CustomType: fwtypes.NewListNestedObjectTypeOf[dataLakeLifecycleConfigurationModel](ctx), Validators: []validator.List{ @@ -212,6 +216,14 @@ func (r *dataLakeResource) Create(ctx context.Context, req resource.CreateReques return } + var configuration dataLakeConfigurationModel + resp.Diagnostics.Append(flex.Flatten(ctx, dataLake, &configuration)...) + if resp.Diagnostics.HasError() { + return + } + + // Set values for unknowns after creation is complete. + data.Configurations = fwtypes.NewListNestedObjectValueOfPtr(ctx, &configuration) data.S3BucketARN = flex.StringToFramework(ctx, dataLake.S3BucketArn) resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index 2167747bac48..79a41fe487ed 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -46,7 +46,8 @@ func testAccDataLake_basic(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckDataLakeExists(ctx, resourceName, &datalake), resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "0"), resource.TestCheckResourceAttr(resourceName, "configuration.0.region", acctest.Region()), resource.TestCheckResourceAttr(resourceName, "configuration.0.replication_configuration.#", "0"), @@ -674,6 +675,7 @@ resource "aws_securitylake_data_lake" "region_2" { days = 300 } } + replication_configuration { role_arn = aws_iam_role.datalake_s3_replication.arn regions = [%[2]q] From dc7a242f3e25b8a8e7abbff419a8bfad843e9d03 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 6 Dec 2023 17:01:17 -0500 Subject: [PATCH 43/45] Fix 'testAccDataLake_replication'. 
--- internal/service/securitylake/data_lake_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index 79a41fe487ed..48912afcb8b4 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -294,7 +294,8 @@ func testAccDataLake_replication(t *testing.T) { testAccCheckDataLakeExists(ctx, resourceName, &datalake), resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.days", "31"), From d362d4bf44aa703c45801b9979f8547cd2a381eb Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 7 Dec 2023 07:48:56 -0500 Subject: [PATCH 44/45] Fix typo in acceptance test checks. 
--- internal/service/securitylake/data_lake_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index 48912afcb8b4..93aa04ea80c6 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -176,7 +176,7 @@ func testAccDataLake_lifeCycle(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "aws_kms_key.test", "id"), + resource.TestCheckResourceAttrPair(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "aws_kms_key.test", "id"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.#", "2"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.days", "31"), @@ -224,7 +224,7 @@ func testAccDataLake_lifeCycleUpdate(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "aws_kms_key.test", "id"), + resource.TestCheckResourceAttrPair(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "aws_kms_key.test", "id"), resource.TestCheckResourceAttr(resourceName, 
"configuration.0.lifecycle_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.#", "2"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.days", "31"), @@ -248,7 +248,7 @@ func testAccDataLake_lifeCycleUpdate(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "configurations.0.encryption_configuration.0.kms_key_id", "aws_kms_key.test", "id"), + resource.TestCheckResourceAttrPair(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "aws_kms_key.test", "id"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.days", "31"), From 69a092ddb61ef95fa021fcdb8de63f698b32ddc0 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 7 Dec 2023 07:59:03 -0500 Subject: [PATCH 45/45] Correct CHANGELOG entry file name. --- .changelog/{29376.txt => 34521.txt} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .changelog/{29376.txt => 34521.txt} (100%) diff --git a/.changelog/29376.txt b/.changelog/34521.txt similarity index 100% rename from .changelog/29376.txt rename to .changelog/34521.txt