diff --git a/.changelog/34521.txt b/.changelog/34521.txt new file mode 100644 index 000000000000..e5eda46c5d05 --- /dev/null +++ b/.changelog/34521.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +resource/aws_securitylake_data_lake +``` diff --git a/internal/create/errors.go b/internal/create/errors.go index 4a9c5d8f0ed2..0df1a206a27d 100644 --- a/internal/create/errors.go +++ b/internal/create/errors.go @@ -41,9 +41,15 @@ func ProblemStandardMessage(service, action, resource, id string, gotError error } if gotError == nil { + if id == "" { + return fmt.Sprintf("%s %s %s", action, hf, resource) + } return fmt.Sprintf("%s %s %s (%s)", action, hf, resource, id) } + if id == "" { + return fmt.Sprintf("%s %s %s: %s", action, hf, resource, gotError) + } return fmt.Sprintf("%s %s %s (%s): %s", action, hf, resource, id, gotError) } diff --git a/internal/framework/types/list_nested_objectof.go b/internal/framework/types/list_nested_objectof.go index 44f3972c60df..9a60f67f93e4 100644 --- a/internal/framework/types/list_nested_objectof.go +++ b/internal/framework/types/list_nested_objectof.go @@ -142,7 +142,7 @@ func nestedObjectTypeNewObjectSlice[T any](_ context.Context, len, cap int) ([]* return make([]*T, len, cap), diags } -// ListNestedObjectValueOf represents a Terraform Plugin Framework List value whose elements are of type ObjectTypeOf. +// ListNestedObjectValueOf represents a Terraform Plugin Framework List value whose elements are of type `ObjectTypeOf[T]`. type ListNestedObjectValueOf[T any] struct { basetypes.ListValue } diff --git a/internal/framework/types/set_nested_objectof.go b/internal/framework/types/set_nested_objectof.go index f3abf1505089..f2a1cdbb7566 100644 --- a/internal/framework/types/set_nested_objectof.go +++ b/internal/framework/types/set_nested_objectof.go @@ -130,7 +130,7 @@ func (t setNestedObjectTypeOf[T]) ValueFromObjectSlice(ctx context.Context, slic return nil, diags } -// SetNestedObjectValueOf represents a Terraform Plugin Framework Set value whose elements are of type ObjectTypeOf. +// SetNestedObjectValueOf represents a Terraform Plugin Framework Set value whose elements are of type `ObjectTypeOf[T]`. type SetNestedObjectValueOf[T any] struct { basetypes.SetValue } diff --git a/internal/framework/types/setof.go b/internal/framework/types/setof.go new file mode 100644 index 000000000000..b8463aa4ad84 --- /dev/null +++ b/internal/framework/types/setof.go @@ -0,0 +1,142 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package types + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" +) + +// setTypeOf is the attribute type of a SetValueOf. 
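+// It embeds basetypes.SetType and fixes the element type to T at compile time,
+// so Equal only matches another setTypeOf carrying the same element type.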
+type setTypeOf[T attr.Value] struct { + basetypes.SetType +} + +var ( + SetOfStringType = setTypeOf[basetypes.StringValue]{basetypes.SetType{ElemType: basetypes.StringType{}}} +) + +var ( + _ basetypes.SetTypable = (*setTypeOf[basetypes.StringValue])(nil) + _ basetypes.SetValuable = (*SetValueOf[basetypes.StringValue])(nil) +) + +func newAttrTypeOf[T attr.Value](ctx context.Context) attr.Type { + var zero T + return zero.Type(ctx) +} + +func NewSetTypeOf[T attr.Value](ctx context.Context) setTypeOf[T] { + return setTypeOf[T]{basetypes.SetType{ElemType: newAttrTypeOf[T](ctx)}} +} + +func (t setTypeOf[T]) Equal(o attr.Type) bool { + other, ok := o.(setTypeOf[T]) + + if !ok { + return false + } + + return t.SetType.Equal(other.SetType) +} + +func (t setTypeOf[T]) String() string { + var zero T + return fmt.Sprintf("SetTypeOf[%T]", zero) +} + +func (t setTypeOf[T]) ValueFromSet(ctx context.Context, in basetypes.SetValue) (basetypes.SetValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + if in.IsNull() { + return NewSetValueOfNull[T](ctx), diags + } + if in.IsUnknown() { + return NewSetValueOfUnknown[T](ctx), diags + } + + setValue, d := basetypes.NewSetValue(newAttrTypeOf[T](ctx), in.Elements()) + diags.Append(d...) + if diags.HasError() { + return NewSetValueOfUnknown[T](ctx), diags + } + + value := SetValueOf[T]{ + SetValue: setValue, + } + + return value, diags +} + +func (t setTypeOf[T]) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + attrValue, err := t.SetType.ValueFromTerraform(ctx, in) + + if err != nil { + return nil, err + } + + setValue, ok := attrValue.(basetypes.SetValue) + + if !ok { + return nil, fmt.Errorf("unexpected value type of %T", attrValue) + } + + setValuable, diags := t.ValueFromSet(ctx, setValue) + + if diags.HasError() { + return nil, fmt.Errorf("unexpected error converting SetValue to SetValuable: %v", diags) + } + + return setValuable, nil +} + +func (t setTypeOf[T]) ValueType(ctx context.Context) attr.Value { + return SetValueOf[T]{} +} + +// SetValueOf represents a Terraform Plugin Framework Set value whose elements are of type `T`. +type SetValueOf[T attr.Value] struct { + basetypes.SetValue +} + +func (v SetValueOf[T]) Equal(o attr.Value) bool { + other, ok := o.(SetValueOf[T]) + + if !ok { + return false + } + + return v.SetValue.Equal(other.SetValue) +} + +func (v SetValueOf[T]) Type(ctx context.Context) attr.Type { + return NewSetTypeOf[T](ctx) +} + +func NewSetValueOfNull[T attr.Value](ctx context.Context) SetValueOf[T] { + return SetValueOf[T]{SetValue: basetypes.NewSetNull(newAttrTypeOf[T](ctx))} +} + +func NewSetValueOfUnknown[T attr.Value](ctx context.Context) SetValueOf[T] { + return SetValueOf[T]{SetValue: basetypes.NewSetUnknown(newAttrTypeOf[T](ctx))} +} + +func NewSetValueOf[T attr.Value](ctx context.Context, elements []attr.Value) (SetValueOf[T], diag.Diagnostics) { + v, diags := basetypes.NewSetValue(newAttrTypeOf[T](ctx), elements) + if diags.HasError() { + return NewSetValueOfUnknown[T](ctx), diags + } + + return SetValueOf[T]{SetValue: v}, diags +} + +func NewSetValueOfMust[T attr.Value](ctx context.Context, elements []attr.Value) SetValueOf[T] { + return fwdiag.Must(NewSetValueOf[T](ctx, elements)) +} diff --git a/internal/framework/types/setof_test.go b/internal/framework/types/setof_test.go new file mode 100644 index 000000000000..017a587170f5 --- /dev/null +++ b/internal/framework/types/setof_test.go @@ -0,0 +1,57 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package types_test + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-go/tftypes" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" +) + +func TestSetOfStringFromTerraform(t *testing.T) { + t.Parallel() + + ctx := context.Background() + tests := map[string]struct { + val tftypes.Value + expected attr.Value + }{ + "values": { + val: tftypes.NewValue(tftypes.Set{ + ElementType: tftypes.String, + }, []tftypes.Value{ + tftypes.NewValue(tftypes.String, "red"), + tftypes.NewValue(tftypes.String, "blue"), + tftypes.NewValue(tftypes.String, "green"), + }), + expected: fwtypes.NewSetValueOfMust[types.String](ctx, []attr.Value{ + types.StringValue("red"), + types.StringValue("blue"), + types.StringValue("green"), + }), + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + val, err := fwtypes.SetOfStringType.ValueFromTerraform(ctx, test.val) + + if err != nil { + t.Fatalf("got unexpected error: %s", err) + } + + if diff := cmp.Diff(val, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go new file mode 100644 index 000000000000..9549da2770fa --- /dev/null +++ b/internal/service/securitylake/data_lake.go @@ -0,0 +1,571 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package securitylake + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/securitylake" + awstypes "github.com/aws/aws-sdk-go-v2/service/securitylake/types" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource(name="Data Lake") +// @Tags(identifierAttribute="arn") +func newDataLakeResource(_ context.Context) (resource.ResourceWithConfigure, 
error) { + r := &dataLakeResource{} + + r.SetDefaultCreateTimeout(30 * time.Minute) + r.SetDefaultUpdateTimeout(30 * time.Minute) + r.SetDefaultDeleteTimeout(30 * time.Minute) + + return r, nil +} + +const ( + ResNameDataLake = "Data Lake" +) + +type dataLakeResource struct { + framework.ResourceWithConfigure + framework.WithImportByID + framework.WithTimeouts +} + +func (r *dataLakeResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = "aws_securitylake_data_lake" +} + +func (r *dataLakeResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "arn": framework.ARNAttributeComputedOnly(), + names.AttrID: framework.IDAttribute(), + "meta_store_manager_role_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "s3_bucket_arn": framework.ARNAttributeComputedOnly(), + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + }, + Blocks: map[string]schema.Block{ + "configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[dataLakeConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "encryption_configuration": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[dataLakeEncryptionConfigurationModel](ctx), + Optional: true, + Computed: true, + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + ElementType: types.ObjectType{ + AttrTypes: map[string]attr.Type{ + "kms_key_id": types.StringType, + }, + }, + PlanModifiers: []planmodifier.List{ + listplanmodifier.UseStateForUnknown(), + }, + }, + "region": schema.StringAttribute{ + Required: true, + }, + }, + Blocks: map[string]schema.Block{ + "lifecycle_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[dataLakeLifecycleConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "expiration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[dataLakeLifecycleExpirationModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "days": schema.Int64Attribute{ + Optional: true, + }, + }, + }, + }, + "transition": schema.SetNestedBlock{ + CustomType: fwtypes.NewSetNestedObjectTypeOf[dataLakeLifecycleTransitionModel](ctx), + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "days": schema.Int64Attribute{ + Optional: true, + }, + "storage_class": schema.StringAttribute{ + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "replication_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[dataLakeReplicationConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "regions": schema.SetAttribute{ + CustomType: fwtypes.SetOfStringType, + ElementType: types.StringType, + Optional: true, + }, + "role_arn": schema.StringAttribute{ + 
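// fwtypes.ARNType stores the value as a string and validates that it parses as an ARN.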
CustomType: fwtypes.ARNType, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "timeouts": timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Update: true, + Delete: true, + }), + }, + } +} + +func (r *dataLakeResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var data dataLakeResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + conn := r.Meta().SecurityLakeClient(ctx) + + input := &securitylake.CreateDataLakeInput{} + resp.Diagnostics.Append(flex.Expand(ctx, data, input)...) + if resp.Diagnostics.HasError() { + return + } + + input.Tags = getTagsIn(ctx) + + output, err := conn.CreateDataLake(ctx, input) + + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionCreating, ResNameDataLake, data.ID.ValueString(), err), + err.Error(), + ) + return + } + + // Set values for unknowns. + dataLake := &output.DataLakes[0] + data.DataLakeARN = flex.StringToFramework(ctx, dataLake.DataLakeArn) + data.setID() + + dataLake, err = waitDataLakeCreated(ctx, conn, data.ID.ValueString(), r.CreateTimeout(ctx, data.Timeouts)) + + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForCreation, ResNameDataLake, data.ID.ValueString(), err), + err.Error(), + ) + return + } + + var configuration dataLakeConfigurationModel + resp.Diagnostics.Append(flex.Flatten(ctx, dataLake, &configuration)...) + if resp.Diagnostics.HasError() { + return + } + + // Set values for unknowns after creation is complete. + data.Configurations = fwtypes.NewListNestedObjectValueOfPtr(ctx, &configuration) + data.S3BucketARN = flex.StringToFramework(ctx, dataLake.S3BucketArn) + + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +func (r *dataLakeResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + conn := r.Meta().SecurityLakeClient(ctx) + + var data dataLakeResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + if err := data.InitFromID(); err != nil { + resp.Diagnostics.AddError("parsing resource ID", err.Error()) + return + } + + dataLake, err := findDataLakeByARN(ctx, conn, data.ID.ValueString()) + + if tfresource.NotFound(err) { + resp.State.RemoveResource(ctx) + return + } + + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionSetting, ResNameDataLake, data.ID.String(), err), + err.Error(), + ) + return + } + + var configuration dataLakeConfigurationModel + resp.Diagnostics.Append(flex.Flatten(ctx, dataLake, &configuration)...) + if resp.Diagnostics.HasError() { + return + } + + data.Configurations = fwtypes.NewListNestedObjectValueOfPtr(ctx, &configuration) + data.S3BucketARN = flex.StringToFramework(ctx, dataLake.S3BucketArn) + + // Transparent tagging fails with "ResourceNotFoundException: The request failed because the specified resource doesn't exist." + // if the data lake's AWS Region isn't the configured one. + if region := configuration.Region.ValueString(); region != r.Meta().Region { + if tags, err := listTags(ctx, conn, data.ID.ValueString(), func(o *securitylake.Options) { o.Region = region }); err == nil { + setTagsOut(ctx, Tags(tags)) + } + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} + +func (r *dataLakeResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var old, new dataLakeResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &new)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(req.State.Get(ctx, &old)...) + if resp.Diagnostics.HasError() { + return + } + + conn := r.Meta().SecurityLakeClient(ctx) + + if !new.Configurations.Equal(old.Configurations) { + input := &securitylake.UpdateDataLakeInput{} + resp.Diagnostics.Append(flex.Expand(ctx, new, input)...) + if resp.Diagnostics.HasError() { + return + } + + _, err := conn.UpdateDataLake(ctx, input) + + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionUpdating, ResNameDataLake, new.ID.ValueString(), err), + err.Error(), + ) + return + } + + if _, err := waitDataLakeUpdated(ctx, conn, new.ID.ValueString(), r.UpdateTimeout(ctx, new.Timeouts)); err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForUpdate, ResNameDataLake, new.ID.ValueString(), err), + err.Error(), + ) + return + } + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &new)...) +} + +func (r *dataLakeResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + conn := r.Meta().SecurityLakeClient(ctx) + + var data dataLakeResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + _, err := conn.DeleteDataLake(ctx, &securitylake.DeleteDataLakeInput{ + Regions: []string{errs.Must(regionFromARNString(data.ID.ValueString()))}, + }) + + // No data lake: + // "An error occurred (AccessDeniedException) when calling the DeleteDataLake operation: User: ... 
is not authorized to perform: securitylake:DeleteDataLake" + if errs.IsAErrorMessageContains[*awstypes.AccessDeniedException](err, "is not authorized to perform") { + return + } + + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionDeleting, ResNameDataLake, data.ID.String(), err), + err.Error(), + ) + return + } + + if _, err = waitDataLakeDeleted(ctx, conn, data.ID.ValueString(), r.DeleteTimeout(ctx, data.Timeouts)); err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SecurityLake, create.ErrActionWaitingForDeletion, ResNameDataLake, data.ID.String(), err), + err.Error(), + ) + return + } +} + +func (r *dataLakeResource) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) { + r.SetTagsAll(ctx, request, response) +} + +func waitDataLakeCreated(ctx context.Context, conn *securitylake.Client, arn string, timeout time.Duration) (*awstypes.DataLakeResource, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.DataLakeStatusInitialized), + Target: enum.Slice(awstypes.DataLakeStatusCompleted), + Refresh: statusDataLakeCreate(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.DataLakeResource); ok { + if v := output.UpdateStatus; v != nil { + if v := v.Exception; v != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(v.Code), aws.ToString(v.Reason))) + } + } + + return output, err + } + + return nil, err +} + +func waitDataLakeUpdated(ctx context.Context, conn *securitylake.Client, arn string, timeout time.Duration) (*awstypes.DataLakeResource, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.DataLakeStatusPending, awstypes.DataLakeStatusInitialized), + Target: enum.Slice(awstypes.DataLakeStatusCompleted), + Refresh: statusDataLakeUpdate(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.DataLakeResource); ok { + if v := output.UpdateStatus; v != nil { + if v := v.Exception; v != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(v.Code), aws.ToString(v.Reason))) + } + } + + return output, err + } + + return nil, err +} + +func waitDataLakeDeleted(ctx context.Context, conn *securitylake.Client, arn string, timeout time.Duration) (*awstypes.DataLakeResource, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.DataLakeStatusInitialized, awstypes.DataLakeStatusCompleted), + Target: []string{}, + Refresh: statusDataLakeCreate(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.DataLakeResource); ok { + if v := output.UpdateStatus; v != nil { + if v := v.Exception; v != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(v.Code), aws.ToString(v.Reason))) + } + } + + return output, err + } + + return nil, err +} + +func statusDataLakeCreate(ctx context.Context, conn *securitylake.Client, arn string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findDataLakeByARN(ctx, conn, arn) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.CreateStatus), nil + } +} + +func statusDataLakeUpdate(ctx context.Context, conn *securitylake.Client, 
arn string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findDataLakeByARN(ctx, conn, arn) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + if output.UpdateStatus == nil { + return nil, "", nil + } + + return output, string(output.UpdateStatus.Status), nil + } +} + +type dataLakeResourceModel struct { + Configurations fwtypes.ListNestedObjectValueOf[dataLakeConfigurationModel] `tfsdk:"configuration"` + DataLakeARN types.String `tfsdk:"arn"` + ID types.String `tfsdk:"id"` + MetaStoreManagerRoleARN fwtypes.ARN `tfsdk:"meta_store_manager_role_arn"` + S3BucketARN types.String `tfsdk:"s3_bucket_arn"` + Tags types.Map `tfsdk:"tags"` + TagsAll types.Map `tfsdk:"tags_all"` + Timeouts timeouts.Value `tfsdk:"timeouts"` +} + +func (model *dataLakeResourceModel) InitFromID() error { + model.DataLakeARN = model.ID + + return nil +} + +func (model *dataLakeResourceModel) setID() { + model.ID = model.DataLakeARN +} + +type dataLakeConfigurationModel struct { + EncryptionConfiguration fwtypes.ListNestedObjectValueOf[dataLakeEncryptionConfigurationModel] `tfsdk:"encryption_configuration"` + LifecycleConfiguration fwtypes.ListNestedObjectValueOf[dataLakeLifecycleConfigurationModel] `tfsdk:"lifecycle_configuration"` + Region types.String `tfsdk:"region"` + ReplicationConfiguration fwtypes.ListNestedObjectValueOf[dataLakeReplicationConfigurationModel] `tfsdk:"replication_configuration"` +} + +type dataLakeEncryptionConfigurationModel struct { + KmsKeyID types.String `tfsdk:"kms_key_id"` +} + +type dataLakeLifecycleConfigurationModel struct { + Expiration fwtypes.ListNestedObjectValueOf[dataLakeLifecycleExpirationModel] `tfsdk:"expiration"` + Transitions fwtypes.SetNestedObjectValueOf[dataLakeLifecycleTransitionModel] `tfsdk:"transition"` +} + +type dataLakeLifecycleExpirationModel struct { + Days types.Int64 `tfsdk:"days"` +} + +type dataLakeLifecycleTransitionModel struct { + Days types.Int64 `tfsdk:"days"` + StorageClass types.String `tfsdk:"storage_class"` +} + +type dataLakeReplicationConfigurationModel struct { + Regions fwtypes.SetValueOf[types.String] `tfsdk:"regions"` + RoleARN fwtypes.ARN `tfsdk:"role_arn"` +} + +func findDataLakeByARN(ctx context.Context, conn *securitylake.Client, arn string) (*awstypes.DataLakeResource, error) { + input := &securitylake.ListDataLakesInput{ + Regions: []string{errs.Must(regionFromARNString(arn))}, + } + + return findDataLake(ctx, conn, input, func(v *awstypes.DataLakeResource) bool { + return aws.ToString(v.DataLakeArn) == arn + }) +} + +func findDataLake(ctx context.Context, conn *securitylake.Client, input *securitylake.ListDataLakesInput, filter tfslices.Predicate[*awstypes.DataLakeResource]) (*awstypes.DataLakeResource, error) { + output, err := findDataLakes(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func findDataLakes(ctx context.Context, conn *securitylake.Client, input *securitylake.ListDataLakesInput, filter tfslices.Predicate[*awstypes.DataLakeResource]) ([]*awstypes.DataLakeResource, error) { + var dataLakes []*awstypes.DataLakeResource + + output, err := conn.ListDataLakes(ctx, input) + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + for _, v := range output.DataLakes { + v := v + if v := &v; filter(v) { + dataLakes = append(dataLakes, v) + } + } + + return dataLakes, nil +} + +// 
regionFromARNString return the AWS Region from the specified ARN string. +func regionFromARNString(s string) (string, error) { + v, err := arn.Parse(s) + + if err != nil { + return "", err + } + + return v.Region, nil +} diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go new file mode 100644 index 000000000000..93aa04ea80c6 --- /dev/null +++ b/internal/service/securitylake/data_lake_test.go @@ -0,0 +1,693 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package securitylake_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/securitylake/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfsecuritylake "github.com/hashicorp/terraform-provider-aws/internal/service/securitylake" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func testAccDataLake_basic(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var datalake types.DataLakeResource + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_securitylake_data_lake.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SecurityLake) + acctest.PreCheckOrganizationsAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataLakeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataLakeConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDataLakeExists(ctx, resourceName, &datalake), + resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.region", acctest.Region()), + resource.TestCheckResourceAttr(resourceName, "configuration.0.replication_configuration.#", "0"), + resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), + resource.TestCheckResourceAttrSet(resourceName, "s3_bucket_arn"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn"}, + }, + }, + }) +} + +func testAccDataLake_disappears(t *testing.T) { + ctx := acctest.Context(t) + var datalake types.DataLakeResource + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_securitylake_data_lake.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + 
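// These pre-checks skip the test unless the current partition supports Security Lake and the account is part of an AWS Organization.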
acctest.PreCheckPartitionHasService(t, names.SecurityLake) + acctest.PreCheckOrganizationsAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataLakeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataLakeConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataLakeExists(ctx, resourceName, &datalake), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfsecuritylake.ResourceDataLake, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccDataLake_tags(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var datalake types.DataLakeResource + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_securitylake_data_lake.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SecurityLake) + acctest.PreCheckOrganizationsAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataLakeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataLakeConfig_tags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataLakeExists(ctx, resourceName, &datalake), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn"}, + }, + { + Config: testAccDataLakeConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataLakeExists(ctx, resourceName, &datalake), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccDataLakeConfig_tags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataLakeExists(ctx, resourceName, &datalake), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + +func testAccDataLake_lifeCycle(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var datalake types.DataLakeResource + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_securitylake_data_lake.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SecurityLake) + acctest.PreCheckOrganizationsAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataLakeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataLakeConfig_lifeCycle(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataLakeExists(ctx, resourceName, &datalake), + resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", 
"arn"), + resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "aws_kms_key.test", "id"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.#", "2"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.days", "31"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.storage_class", "STANDARD_IA"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.1.days", "80"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.1.storage_class", "ONEZONE_IA"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.0.days", "300"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn"}, + }, + }, + }) +} + +func testAccDataLake_lifeCycleUpdate(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var datalake types.DataLakeResource + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_securitylake_data_lake.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SecurityLake) + acctest.PreCheckOrganizationsAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataLakeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataLakeConfig_lifeCycle(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataLakeExists(ctx, resourceName, &datalake), + resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), + resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "aws_kms_key.test", "id"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.#", "2"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.days", "31"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.storage_class", "STANDARD_IA"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.1.days", "80"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.1.storage_class", "ONEZONE_IA"), + resource.TestCheckResourceAttr(resourceName, 
"configuration.0.lifecycle_configuration.0.expiration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.0.days", "300"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn"}, + }, + { + Config: testAccDataLakeConfig_lifeCycleUpdate(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataLakeExists(ctx, resourceName, &datalake), + resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), + resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "aws_kms_key.test", "id"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.days", "31"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.storage_class", "STANDARD_IA"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.0.days", "300"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn"}, + }, + }, + }) +} + +func testAccDataLake_replication(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var datalake types.DataLakeResource + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_securitylake_data_lake.region_2" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SecurityLake) + acctest.PreCheckOrganizationsAccount(ctx, t) + acctest.PreCheckMultipleRegion(t, 2) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataLakeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataLakeConfig_replication(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataLakeExists(ctx, resourceName, &datalake), + resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", "arn"), + resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "S3_MANAGED_KEY"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.days", "31"), + resource.TestCheckResourceAttr(resourceName, 
"configuration.0.lifecycle_configuration.0.transition.0.storage_class", "STANDARD_IA"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.0.days", "300"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "configuration.0.replication_configuration.0.role_arn", "aws_iam_role.datalake_s3_replication", "arn"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.replication_configuration.0.regions.#", "1"), + resource.TestCheckTypeSetElemAttr(resourceName, "configuration.0.replication_configuration.0.regions.*", acctest.Region()), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn"}, + }, + }, + }) +} + +func testAccCheckDataLakeDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).SecurityLakeClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_securitylake_data_lake" { + continue + } + + _, err := tfsecuritylake.FindDataLakeByARN(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return create.Error(names.SecurityLake, create.ErrActionCheckingDestroyed, tfsecuritylake.ResNameDataLake, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccCheckDataLakeExists(ctx context.Context, name string, datalake *types.DataLakeResource) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.SecurityLake, create.ErrActionCheckingExistence, tfsecuritylake.ResNameDataLake, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.SecurityLake, create.ErrActionCheckingExistence, tfsecuritylake.ResNameDataLake, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).SecurityLakeClient(ctx) + resp, err := tfsecuritylake.FindDataLakeByARN(ctx, conn, rs.Primary.ID) + if err != nil { + return create.Error(names.SecurityLake, create.ErrActionCheckingExistence, tfsecuritylake.ResNameDataLake, rs.Primary.ID, err) + } + + *datalake = *resp + + return nil + } +} + +func testAccDataLakeConfigConfig_base(rName string) string { + return ` +data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} + +resource "aws_iam_role" "meta_store_manager" { + name = "AmazonSecurityLakeMetaStoreManager" + path = "/service-role/" + assume_role_policy = < 0 { + return tags + } + } + + return nil +} + +// setTagsOut sets securitylake service tags in Context. +func setTagsOut(ctx context.Context, tags []awstypes.Tag) { + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + } +} + +// updateTags updates securitylake service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func updateTags(ctx context.Context, conn *securitylake.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*securitylake.Options)) error { + oldTags := tftags.New(ctx, oldTagsMap) + newTags := tftags.New(ctx, newTagsMap) + + ctx = tflog.SetField(ctx, logging.KeyResourceId, identifier) + + removedTags := oldTags.Removed(newTags) + removedTags = removedTags.IgnoreSystem(names.SecurityLake) + if len(removedTags) > 0 { + input := &securitylake.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: removedTags.Keys(), + } + + _, err := conn.UntagResource(ctx, input, optFns...) + + if err != nil { + return fmt.Errorf("untagging resource (%s): %w", identifier, err) + } + } + + updatedTags := oldTags.Updated(newTags) + updatedTags = updatedTags.IgnoreSystem(names.SecurityLake) + if len(updatedTags) > 0 { + input := &securitylake.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: Tags(updatedTags), + } + + _, err := conn.TagResource(ctx, input, optFns...) + + if err != nil { + return fmt.Errorf("tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// UpdateTags updates securitylake service tags. +// It is called from outside this package. +func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { + return updateTags(ctx, meta.(*conns.AWSClient).SecurityLakeClient(ctx), identifier, oldTags, newTags) +} diff --git a/names/names.go b/names/names.go index a153b963fc0e..7eaf6c896fe6 100644 --- a/names/names.go +++ b/names/names.go @@ -75,6 +75,7 @@ const ( RolesAnywhereEndpointID = "rolesanywhere" Route53DomainsEndpointID = "route53domains" SchedulerEndpointID = "scheduler" + SecurityLakeEndpointID = "securitylake" ServiceQuotasEndpointID = "servicequotas" S3EndpointID = "s3" S3ControlEndpointID = "s3-control" diff --git a/website/docs/r/securitylake_data_lake.html.markdown b/website/docs/r/securitylake_data_lake.html.markdown new file mode 100644 index 000000000000..e983b587be76 --- /dev/null +++ b/website/docs/r/securitylake_data_lake.html.markdown @@ -0,0 +1,128 @@ +--- +subcategory: "Security Lake" +layout: "aws" +page_title: "AWS: aws_securitylake_data_lake" +description: |- + Terraform resource for managing an AWS Security Lake Data Lake. +--- + +# Resource: aws_securitylake_data_lake + +Terraform resource for managing an AWS Security Lake Data Lake. + +## Example Usage + +```terraform +resource "aws_securitylake_data_lake" "example" { + meta_store_manager_role_arn = aws_iam_role.meta_store_manager.arn + + configuration { + region = "eu-west-1" + + encryption_configuration { + kms_key_id = "S3_MANAGED_KEY" + } + + lifecycle_configuration { + transition { + days = 31 + storage_class = "STANDARD_IA" + } + transition { + days = 80 + storage_class = "ONEZONE_IA" + } + expiration { + days = 300 + } + } + } +} +``` + +### Basic Usage + +```terraform +resource "aws_securitylake_data_lake" "example" { + meta_store_manager_role_arn = aws_iam_role.meta_store_manager.arn + + configuration { + region = "eu-west-1" + + encryption_configuration { + kms_key_id = "S3_MANAGED_KEY" + } + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `meta_store_manager_role_arn` - (Required) The Amazon Resource Name (ARN) used to create and update the AWS Glue table. This table contains partitions generated by the ingestion and normalization of AWS log sources and custom sources. 
+* `configuration` - (Required) Specifies the Region or Regions that will contribute data to the rollup region.
+* `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
+
+Configurations support the following:
+
+* `region` - (Required) The AWS Region where Security Lake is automatically enabled.
+* `encryption_configuration` - (Optional) Provides encryption details of the Amazon Security Lake object.
+* `lifecycle_configuration` - (Optional) Provides lifecycle details of the Amazon Security Lake object.
+* `replication_configuration` - (Optional) Provides replication details of the Amazon Security Lake object.
+
+Encryption Configuration supports the following:
+
+* `kms_key_id` - (Optional) The ID of the KMS encryption key used by Amazon Security Lake to encrypt the Security Lake object.
+
+Lifecycle Configuration supports the following:
+
+* `expiration` - (Optional) Provides data expiration details of the Amazon Security Lake object.
+* `transition` - (Optional) Provides data storage transition details of the Amazon Security Lake object.
+
+Expiration Configuration supports the following:
+
+* `days` - (Optional) Number of days before data expires in the Amazon Security Lake object.
+
+Transitions support the following:
+
+* `days` - (Optional) Number of days before data transitions to a different S3 Storage Class in the Amazon Security Lake object.
+* `storage_class` - (Optional) The range of storage classes that you can choose from based on the data access, resiliency, and cost requirements of your workloads.
+
+Replication Configuration supports the following:
+
+* `regions` - (Optional) Replication enables automatic, asynchronous copying of objects across Amazon S3 buckets. Amazon S3 buckets that are configured for object replication can be owned by the same AWS account or by different accounts. You can replicate objects to a single destination bucket or to multiple destination buckets. The destination buckets can be in different AWS Regions or within the same Region as the source bucket.
+* `role_arn` - (Optional) Replication settings for the Amazon S3 buckets. This parameter uses the AWS Identity and Access Management (IAM) role you created that is managed by Security Lake to ensure that the replication setting is correct.
+
+## Attribute Reference
+
+This resource exports the following attributes in addition to the arguments above:
+
+* `arn` - ARN of the Data Lake.
+* `s3_bucket_arn` - The ARN for the Amazon Security Lake Amazon S3 bucket.
+* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block).
+
+## Timeouts
+
+[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts):
+
+* `create` - (Default `30m`)
+* `update` - (Default `30m`)
+* `delete` - (Default `30m`)
+
+## Import
+
+In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Security Lake Data Lakes using the data lake ARN.
For example:
+
+```terraform
+import {
+  to = aws_securitylake_data_lake.example
+  id = "arn:aws:securitylake:eu-west-1:123456789012:data-lake/default"
+}
+```
+
+Using `terraform import`, import Security Lake Data Lakes using the data lake ARN. For example:
+
+```console
+% terraform import aws_securitylake_data_lake.example arn:aws:securitylake:eu-west-1:123456789012:data-lake/default
+```