From d2fd83b6c5a5fee5854abf36175b4b453552ce37 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Tue, 7 Mar 2023 14:20:08 -0800 Subject: [PATCH 01/49] first pass at data_quality_job_definition resource --- .../sagemaker/data_quality_job_definition.go | 595 ++++++++++++++++++ internal/service/sagemaker/find.go | 25 + .../service/sagemaker/service_package_gen.go | 1 + 3 files changed, 621 insertions(+) create mode 100644 internal/service/sagemaker/data_quality_job_definition.go diff --git a/internal/service/sagemaker/data_quality_job_definition.go b/internal/service/sagemaker/data_quality_job_definition.go new file mode 100644 index 000000000000..7131ee80ae32 --- /dev/null +++ b/internal/service/sagemaker/data_quality_job_definition.go @@ -0,0 +1,595 @@ +package sagemaker + +import ( + "context" + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/sagemaker" + "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" +) + +// @SDKResource("aws_sagemaker_data_quality_job_definition") +func ResourceDataQualityJobDefinition() *schema.Resource { + return &schema.Resource{ + CreateWithoutTimeout: resourceDataQualityJobDefinitionCreate, + ReadWithoutTimeout: resourceDataQualityJobDefinitionRead, + UpdateWithoutTimeout: resourceDataQualityJobDefinitionUpdate, + DeleteWithoutTimeout: resourceDataQualityJobDefinitionDelete, + Importer: &schema.ResourceImporter{ + 
StateContext: schema.ImportStatePassthroughContext, + }, + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "data_quality_app_specification": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "image_uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validImage, + }, + }, + }, + }, + "data_quality_job_input": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "endpoint_input": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "endpoint_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validName, + }, + "local_path": { + Type: schema.TypeString, + Optional: true, + Default: "/opt/ml/processing/input", + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 1024), + validation.StringMatch(regexp.MustCompile(`^\/opt\/ml\/processing\/.*`), "Must start with `/opt/ml/processing`."), + ), + }, + }, + }, + }, + }, + }, + }, + "data_quality_job_output_config": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "monitoring_outputs": { + Type: schema.TypeList, + MinItems: 1, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "s3_output": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "local_path": { + Type: schema.TypeString, + Optional: true, + Default: "/opt/ml/processing/output", + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 1024), + 
validation.StringMatch(regexp.MustCompile(`^\/opt\/ml\/processing\/.*`), "Must start with `/opt/ml/processing`."), + ), + }, + "s3_uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringMatch(regexp.MustCompile(`^(https|s3)://([^/])/?(.*)$`), ""), + validation.StringLenBetween(1, 512), + ), + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "job_resources": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_config": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "instance_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(sagemaker.ProcessingInstanceType_Values(), false), + }, + "volume_size_in_gb": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(1, 512), + }, + }, + }, + }, + }, + }, + }, + "role_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validName, + }, + "tags": tftags.TagsSchema(), + "tags_all": tftags.TagsSchemaComputed(), + }, + CustomizeDiff: verify.SetTagsDiff, + } +} + +func resourceDataQualityJobDefinitionCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).SageMakerConn() + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + tags := defaultTagsConfig.MergeTags(tftags.New(ctx, d.Get("tags").(map[string]interface{}))) + + var name string + if v, ok := d.GetOk("name"); ok { + name = v.(string) + 
} else { + name = resource.UniqueId() + } + + var roleArn string + if v, ok := d.GetOk("role_arn"); ok { + roleArn = v.(string) + } + + createOpts := &sagemaker.CreateDataQualityJobDefinitionInput{ + JobDefinitionName: aws.String(name), + DataQualityAppSpecification: expandDataQualityAppSpecification(d.Get("data_quality_app_specification").([]interface{})), + DataQualityJobInput: expandDataQualityJobInput(d.Get("data_quality_job_input").([]interface{})), + DataQualityJobOutputConfig: expandDataQualityJobOutputConfig(d.Get("data_quality_job_output_config").([]interface{})), + JobResources: expandJobResources(d.Get("job_resources").([]interface{})), + RoleArn: aws.String(roleArn), + } + + if len(tags) > 0 { + createOpts.Tags = Tags(tags.IgnoreAWS()) + } + + log.Printf("[DEBUG] SageMaker Data Quality Job Definition create config: %#v", *createOpts) + _, err := conn.CreateDataQualityJobDefinitionWithContext(ctx, createOpts) + if err != nil { + return sdkdiag.AppendErrorf(diags, "creating SageMaker Data Quality Job Definition: %s", err) + } + d.SetId(name) + + return append(diags, resourceDataQualityJobDefinitionRead(ctx, d, meta)...) 
+} + +func resourceDataQualityJobDefinitionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).SageMakerConn() + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig + + jobDefinition, err := FindDataQualityJobDefinitionByName(ctx, conn, d.Id()) + + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] SageMaker Data Quality Job Definition (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) + } + + d.Set("arn", jobDefinition.JobDefinitionArn) + d.Set("name", jobDefinition.JobDefinitionName) + d.Set("role_arn", jobDefinition.RoleArn) + + if err := d.Set("data_quality_app_specification", flattenDataQualityAppSpecification(jobDefinition.DataQualityAppSpecification)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting data_quality_app_specification for SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) + } + + if err := d.Set("data_quality_job_input", flattenDataQualityJobInput(jobDefinition.DataQualityJobInput)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting data_quality_job_input for SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) + } + + if err := d.Set("data_quality_job_output_config", flattenDataQualityJobOutputConfig(jobDefinition.DataQualityJobOutputConfig)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting data_quality_job_output_config for SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) + } + + if err := d.Set("job_resources", flattenJobResources(jobDefinition.JobResources)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting job_resources for SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) + } + + tags, err := ListTags(ctx, conn, 
aws.StringValue(jobDefinition.JobDefinitionArn)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "listing tags for SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) + } + + tags = tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig) + + //lintignore:AWSR002 + if err := d.Set("tags", tags.RemoveDefaultConfig(defaultTagsConfig).Map()); err != nil { + return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) + } + + if err := d.Set("tags_all", tags.Map()); err != nil { + return sdkdiag.AppendErrorf(diags, "setting tags_all: %s", err) + } + + return diags +} + +func flattenDataQualityAppSpecification(appSpecification *sagemaker.DataQualityAppSpecification) []map[string]interface{} { + if appSpecification == nil { + return []map[string]interface{}{} + } + + spec := map[string]interface{}{} + + if appSpecification.ImageUri != nil { + spec["image_uri"] = aws.StringValue(appSpecification.ImageUri) + } + + return []map[string]interface{}{spec} +} + +func flattenDataQualityJobInput(jobInput *sagemaker.DataQualityJobInput) []map[string]interface{} { + if jobInput == nil { + return []map[string]interface{}{} + } + + spec := map[string]interface{}{} + + if jobInput.EndpointInput != nil { + spec["endpoint_input"] = flattenEndpointInput(jobInput.EndpointInput) + } + + return []map[string]interface{}{spec} +} + +func flattenEndpointInput(endpointInput *sagemaker.EndpointInput) []map[string]interface{} { + if endpointInput == nil { + return []map[string]interface{}{} + } + + spec := map[string]interface{}{} + + if endpointInput.EndpointName != nil { + spec["endpoint_name"] = aws.StringValue(endpointInput.EndpointName) + } + + if endpointInput.LocalPath != nil { + spec["local_path"] = aws.StringValue(endpointInput.LocalPath) + } + + return []map[string]interface{}{spec} +} + +func flattenDataQualityJobOutputConfig(outputConfig *sagemaker.MonitoringOutputConfig) []map[string]interface{} { + if outputConfig == nil { + return []map[string]interface{}{} + } + + spec := 
map[string]interface{}{} + + if outputConfig.MonitoringOutputs != nil { + spec["monitoring_outputs"] = flattenMonitoringOutputs(outputConfig.MonitoringOutputs) + } + + return []map[string]interface{}{spec} +} + +func flattenMonitoringOutputs(list []*sagemaker.MonitoringOutput) []map[string]interface{} { + containers := make([]map[string]interface{}, 0, len(list)) + + for _, lRaw := range list { + monitoringOutput := make(map[string]interface{}) + monitoringOutput["s3_output"] = flattenS3Output(lRaw.S3Output) + containers = append(containers, monitoringOutput) + } + + return containers +} + +func flattenS3Output(s3Output *sagemaker.MonitoringS3Output) []map[string]interface{} { + if s3Output == nil { + return []map[string]interface{}{} + } + + spec := map[string]interface{}{} + + if s3Output.LocalPath != nil { + spec["local_path"] = aws.StringValue(s3Output.LocalPath) + } + + if s3Output.S3Uri != nil { + spec["s3_uri"] = aws.StringValue(s3Output.S3Uri) + } + + return []map[string]interface{}{spec} +} + +func flattenJobResources(jobResources *sagemaker.MonitoringResources) []map[string]interface{} { + if jobResources == nil { + return []map[string]interface{}{} + } + + spec := map[string]interface{}{} + + if jobResources.ClusterConfig != nil { + spec["cluster_config"] = flattenClusterConfig(jobResources.ClusterConfig) + } + + return []map[string]interface{}{spec} +} + +func flattenClusterConfig(clusterConfig *sagemaker.MonitoringClusterConfig) []map[string]interface{} { + if clusterConfig == nil { + return []map[string]interface{}{} + } + + spec := map[string]interface{}{} + + if clusterConfig.InstanceCount != nil { + spec["instance_count"] = aws.Int64Value(clusterConfig.InstanceCount) + } + + if clusterConfig.InstanceType != nil { + spec["instance_type"] = aws.StringValue(clusterConfig.InstanceType) + } + + if clusterConfig.VolumeSizeInGB != nil { + spec["volume_size_in_gb"] = aws.Int64Value(clusterConfig.VolumeSizeInGB) + } + + return []map[string]interface{}{spec} 
+} + +func resourceDataQualityJobDefinitionUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).SageMakerConn() + + if d.HasChange("tags_all") { + o, n := d.GetChange("tags_all") + + if err := UpdateTags(ctx, conn, d.Get("arn").(string), o, n); err != nil { + return sdkdiag.AppendErrorf(diags, "updating SageMaker Data Quality Job Definition (%s) tags: %s", d.Id(), err) + } + } + return append(diags, resourceEndpointConfigurationRead(ctx, d, meta)...) +} + +func resourceDataQualityJobDefinitionDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).SageMakerConn() + + deleteOpts := &sagemaker.DeleteDataQualityJobDefinitionInput{ + JobDefinitionName: aws.String(d.Id()), + } + log.Printf("[INFO] Deleting SageMaker Data Quality Job Definition : %s", d.Id()) + + _, err := conn.DeleteDataQualityJobDefinitionWithContext(ctx, deleteOpts) + + if tfawserr.ErrMessageContains(err, "ValidationException", "Could not find data quality job definition") { + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) + } + + return diags +} + +func expandDataQualityAppSpecification(configured []interface{}) *sagemaker.DataQualityAppSpecification { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.DataQualityAppSpecification{} + + if v, ok := m["image_uri"].(string); ok && v != "" { + c.ImageUri = aws.String(v) + } + + return c +} + +func expandDataQualityJobInput(configured []interface{}) *sagemaker.DataQualityJobInput { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.DataQualityJobInput{} + + if v, ok := m["endpoint_input"].([]interface{}); ok && len(v) > 0 { + c.EndpointInput = 
expandEndpointInput(v) + } + + return c +} + +func expandEndpointInput(configured []interface{}) *sagemaker.EndpointInput { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.EndpointInput{} + + if v, ok := m["endpoint_name"].(string); ok && v != "" { + c.EndpointName = aws.String(v) + } + + if v, ok := m["local_path"].(string); ok && v != "" { + c.LocalPath = aws.String(v) + } + + return c +} + +func expandDataQualityJobOutputConfig(configured []interface{}) *sagemaker.MonitoringOutputConfig { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.MonitoringOutputConfig{} + + if v, ok := m["monitoring_outputs"].([]interface{}); ok && len(v) > 0 { + c.MonitoringOutputs = expandMonitoringOutputs(v) + } + + return c +} + +func expandMonitoringOutputs(configured []interface{}) []*sagemaker.MonitoringOutput { + containers := make([]*sagemaker.MonitoringOutput, 0, len(configured)) + + for _, lRaw := range configured { + data := lRaw.(map[string]interface{}) + + l := &sagemaker.MonitoringOutput{ + S3Output: expandS3Output(data["s3_output"].([]interface{})), + } + containers = append(containers, l) + } + + return containers +} + +func expandS3Output(configured []interface{}) *sagemaker.MonitoringS3Output { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.MonitoringS3Output{} + + if v, ok := m["local_path"].(string); ok && v != "" { + c.LocalPath = aws.String(v) + } + + if v, ok := m["s3_uri"].(string); ok && v != "" { + c.S3Uri = aws.String(v) + } + + return c +} + +func expandJobResources(configured []interface{}) *sagemaker.MonitoringResources { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.MonitoringResources{} + + if v, ok := m["cluster_config"].([]interface{}); ok && len(v) > 0 { + c.ClusterConfig = 
expandClusterConfig(v) + } + + return c +} + +func expandClusterConfig(configured []interface{}) *sagemaker.MonitoringClusterConfig { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.MonitoringClusterConfig{} + + if v, ok := m["instance_count"].(int); ok && v > 0 { + c.InstanceCount = aws.Int64(int64(v)) + } + if v, ok := m["instance_type"].(string); ok && v != "" { + c.InstanceType = aws.String(v) + } + + if v, ok := m["volume_size_in_gb"].(int); ok && v > 0 { + c.VolumeSizeInGB = aws.Int64(int64(v)) + } + + return c +} diff --git a/internal/service/sagemaker/find.go b/internal/service/sagemaker/find.go index c6f6c7b94799..ff10d9900231 100644 --- a/internal/service/sagemaker/find.go +++ b/internal/service/sagemaker/find.go @@ -497,6 +497,31 @@ func FindEndpointConfigByName(ctx context.Context, conn *sagemaker.SageMaker, na return output, nil } +func FindDataQualityJobDefinitionByName(ctx context.Context, conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeDataQualityJobDefinitionOutput, error) { + input := &sagemaker.DescribeDataQualityJobDefinitionInput{ + JobDefinitionName: aws.String(name), + } + + output, err := conn.DescribeDataQualityJobDefinitionWithContext(ctx, input) + + if tfawserr.ErrMessageContains(err, ErrCodeValidationException, "Could not find data quality job definition") { + return nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + func FindFlowDefinitionByName(ctx context.Context, conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeFlowDefinitionOutput, error) { input := &sagemaker.DescribeFlowDefinitionInput{ FlowDefinitionName: aws.String(name), diff --git a/internal/service/sagemaker/service_package_gen.go b/internal/service/sagemaker/service_package_gen.go index 6666a6c93960..bbe8dc74eb9c 
100644 --- a/internal/service/sagemaker/service_package_gen.go +++ b/internal/service/sagemaker/service_package_gen.go @@ -32,6 +32,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) map[string]func() *sc "aws_sagemaker_app": ResourceApp, "aws_sagemaker_app_image_config": ResourceAppImageConfig, "aws_sagemaker_code_repository": ResourceCodeRepository, + "aws_sagemaker_data_quality_job_definition": ResourceDataQualityJobDefinition, "aws_sagemaker_device": ResourceDevice, "aws_sagemaker_device_fleet": ResourceDeviceFleet, "aws_sagemaker_domain": ResourceDomain, From 3c527d3bf31e30f97420fe41ba1af1326f15d157 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Fri, 10 Mar 2023 15:11:53 -0800 Subject: [PATCH 02/49] adding full data quality job def api --- .../sagemaker/data_quality_job_definition.go | 1007 ++++++++++++++++- 1 file changed, 973 insertions(+), 34 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition.go b/internal/service/sagemaker/data_quality_job_definition.go index 7131ee80ae32..c327a4482904 100644 --- a/internal/service/sagemaker/data_quality_job_definition.go +++ b/internal/service/sagemaker/data_quality_job_definition.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -41,12 +42,105 @@ func ResourceDataQualityJobDefinition() *schema.Resource { ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "container_arguments": { + Type: schema.TypeSet, + MinItems: 1, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + 
"container_entrypoint": { + Type: schema.TypeSet, + MinItems: 1, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "environment": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + ValidateFunc: validEnvironment, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "image_uri": { Type: schema.TypeString, Required: true, ForceNew: true, ValidateFunc: validImage, }, + "post_analytics_processor_source_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringMatch(regexp.MustCompile(`^(https|s3)://([^/])/?(.*)$`), ""), + validation.StringLenBetween(1, 512), + ), + }, + "record_preprocessor_source_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringMatch(regexp.MustCompile(`^(https|s3)://([^/])/?(.*)$`), ""), + validation.StringLenBetween(1, 512), + ), + }, + }, + }, + }, + "data_quality_baseline_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "baselining_job_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validName, + }, + "constraints_resource": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "s3_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringMatch(regexp.MustCompile(`^(https|s3)://([^/])/?(.*)$`), ""), + validation.StringLenBetween(1, 512), + ), + }, + }, + }, + }, + "statistics_resource": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "s3_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.All( + 
validation.StringMatch(regexp.MustCompile(`^(https|s3)://([^/])/?(.*)$`), ""), + validation.StringLenBetween(1, 512), + ), + }, + }, + }, + }, }, }, }, @@ -57,22 +151,158 @@ func ResourceDataQualityJobDefinition() *schema.Resource { ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "batch_transform_input": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data_captured_destination_s3_uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringMatch(regexp.MustCompile(`^(https|s3)://([^/])/?(.*)$`), ""), + validation.StringLenBetween(1, 512), + ), + }, + "dataset_format": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "csv": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + "json": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "line": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + "parquet": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + }, + }, + }, + "end_time_offset": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "features_attribute": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "inference_attribute": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "local_path": { + Type: schema.TypeString, + Required: true, + Default: "/opt/ml/processing/input", + ForceNew: true, + ValidateFunc: 
validation.All( + validation.StringLenBetween(1, 1024), + validation.StringMatch(regexp.MustCompile(`^\/opt\/ml\/processing\/.*`), "Must start with `/opt/ml/processing`."), + ), + }, + "probability_attribute": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "probability_threshold_attribute": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: true, + ValidateFunc: validation.FloatAtLeast(0), + }, + "s3_data_distribution_type": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + ValidateFunc: validation.StringInSlice(sagemaker.ProcessingS3DataDistributionType_Values(), false), + }, + "s3_input_mode": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + ValidateFunc: validation.StringInSlice(sagemaker.ProcessingS3InputMode_Values(), false), + }, + "start_time_offset": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + }, + }, + }, "endpoint_input": { Type: schema.TypeList, MaxItems: 1, - Required: true, + Optional: true, ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "end_time_offset": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, "endpoint_name": { Type: schema.TypeString, Required: true, ForceNew: true, ValidateFunc: validName, }, - "local_path": { + "features_attribute": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "inference_attribute": { Type: schema.TypeString, + ForceNew: true, Optional: true, + }, + "local_path": { + Type: schema.TypeString, + Required: true, Default: "/opt/ml/processing/input", ForceNew: true, ValidateFunc: validation.All( @@ -80,6 +310,34 @@ func ResourceDataQualityJobDefinition() *schema.Resource { validation.StringMatch(regexp.MustCompile(`^\/opt\/ml\/processing\/.*`), "Must start with `/opt/ml/processing`."), ), }, + "probability_attribute": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "probability_threshold_attribute": { + Type: schema.TypeFloat, + 
Optional: true, + ForceNew: true, + ValidateFunc: validation.FloatAtLeast(0), + }, + "s3_data_distribution_type": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + ValidateFunc: validation.StringInSlice(sagemaker.ProcessingS3DataDistributionType_Values(), false), + }, + "s3_input_mode": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + ValidateFunc: validation.StringInSlice(sagemaker.ProcessingS3InputMode_Values(), false), + }, + "start_time_offset": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, }, }, }, @@ -93,6 +351,12 @@ func ResourceDataQualityJobDefinition() *schema.Resource { ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidARN, + }, "monitoring_outputs": { Type: schema.TypeList, MinItems: 1, @@ -109,7 +373,7 @@ func ResourceDataQualityJobDefinition() *schema.Resource { Schema: map[string]*schema.Schema{ "local_path": { Type: schema.TypeString, - Optional: true, + Required: true, Default: "/opt/ml/processing/output", ForceNew: true, ValidateFunc: validation.All( @@ -117,6 +381,12 @@ func ResourceDataQualityJobDefinition() *schema.Resource { validation.StringMatch(regexp.MustCompile(`^\/opt\/ml\/processing\/.*`), "Must start with `/opt/ml/processing`."), ), }, + "s3_upload_mode": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + ValidateFunc: validation.StringInSlice(sagemaker.ProcessingS3UploadMode_Values(), false), + }, "s3_uri": { Type: schema.TypeString, Required: true, @@ -161,6 +431,12 @@ func ResourceDataQualityJobDefinition() *schema.Resource { ForceNew: true, ValidateFunc: validation.StringInSlice(sagemaker.ProcessingInstanceType_Values(), false), }, + "volume_kms_key_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidARN, + }, "volume_size_in_gb": { Type: schema.TypeInt, Required: true, @@ -173,11 
+449,6 @@ func ResourceDataQualityJobDefinition() *schema.Resource { }, }, }, - "role_arn": { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, "name": { Type: schema.TypeString, Optional: true, @@ -185,6 +456,73 @@ func ResourceDataQualityJobDefinition() *schema.Resource { ForceNew: true, ValidateFunc: validName, }, + "network_config": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_inter_container_traffic_encryption": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "enable_network_isolation": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "vpc_config": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "security_group_ids": { + Type: schema.TypeSet, + MinItems: 1, + MaxItems: 5, + Required: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "subnets": { + Type: schema.TypeSet, + MinItems: 1, + MaxItems: 16, + Required: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + }, + }, + "role_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "stopping_condition": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_runtime_in_seconds": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(1, 3600), + }, + }, + }, + }, "tags": tftags.TagsSchema(), "tags_all": tftags.TagsSchemaComputed(), }, @@ -219,6 +557,18 @@ func resourceDataQualityJobDefinitionCreate(ctx context.Context, d *schema.Resou RoleArn: aws.String(roleArn), } + if v, ok := d.GetOk("data_quality_baseline_config"); ok && len(v.([]interface{})) > 0 { + 
createOpts.DataQualityBaselineConfig = expandDataQualityBaselineConfig(v.([]interface{})) + } + + if v, ok := d.GetOk("network_config"); ok && len(v.([]interface{})) > 0 { + createOpts.NetworkConfig = expandNetworkConfig(v.([]interface{})) + } + + if v, ok := d.GetOk("stopping_condition"); ok && len(v.([]interface{})) > 0 { + createOpts.StoppingCondition = expandStoppingCondition(v.([]interface{})) + } + if len(tags) > 0 { createOpts.Tags = Tags(tags.IgnoreAWS()) } @@ -259,6 +609,10 @@ func resourceDataQualityJobDefinitionRead(ctx context.Context, d *schema.Resourc return sdkdiag.AppendErrorf(diags, "setting data_quality_app_specification for SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) } + if err := d.Set("data_quality_baseline_config", flattenDataQualityBaselineConfig(jobDefinition.DataQualityBaselineConfig)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting data_quality_baseline_config for SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) + } + if err := d.Set("data_quality_job_input", flattenDataQualityJobInput(jobDefinition.DataQualityJobInput)); err != nil { return sdkdiag.AppendErrorf(diags, "setting data_quality_job_input for SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) } @@ -271,6 +625,14 @@ func resourceDataQualityJobDefinitionRead(ctx context.Context, d *schema.Resourc return sdkdiag.AppendErrorf(diags, "setting job_resources for SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) } + if err := d.Set("network_config", flattenNetworkConfig(jobDefinition.NetworkConfig)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting network_config for SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) + } + + if err := d.Set("stopping_condition", flattenStoppingCondition(jobDefinition.StoppingCondition)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting stopping_condition for SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) + } + tags, err := ListTags(ctx, conn, 
aws.StringValue(jobDefinition.JobDefinitionArn)) if err != nil { return sdkdiag.AppendErrorf(diags, "listing tags for SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) @@ -301,60 +663,274 @@ func flattenDataQualityAppSpecification(appSpecification *sagemaker.DataQualityA spec["image_uri"] = aws.StringValue(appSpecification.ImageUri) } - return []map[string]interface{}{spec} -} + if appSpecification.ContainerArguments != nil { + spec["container_arguments"] = flex.FlattenStringSet(appSpecification.ContainerArguments) + } -func flattenDataQualityJobInput(jobInput *sagemaker.DataQualityJobInput) []map[string]interface{} { - if jobInput == nil { - return []map[string]interface{}{} + if appSpecification.ContainerEntrypoint != nil { + spec["container_entrypoint"] = flex.FlattenStringSet(appSpecification.ContainerEntrypoint) } - spec := map[string]interface{}{} + if appSpecification.Environment != nil { + spec["environment"] = aws.StringValueMap(appSpecification.Environment) + } - if jobInput.EndpointInput != nil { - spec["endpoint_input"] = flattenEndpointInput(jobInput.EndpointInput) + if appSpecification.PostAnalyticsProcessorSourceUri != nil { + spec["post_analytics_processor_source_uri"] = aws.StringValue(appSpecification.PostAnalyticsProcessorSourceUri) + } + + if appSpecification.RecordPreprocessorSourceUri != nil { + spec["record_preprocessor_source_uri"] = aws.StringValue(appSpecification.RecordPreprocessorSourceUri) } return []map[string]interface{}{spec} } -func flattenEndpointInput(endpointInput *sagemaker.EndpointInput) []map[string]interface{} { - if endpointInput == nil { +func flattenDataQualityBaselineConfig(baselineConfig *sagemaker.DataQualityBaselineConfig) []map[string]interface{} { + if baselineConfig == nil { return []map[string]interface{}{} } - spec := map[string]interface{}{} + fConfig := map[string]interface{}{} - if endpointInput.EndpointName != nil { - spec["endpoint_name"] = aws.StringValue(endpointInput.EndpointName) + if 
baselineConfig.BaseliningJobName != nil { + fConfig["baselining_job_name"] = aws.StringValue(baselineConfig.BaseliningJobName) } - if endpointInput.LocalPath != nil { - spec["local_path"] = aws.StringValue(endpointInput.LocalPath) + if baselineConfig.ConstraintsResource != nil { + fConfig["constraints_resource"] = flattenConstraintsResource(baselineConfig.ConstraintsResource) } - return []map[string]interface{}{spec} + if baselineConfig.StatisticsResource != nil { + fConfig["statistics_resource"] = flattenStatisticsResource(baselineConfig.StatisticsResource) + } + + return []map[string]interface{}{fConfig} } -func flattenDataQualityJobOutputConfig(outputConfig *sagemaker.MonitoringOutputConfig) []map[string]interface{} { - if outputConfig == nil { +func flattenConstraintsResource(constraintsResource *sagemaker.MonitoringConstraintsResource) []map[string]interface{} { + if constraintsResource == nil { return []map[string]interface{}{} } - spec := map[string]interface{}{} + fResource := map[string]interface{}{} - if outputConfig.MonitoringOutputs != nil { - spec["monitoring_outputs"] = flattenMonitoringOutputs(outputConfig.MonitoringOutputs) + if constraintsResource.S3Uri != nil { + fResource["s3_uri"] = aws.StringValue(constraintsResource.S3Uri) } - return []map[string]interface{}{spec} + return []map[string]interface{}{fResource} } -func flattenMonitoringOutputs(list []*sagemaker.MonitoringOutput) []map[string]interface{} { - containers := make([]map[string]interface{}, 0, len(list)) +func flattenStatisticsResource(statisticsResource *sagemaker.MonitoringStatisticsResource) []map[string]interface{} { + if statisticsResource == nil { + return []map[string]interface{}{} + } - for _, lRaw := range list { - monitoringOutput := make(map[string]interface{}) + fResource := map[string]interface{}{} + + if statisticsResource.S3Uri != nil { + fResource["s3_uri"] = aws.StringValue(statisticsResource.S3Uri) + } + + return []map[string]interface{}{fResource} +} + +func 
flattenDataQualityJobInput(jobInput *sagemaker.DataQualityJobInput) []map[string]interface{} {
+	if jobInput == nil {
+		return []map[string]interface{}{}
+	}
+
+	spec := map[string]interface{}{}
+
+	if jobInput.EndpointInput != nil {
+		spec["endpoint_input"] = flattenEndpointInput(jobInput.EndpointInput)
+	}
+
+	if jobInput.BatchTransformInput != nil {
+		spec["batch_transform_input"] = flattenBatchTransformInput(jobInput.BatchTransformInput)
+	}
+
+	return []map[string]interface{}{spec}
+}
+
+func flattenBatchTransformInput(transformInput *sagemaker.BatchTransformInput_) []map[string]interface{} {
+	if transformInput == nil {
+		return []map[string]interface{}{}
+	}
+
+	fInput := map[string]interface{}{}
+
+	if transformInput.LocalPath != nil {
+		fInput["local_path"] = aws.StringValue(transformInput.LocalPath)
+	}
+
+	if transformInput.DataCapturedDestinationS3Uri != nil {
+		fInput["data_captured_destination_s3_uri"] = aws.StringValue(transformInput.DataCapturedDestinationS3Uri)
+	}
+
+	if transformInput.DatasetFormat != nil {
+		fInput["dataset_format"] = flattenDatasetFormat(transformInput.DatasetFormat)
+	}
+
+	if transformInput.EndTimeOffset != nil {
+		fInput["end_time_offset"] = aws.StringValue(transformInput.EndTimeOffset)
+	}
+
+	if transformInput.FeaturesAttribute != nil {
+		fInput["features_attribute"] = aws.StringValue(transformInput.FeaturesAttribute)
+	}
+
+	if transformInput.InferenceAttribute != nil {
+		fInput["inference_attribute"] = aws.StringValue(transformInput.InferenceAttribute)
+	}
+
+	if transformInput.ProbabilityAttribute != nil {
+		fInput["probability_attribute"] = aws.StringValue(transformInput.ProbabilityAttribute)
+	}
+
+	if transformInput.ProbabilityThresholdAttribute != nil {
+		fInput["probability_threshold_attribute"] = aws.Float64Value(transformInput.ProbabilityThresholdAttribute)
+	}
+
+	if transformInput.S3DataDistributionType != nil {
+		fInput["s3_data_distribution_type"] = aws.StringValue(transformInput.S3DataDistributionType)
+	}
+
+	if 
transformInput.S3InputMode != nil { + fInput["s3_input_mode"] = aws.StringValue(transformInput.S3InputMode) + } + + if transformInput.StartTimeOffset != nil { + fInput["start_time_offset"] = aws.StringValue(transformInput.StartTimeOffset) + } + + return []map[string]interface{}{fInput} +} + +func flattenDatasetFormat(datasetFormat *sagemaker.MonitoringDatasetFormat) []map[string]interface{} { + if datasetFormat == nil { + return []map[string]interface{}{} + } + + fFormat := map[string]interface{}{} + + if datasetFormat.Csv != nil { + fFormat["csv"] = flattenCsv(datasetFormat.Csv) + } + + if datasetFormat.Json != nil { + fFormat["json"] = flattenJson(datasetFormat.Json) + } + + if datasetFormat.Parquet != nil { + fFormat["parquet"] = []map[string]interface{}{} + } + + return []map[string]interface{}{fFormat} +} + +func flattenCsv(csv *sagemaker.MonitoringCsvDatasetFormat) []map[string]interface{} { + if csv == nil { + return []map[string]interface{}{} + } + + fCsv := map[string]interface{}{} + + if csv.Header != nil { + fCsv["header"] = aws.BoolValue(csv.Header) + } + + return []map[string]interface{}{fCsv} +} + +func flattenJson(json *sagemaker.MonitoringJsonDatasetFormat) []map[string]interface{} { + if json == nil { + return []map[string]interface{}{} + } + + fJson := map[string]interface{}{} + + if json.Line != nil { + fJson["line"] = aws.BoolValue(json.Line) + } + + return []map[string]interface{}{fJson} +} + +func flattenEndpointInput(endpointInput *sagemaker.EndpointInput) []map[string]interface{} { + if endpointInput == nil { + return []map[string]interface{}{} + } + + spec := map[string]interface{}{} + + if endpointInput.EndpointName != nil { + spec["endpoint_name"] = aws.StringValue(endpointInput.EndpointName) + } + + if endpointInput.LocalPath != nil { + spec["local_path"] = aws.StringValue(endpointInput.LocalPath) + } + + if endpointInput.EndTimeOffset != nil { + spec["end_time_offset"] = aws.StringValue(endpointInput.EndTimeOffset) + } + + if 
endpointInput.FeaturesAttribute != nil { + spec["features_attribute"] = aws.StringValue(endpointInput.FeaturesAttribute) + } + + if endpointInput.InferenceAttribute != nil { + spec["inference_attribute"] = aws.StringValue(endpointInput.InferenceAttribute) + } + + if endpointInput.ProbabilityAttribute != nil { + spec["probability_attribute"] = aws.StringValue(endpointInput.ProbabilityAttribute) + } + + if endpointInput.ProbabilityThresholdAttribute != nil { + spec["probability_threshold_attribute"] = aws.Float64Value(endpointInput.ProbabilityThresholdAttribute) + } + + if endpointInput.S3DataDistributionType != nil { + spec["s3_data_distribution_type"] = aws.StringValue(endpointInput.S3DataDistributionType) + } + + if endpointInput.S3InputMode != nil { + spec["s3_input_mode"] = aws.StringValue(endpointInput.S3InputMode) + } + + if endpointInput.StartTimeOffset != nil { + spec["start_time_offset"] = aws.StringValue(endpointInput.StartTimeOffset) + } + + return []map[string]interface{}{spec} +} + +func flattenDataQualityJobOutputConfig(outputConfig *sagemaker.MonitoringOutputConfig) []map[string]interface{} { + if outputConfig == nil { + return []map[string]interface{}{} + } + + spec := map[string]interface{}{} + + if outputConfig.KmsKeyId != nil { + spec["kms_key_id"] = aws.StringValue(outputConfig.KmsKeyId) + } + + if outputConfig.MonitoringOutputs != nil { + spec["monitoring_outputs"] = flattenMonitoringOutputs(outputConfig.MonitoringOutputs) + } + + return []map[string]interface{}{spec} +} + +func flattenMonitoringOutputs(list []*sagemaker.MonitoringOutput) []map[string]interface{} { + containers := make([]map[string]interface{}, 0, len(list)) + + for _, lRaw := range list { + monitoringOutput := make(map[string]interface{}) monitoringOutput["s3_output"] = flattenS3Output(lRaw.S3Output) containers = append(containers, monitoringOutput) } @@ -373,6 +949,10 @@ func flattenS3Output(s3Output *sagemaker.MonitoringS3Output) []map[string]interf spec["local_path"] = 
aws.StringValue(s3Output.LocalPath) } + if s3Output.S3UploadMode != nil { + spec["s3_upload_mode"] = aws.StringValue(s3Output.S3UploadMode) + } + if s3Output.S3Uri != nil { spec["s3_uri"] = aws.StringValue(s3Output.S3Uri) } @@ -409,6 +989,10 @@ func flattenClusterConfig(clusterConfig *sagemaker.MonitoringClusterConfig) []ma spec["instance_type"] = aws.StringValue(clusterConfig.InstanceType) } + if clusterConfig.VolumeKmsKeyId != nil { + spec["volume_kms_key_id"] = aws.StringValue(clusterConfig.VolumeKmsKeyId) + } + if clusterConfig.VolumeSizeInGB != nil { spec["volume_size_in_gb"] = aws.Int64Value(clusterConfig.VolumeSizeInGB) } @@ -416,6 +1000,64 @@ func flattenClusterConfig(clusterConfig *sagemaker.MonitoringClusterConfig) []ma return []map[string]interface{}{spec} } +func flattenNetworkConfig(networkConfig *sagemaker.NetworkConfig) []map[string]interface{} { + if networkConfig == nil { + return []map[string]interface{}{} + } + + spec := map[string]interface{}{} + + if networkConfig.ClusterConfig != nil { + spec["cluster_config"] = flattenClusterConfig(jobResources.ClusterConfig) + } + + if networkConfig.EnableInterContainerTrafficEncryption != nil { + spec["enable_inter_container_traffic_encryption"] = aws.BoolValue(networkConfig.EnableInterContainerTrafficEncryption) + } + + if networkConfig.EnableNetworkIsolation != nil { + spec["enable_network_isolation"] = aws.BoolValue(networkConfig.EnableNetworkIsolation) + } + + if networkConfig.VpcConfig != nil { + spec["vpc_config"] = flattenVpcConfig(networkConfig.VpcConfig) + } + + return []map[string]interface{}{spec} +} + +func flattenVpcConfig(vpcConfig *sagemaker.VpcConfig) []map[string]interface{} { + if vpcConfig == nil { + return []map[string]interface{}{} + } + + spec := map[string]interface{}{} + + if vpcConfig.SecurityGroupIds != nil { + spec["security_group_ids"] = flex.FlattenStringSet(vpcConfig.SecurityGroupIds) + } + + if vpcConfig.Subnets != nil { + spec["subnets"] = 
flex.FlattenStringSet(vpcConfig.Subnets) + } + + return []map[string]interface{}{spec} +} + +func flattenStoppingCondition(stoppingCondition *sagemaker.StoppingCondition) []map[string]interface{} { + if stoppingCondition == nil { + return []map[string]interface{}{} + } + + spec := map[string]interface{}{} + + if stoppingCondition.MaxRuntimeInSeconds != nil { + spec["max_runtime_in_seconds"] = aws.Int64Value(stoppingCondition.MaxRuntimeInSeconds) + } + + return []map[string]interface{}{spec} +} + func resourceDataQualityJobDefinitionUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).SageMakerConn() @@ -465,6 +1107,82 @@ func expandDataQualityAppSpecification(configured []interface{}) *sagemaker.Data c.ImageUri = aws.String(v) } + if v, ok := m["container_arguments"].(*schema.Set); ok && v.Len() > 0 { + c.ContainerArguments = flex.ExpandStringSet(v) + } + + if v, ok := m["container_entrypoint"].(*schema.Set); ok && v.Len() > 0 { + c.ContainerEntrypoint = flex.ExpandStringSet(v) + } + + if v, ok := m["environment"].(map[string]interface{}); ok && len(v) > 0 { + c.Environment = flex.ExpandStringMap(v) + } + + if v, ok := m["post_analytics_processor_source_uri"].(string); ok && v != "" { + c.PostAnalyticsProcessorSourceUri = aws.String(v) + } + + if v, ok := m["record_preprocessor_source_uri"].(string); ok && v != "" { + c.RecordPreprocessorSourceUri = aws.String(v) + } + + return c +} + +func expandDataQualityBaselineConfig(configured []interface{}) *sagemaker.DataQualityBaselineConfig { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.DataQualityBaselineConfig{} + + if v, ok := m["baselining_job_name"].(string); ok && v != "" { + c.BaseliningJobName = aws.String(v) + } + + if v, ok := m["constraints_resource"].([]interface{}); ok && len(v) > 0 { + c.ConstraintsResource = expandConstraintsResource(v) + } + + if 
v, ok := m["statistics_resource"].([]interface{}); ok && len(v) > 0 { + c.StatisticsResource = expandStatisticsResource(v) + } + + return c +} + +func expandConstraintsResource(configured []interface{}) *sagemaker.MonitoringConstraintsResource { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.MonitoringConstraintsResource{} + + if v, ok := m["s3_uri"].(string); ok && v != "" { + c.S3Uri = aws.String(v) + } + + return c +} + +func expandStatisticsResource(configured []interface{}) *sagemaker.MonitoringStatisticsResource { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.MonitoringStatisticsResource{} + + if v, ok := m["s3_uri"].(string); ok && v != "" { + c.S3Uri = aws.String(v) + } + return c } @@ -481,6 +1199,10 @@ func expandDataQualityJobInput(configured []interface{}) *sagemaker.DataQualityJ c.EndpointInput = expandEndpointInput(v) } + if v, ok := m["batch_transform_input"].([]interface{}); ok && len(v) > 0 { + c.BatchTransformInput = expandBatchTransformInput(v) + } + return c } @@ -497,10 +1219,154 @@ func expandEndpointInput(configured []interface{}) *sagemaker.EndpointInput { c.EndpointName = aws.String(v) } + if v, ok := m["end_time_offset"].(string); ok && v != "" { + c.EndTimeOffset = aws.String(v) + } + + if v, ok := m["features_attribute"].(string); ok && v != "" { + c.FeaturesAttribute = aws.String(v) + } + + if v, ok := m["inference_attribute"].(string); ok && v != "" { + c.InferenceAttribute = aws.String(v) + } + if v, ok := m["local_path"].(string); ok && v != "" { c.LocalPath = aws.String(v) } + if v, ok := m["probability_attribute"].(string); ok && v != "" { + c.ProbabilityAttribute = aws.String(v) + } + + if v, ok := m["probability_threshold_attribute"]; ok { + c.ProbabilityThresholdAttribute = aws.Float64(v.(float64)) + } + + if v, ok := m["s3_data_distribution_type"].(string); ok && v != "" { + 
c.S3DataDistributionType = aws.String(v) + } + + if v, ok := m["s3_input_mode"].(string); ok && v != "" { + c.S3InputMode = aws.String(v) + } + + if v, ok := m["start_time_offset"].(string); ok && v != "" { + c.StartTimeOffset = aws.String(v) + } + + return c +} + +func expandBatchTransformInput(configured []interface{}) *sagemaker.BatchTransformInput_ { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.BatchTransformInput_{} + + if v, ok := m["data_captured_destination_s3_uri"].(string); ok && v != "" { + c.DataCapturedDestinationS3Uri = aws.String(v) + } + + if v, ok := m["dataset_format"].([]interface{}); ok && len(v) > 0 { + c.DatasetFormat = expandDatasetFormat(v) + } + + if v, ok := m["end_time_offset"].(string); ok && v != "" { + c.EndTimeOffset = aws.String(v) + } + + if v, ok := m["features_attribute"].(string); ok && v != "" { + c.FeaturesAttribute = aws.String(v) + } + + if v, ok := m["inference_attribute"].(string); ok && v != "" { + c.InferenceAttribute = aws.String(v) + } + + if v, ok := m["local_path"].(string); ok && v != "" { + c.LocalPath = aws.String(v) + } + + if v, ok := m["probability_attribute"].(string); ok && v != "" { + c.ProbabilityAttribute = aws.String(v) + } + + if v, ok := m["probability_threshold_attribute"]; ok { + c.ProbabilityThresholdAttribute = aws.Float64(v.(float64)) + } + + if v, ok := m["s3_data_distribution_type"].(string); ok && v != "" { + c.S3DataDistributionType = aws.String(v) + } + + if v, ok := m["s3_input_mode"].(string); ok && v != "" { + c.S3InputMode = aws.String(v) + } + + if v, ok := m["start_time_offset"].(string); ok && v != "" { + c.StartTimeOffset = aws.String(v) + } + + return c +} + +func expandDatasetFormat(configured []interface{}) *sagemaker.MonitoringDatasetFormat { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.MonitoringDatasetFormat{} + + if v, ok := 
m["csv"].([]interface{}); ok && len(v) > 0 { + c.Csv = expandCsv(v) + } + + if v, ok := m["json"].([]interface{}); ok && len(v) > 0 { + c.Json = expandJson(v) + } + + if v, ok := m["parquet"].([]interface{}); ok && len(v) > 0 { + c.Parquet = &sagemaker.MonitoringParquetDatasetFormat{} + } + + return c +} + +func expandJson(configured []interface{}) *sagemaker.MonitoringJsonDatasetFormat { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.MonitoringJsonDatasetFormat{} + + if v, ok := m["line"]; ok { + c.Line = aws.Bool(v.(bool)) + } + + return c +} + +func expandCsv(configured []interface{}) *sagemaker.MonitoringCsvDatasetFormat { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.MonitoringCsvDatasetFormat{} + + if v, ok := m["header"]; ok { + c.Header = aws.Bool(v.(bool)) + } + return c } @@ -513,6 +1379,10 @@ func expandDataQualityJobOutputConfig(configured []interface{}) *sagemaker.Monit c := &sagemaker.MonitoringOutputConfig{} + if v, ok := m["kms_key_id"].(string); ok && v != "" { + c.KmsKeyId = aws.String(v) + } + if v, ok := m["monitoring_outputs"].([]interface{}); ok && len(v) > 0 { c.MonitoringOutputs = expandMonitoringOutputs(v) } @@ -548,6 +1418,10 @@ func expandS3Output(configured []interface{}) *sagemaker.MonitoringS3Output { c.LocalPath = aws.String(v) } + if v, ok := m["s3_upload_mode"].(string); ok && v != "" { + c.S3UploadMode = aws.String(v) + } + if v, ok := m["s3_uri"].(string); ok && v != "" { c.S3Uri = aws.String(v) } @@ -583,13 +1457,78 @@ func expandClusterConfig(configured []interface{}) *sagemaker.MonitoringClusterC if v, ok := m["instance_count"].(int); ok && v > 0 { c.InstanceCount = aws.Int64(int64(v)) } + if v, ok := m["instance_type"].(string); ok && v != "" { c.InstanceType = aws.String(v) } + if v, ok := m["volume_kms_key_id"].(string); ok && v != "" { + c.VolumeKmsKeyId = aws.String(v) + } + if v, ok := 
m["volume_size_in_gb"].(int); ok && v > 0 { c.VolumeSizeInGB = aws.Int64(int64(v)) } return c } + +func expandNetworkConfig(configured []interface{}) *sagemaker.MonitoringNetworkConfig { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.MonitoringNetworkConfig{} + + if v, ok := m["enable_inter_container_traffic_encryption"]; ok { + c.EnableInterContainerTrafficEncryption = aws.Bool(v.(bool)) + } + + if v, ok := m["enable_network_isolation"]; ok { + c.EnableNetworkIsolation = aws.Bool(v.(bool)) + } + + if v, ok := m["vpc_config"].([]interface{}); ok && len(v) > 0 { + c.VpcConfig = expandVpcConfig(v) + } + + return c +} + +func expandVpcConfig(configured []interface{}) *sagemaker.VpcConfig { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.VpcConfig{} + + if v, ok := m["security_group_ids"].(*schema.Set); ok && v.Len() > 0 { + c.SecurityGroupIds = flex.ExpandStringSet(v) + } + + if v, ok := m["subnets"].(*schema.Set); ok && v.Len() > 0 { + c.Subnets = flex.ExpandStringSet(v) + } + + return c +} + +func expandStoppingCondition(configured []interface{}) *sagemaker.StoppingCondition { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.StoppingCondition{} + + if v, ok := m["max_runtime_in_seconds"].(int); ok && v > 0 { + c.MaxRuntimeInSeconds = aws.Int64(int64(v)) + } + + return c +} From 2eab0a28cf4737ba789395facbd86cf9291ab095 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Thu, 16 Mar 2023 12:45:23 -0700 Subject: [PATCH 03/49] add model monitor data source --- .../prebuilt_ecr_image_data_source.go | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/internal/service/sagemaker/prebuilt_ecr_image_data_source.go b/internal/service/sagemaker/prebuilt_ecr_image_data_source.go index 600c188bc58b..2200399746a8 100644 --- 
a/internal/service/sagemaker/prebuilt_ecr_image_data_source.go +++ b/internal/service/sagemaker/prebuilt_ecr_image_data_source.go @@ -31,6 +31,8 @@ const ( repositoryLDA = "lda" // SageMaker Algorithm Linear Learner repositoryLinearLearner = "linear-learner" + // SageMaker Model Monitor + repositoryModelMonitor = "sagemaker-model-monitor-analyzer" // SageMaker Algorithm Neural Topic Model repositoryNeuralTopicModel = "ntm" // SageMaker Algorithm Object2Vec @@ -305,6 +307,35 @@ var prebuiltECRImageIDByRegion_tensorFlowServing = map[string]string{ endpoints.UsWest2RegionID: "520713654638", } +// https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-pre-built-container.html +var prebuiltECRImageIDByRegion_modelMonitor = map[string]string{ + endpoints.AfSouth1RegionID: "875698925577", + endpoints.ApEast1RegionID: "001633400207", + endpoints.ApNortheast1RegionID: "574779866223", + endpoints.ApNortheast2RegionID: "709848358524", + endpoints.ApNortheast3RegionID: "990339680094", + endpoints.ApSouth1RegionID: "126357580389", + endpoints.ApSoutheast1RegionID: "245545462676", + endpoints.ApSoutheast2RegionID: "563025443158", + endpoints.ApSoutheast3RegionID: "669540362728", + endpoints.CaCentral1RegionID: "536280801234", + endpoints.CnNorth1RegionID: "453000072557", + endpoints.CnNorthwest1RegionID: "453252182341", + endpoints.EuCentral1RegionID: "048819808253", + endpoints.EuNorth1RegionID: "895015795356", + endpoints.EuSouth1RegionID: "933208885752", + endpoints.EuWest1RegionID: "468650794304", + endpoints.EuWest2RegionID: "749857270468", + endpoints.EuWest3RegionID: "680080141114", + endpoints.MeSouth1RegionID: "607024016150", + endpoints.SaEast1RegionID: "539772159869", + endpoints.UsEast1RegionID: "156813124566", + endpoints.UsEast2RegionID: "777275614652", + endpoints.UsGovWest1RegionID: "362178532790", + endpoints.UsWest1RegionID: "890145073186", + endpoints.UsWest2RegionID: "159807026194", +} + // @SDKDataSource("aws_sagemaker_prebuilt_ecr_image") func 
DataSourcePrebuiltECRImage() *schema.Resource { return &schema.Resource{ @@ -323,6 +354,7 @@ func DataSourcePrebuiltECRImage() *schema.Resource { repositoryKNearestNeighbor, repositoryLDA, repositoryLinearLearner, + repositoryModelMonitor, repositoryMXNetInference, repositoryMXNetInferenceEIA, repositoryMXNetTraining, @@ -406,6 +438,8 @@ func dataSourcePrebuiltECRImageRead(ctx context.Context, d *schema.ResourceData, id = prebuiltECRImageIDByRegion_deepAR[region] case repositoryLDA: id = prebuiltECRImageIDByRegion_lda[region] + case repositoryModelMonitor: + id = prebuiltECRImageIDByRegion_modelMonitor[region] case repositoryXGBoost: id = prebuiltECRImageIDByRegion_xgBoost[region] case repositoryScikitLearn, repositorySparkML: @@ -441,5 +475,9 @@ func dataSourcePrebuiltECRImageRead(ctx context.Context, d *schema.ResourceData, } func PrebuiltECRImageCreatePath(id, region, suffix, repo, imageTag string) string { + if imageTag == "" { + return fmt.Sprintf("%s.dkr.ecr.%s.%s/%s", id, region, suffix, repo) + } + return fmt.Sprintf("%s.dkr.ecr.%s.%s/%s:%s", id, region, suffix, repo, imageTag) } From 87c4f6607d12d65c00149b9cdd37635d1bbe390a Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Thu, 16 Mar 2023 12:45:45 -0700 Subject: [PATCH 04/49] bug fixes --- .../sagemaker/data_quality_job_definition.go | 38 ++++++++++--------- internal/service/sagemaker/find.go | 2 +- 2 files changed, 22 insertions(+), 18 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition.go b/internal/service/sagemaker/data_quality_job_definition.go index c327a4482904..bb1162eea27e 100644 --- a/internal/service/sagemaker/data_quality_job_definition.go +++ b/internal/service/sagemaker/data_quality_job_definition.go @@ -233,7 +233,7 @@ func ResourceDataQualityJobDefinition() *schema.Resource { }, "local_path": { Type: schema.TypeString, - Required: true, + Optional: true, Default: "/opt/ml/processing/input", ForceNew: true, ValidateFunc: validation.All( @@ -249,6 +249,7 @@ func 
ResourceDataQualityJobDefinition() *schema.Resource { "probability_threshold_attribute": { Type: schema.TypeFloat, Optional: true, + Computed: true, ForceNew: true, ValidateFunc: validation.FloatAtLeast(0), }, @@ -256,12 +257,14 @@ func ResourceDataQualityJobDefinition() *schema.Resource { Type: schema.TypeString, ForceNew: true, Optional: true, + Computed: true, ValidateFunc: validation.StringInSlice(sagemaker.ProcessingS3DataDistributionType_Values(), false), }, "s3_input_mode": { Type: schema.TypeString, ForceNew: true, Optional: true, + Computed: true, ValidateFunc: validation.StringInSlice(sagemaker.ProcessingS3InputMode_Values(), false), }, "start_time_offset": { @@ -302,7 +305,7 @@ func ResourceDataQualityJobDefinition() *schema.Resource { }, "local_path": { Type: schema.TypeString, - Required: true, + Optional: true, Default: "/opt/ml/processing/input", ForceNew: true, ValidateFunc: validation.All( @@ -325,12 +328,14 @@ func ResourceDataQualityJobDefinition() *schema.Resource { Type: schema.TypeString, ForceNew: true, Optional: true, + Computed: true, ValidateFunc: validation.StringInSlice(sagemaker.ProcessingS3DataDistributionType_Values(), false), }, "s3_input_mode": { Type: schema.TypeString, ForceNew: true, Optional: true, + Computed: true, ValidateFunc: validation.StringInSlice(sagemaker.ProcessingS3InputMode_Values(), false), }, "start_time_offset": { @@ -373,7 +378,7 @@ func ResourceDataQualityJobDefinition() *schema.Resource { Schema: map[string]*schema.Schema{ "local_path": { Type: schema.TypeString, - Required: true, + Optional: true, Default: "/opt/ml/processing/output", ForceNew: true, ValidateFunc: validation.All( @@ -385,6 +390,7 @@ func ResourceDataQualityJobDefinition() *schema.Resource { Type: schema.TypeString, ForceNew: true, Optional: true, + Computed: true, ValidateFunc: validation.StringInSlice(sagemaker.ProcessingS3UploadMode_Values(), false), }, "s3_uri": { @@ -459,7 +465,7 @@ func ResourceDataQualityJobDefinition() *schema.Resource 
{ "network_config": { Type: schema.TypeList, MaxItems: 1, - Required: true, + Optional: true, ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -511,12 +517,14 @@ func ResourceDataQualityJobDefinition() *schema.Resource { Type: schema.TypeList, MaxItems: 1, Optional: true, + Computed: true, ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "max_runtime_in_seconds": { Type: schema.TypeInt, - Required: true, + Optional: true, + Computed: true, ForceNew: true, ValidateFunc: validation.IntBetween(1, 3600), }, @@ -1000,17 +1008,13 @@ func flattenClusterConfig(clusterConfig *sagemaker.MonitoringClusterConfig) []ma return []map[string]interface{}{spec} } -func flattenNetworkConfig(networkConfig *sagemaker.NetworkConfig) []map[string]interface{} { +func flattenNetworkConfig(networkConfig *sagemaker.MonitoringNetworkConfig) []map[string]interface{} { if networkConfig == nil { return []map[string]interface{}{} } spec := map[string]interface{}{} - if networkConfig.ClusterConfig != nil { - spec["cluster_config"] = flattenClusterConfig(jobResources.ClusterConfig) - } - if networkConfig.EnableInterContainerTrafficEncryption != nil { spec["enable_inter_container_traffic_encryption"] = aws.BoolValue(networkConfig.EnableInterContainerTrafficEncryption) } @@ -1044,7 +1048,7 @@ func flattenVpcConfig(vpcConfig *sagemaker.VpcConfig) []map[string]interface{} { return []map[string]interface{}{spec} } -func flattenStoppingCondition(stoppingCondition *sagemaker.StoppingCondition) []map[string]interface{} { +func flattenStoppingCondition(stoppingCondition *sagemaker.MonitoringStoppingCondition) []map[string]interface{} { if stoppingCondition == nil { return []map[string]interface{}{} } @@ -1239,8 +1243,8 @@ func expandEndpointInput(configured []interface{}) *sagemaker.EndpointInput { c.ProbabilityAttribute = aws.String(v) } - if v, ok := m["probability_threshold_attribute"]; ok { - c.ProbabilityThresholdAttribute = 
aws.Float64(v.(float64)) + if v, ok := m["probability_threshold_attribute"].(float64); ok && v > 0 { + c.ProbabilityThresholdAttribute = aws.Float64(v) } if v, ok := m["s3_data_distribution_type"].(string); ok && v != "" { @@ -1295,8 +1299,8 @@ func expandBatchTransformInput(configured []interface{}) *sagemaker.BatchTransfo c.ProbabilityAttribute = aws.String(v) } - if v, ok := m["probability_threshold_attribute"]; ok { - c.ProbabilityThresholdAttribute = aws.Float64(v.(float64)) + if v, ok := m["probability_threshold_attribute"].(float64); ok && v > 0 { + c.ProbabilityThresholdAttribute = aws.Float64(v) } if v, ok := m["s3_data_distribution_type"].(string); ok && v != "" { @@ -1517,14 +1521,14 @@ func expandVpcConfig(configured []interface{}) *sagemaker.VpcConfig { return c } -func expandStoppingCondition(configured []interface{}) *sagemaker.StoppingCondition { +func expandStoppingCondition(configured []interface{}) *sagemaker.MonitoringStoppingCondition { if len(configured) == 0 { return nil } m := configured[0].(map[string]interface{}) - c := &sagemaker.StoppingCondition{} + c := &sagemaker.MonitoringStoppingCondition{} if v, ok := m["max_runtime_in_seconds"].(int); ok && v > 0 { c.MaxRuntimeInSeconds = aws.Int64(int64(v)) diff --git a/internal/service/sagemaker/find.go b/internal/service/sagemaker/find.go index ff10d9900231..ab003ec98f0d 100644 --- a/internal/service/sagemaker/find.go +++ b/internal/service/sagemaker/find.go @@ -504,7 +504,7 @@ func FindDataQualityJobDefinitionByName(ctx context.Context, conn *sagemaker.Sag output, err := conn.DescribeDataQualityJobDefinitionWithContext(ctx, input) - if tfawserr.ErrMessageContains(err, ErrCodeValidationException, "Could not find data quality job definition") { + if tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { return nil, &resource.NotFoundError{ LastError: err, LastRequest: input, From 4eef786afeef68fd9fa9ed7835827f5e4ecc0ee9 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Thu, 16 Mar 2023 
12:46:01 -0700 Subject: [PATCH 05/49] add first data quality job definition test --- .../data_quality_job_definition_test.go | 273 ++++++++++++++++++ 1 file changed, 273 insertions(+) create mode 100644 internal/service/sagemaker/data_quality_job_definition_test.go diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go new file mode 100644 index 000000000000..2221a4e7e190 --- /dev/null +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -0,0 +1,273 @@ +package sagemaker_test + +import ( + "context" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/service/sagemaker" + sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfsagemaker "github.com/hashicorp/terraform-provider-aws/internal/service/sagemaker" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" +) + +func TestAccSageMakerDataQualityJobDefinition_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_data_quality_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEndpoint_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", 
fmt.Sprintf("data-quality-job-definition/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_quality_job_input.0.endpoint_input.0.endpoint_name", "aws_sagemaker_endpoint.test", "name"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), + resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: 
true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckDataQualityJobDefinitionDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).SageMakerConn() + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_sagemaker_data_quality_job_definition" { + continue + } + + _, err := tfsagemaker.FindDataQualityJobDefinitionByName(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("SageMaker Data Quality Job Definition (%s) still exists", rs.Primary.ID) + } + return nil + } +} + +func testAccCheckDataQualityJobDefinitionExists(ctx context.Context, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("no SageMaker Data Quality Job Definition ID is set") + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).SageMakerConn() + _, err := tfsagemaker.FindDataQualityJobDefinitionByName(ctx, conn, rs.Primary.ID) + + return err + } +} + +func testAccEndpoint_Base(rName string) string { + return fmt.Sprintf(` + +provider "aws" { + region = "us-west-2" + + default_tags { + tags = { + "adsk:moniker" = "AMPSDEMO-C-UW2" + } + } +} + +data "aws_caller_identity" "current" {} + +data "aws_iam_policy" "boundary" { + arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/ADSK-Boundary" +} + +data "aws_iam_policy_document" "access" { + statement { + effect = "Allow" + + actions = [ + "cloudwatch:PutMetricData", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:CreateLogGroup", + "logs:DescribeLogStreams", + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:BatchGetImage", + "s3:GetObject", + ] + + resources = ["*"] + } +} + +data 
"aws_partition" "current" {} + +data "aws_iam_policy_document" "assume_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["sagemaker.${data.aws_partition.current.dns_suffix}"] + } + } +} + +resource "aws_iam_role" "test" { + name = %[1]q + path = "/" + assume_role_policy = data.aws_iam_policy_document.assume_role.json + permissions_boundary = data.aws_iam_policy.boundary.arn +} + +resource "aws_iam_role_policy" "test" { + role = aws_iam_role.test.name + policy = data.aws_iam_policy_document.access.json +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_bucket_acl" "test" { + bucket = aws_s3_bucket.test.id + acl = "private" +} + +resource "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.id + key = "model.tar.gz" + source = "test-fixtures/sagemaker-tensorflow-serving-test-model.tar.gz" +} + +data "aws_sagemaker_prebuilt_ecr_image" "test" { + repository_name = "sagemaker-tensorflow-serving" + image_tag = "1.12-cpu" +} + +resource "aws_sagemaker_model" "test" { + name = %[1]q + execution_role_arn = aws_iam_role.test.arn + + primary_container { + image = data.aws_sagemaker_prebuilt_ecr_image.test.registry_path + model_data_url = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/${aws_s3_object.test.key}" + } + + depends_on = [aws_iam_role_policy.test] +} + +resource "aws_sagemaker_endpoint_configuration" "test" { + name = %[1]q + + production_variants { + initial_instance_count = 1 + initial_variant_weight = 1 + instance_type = "ml.t2.medium" + model_name = aws_sagemaker_model.test.name + variant_name = "variant-1" + } + + data_capture_config { + initial_sampling_percentage = 100 + destination_s3_uri = "s3://${aws_s3_bucket.test.bucket_regional_domain_name}/capture" + capture_options { + capture_mode = "Input" + } + capture_options { + capture_mode = "Output" + } + } +} + +resource "aws_sagemaker_endpoint" "test" { + endpoint_config_name = 
aws_sagemaker_endpoint_configuration.test.name + name = %[1]q +} +`, rName) +} + +func testAccEndpoint_basic(rName string) string { + return testAccEndpoint_Base(rName) + fmt.Sprintf(` +locals { + output_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" +} + +data "aws_sagemaker_prebuilt_ecr_image" "monitor" { + repository_name = "sagemaker-model-monitor-analyzer" + image_tag = "" +} + +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = %[1]q + data_quality_app_specification { + image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_job_input { + endpoint_input { + endpoint_name = aws_sagemaker_endpoint.test.name + } + } + data_quality_job_output_config { + monitoring_outputs { + s3_output { + s3_uri = local.output_s3_uri + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + } + } + role_arn = aws_iam_role.test.arn +} +`, rName) +} From 1d30a30e7ababe4fcedbff92e1d4d4a45ff0b0ab Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Thu, 16 Mar 2023 17:23:31 -0700 Subject: [PATCH 06/49] add disappears test --- .../sagemaker/data_quality_job_definition.go | 7 ++-- .../data_quality_job_definition_test.go | 41 +++++++++++++++++++ 2 files changed, 44 insertions(+), 4 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition.go b/internal/service/sagemaker/data_quality_job_definition.go index bb1162eea27e..1bba4079b056 100644 --- a/internal/service/sagemaker/data_quality_job_definition.go +++ b/internal/service/sagemaker/data_quality_job_definition.go @@ -16,7 +16,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) @@ 
-599,7 +598,7 @@ func resourceDataQualityJobDefinitionRead(ctx context.Context, d *schema.Resourc jobDefinition, err := FindDataQualityJobDefinitionByName(ctx, conn, d.Id()) - if !d.IsNewResource() && tfresource.NotFound(err) { + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { log.Printf("[WARN] SageMaker Data Quality Job Definition (%s) not found, removing from state", d.Id()) d.SetId("") return diags @@ -1073,7 +1072,7 @@ func resourceDataQualityJobDefinitionUpdate(ctx context.Context, d *schema.Resou return sdkdiag.AppendErrorf(diags, "updating SageMaker Data Quality Job Definition (%s) tags: %s", d.Id(), err) } } - return append(diags, resourceEndpointConfigurationRead(ctx, d, meta)...) + return append(diags, resourceDataQualityJobDefinitionRead(ctx, d, meta)...) } func resourceDataQualityJobDefinitionDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { @@ -1087,7 +1086,7 @@ func resourceDataQualityJobDefinitionDelete(ctx context.Context, d *schema.Resou _, err := conn.DeleteDataQualityJobDefinitionWithContext(ctx, deleteOpts) - if tfawserr.ErrMessageContains(err, "ValidationException", "Could not find data quality job definition") { + if tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { return diags } diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index 2221a4e7e190..b76dca31e443 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -62,6 +62,47 @@ func TestAccSageMakerDataQualityJobDefinition_basic(t *testing.T) { }) } +func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_data_quality_job_definition.test" + + resource.ParallelTest(t, 
resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEndpoint_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfsagemaker.ResourceDataQualityJobDefinition(), resourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfsagemaker.ResourceDataQualityJobDefinition(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +// TO ADD: +// DataQualityAppSpecification optional +// container_arguments +// container_entrypoint +// environment +// post_analytics_processor_source_uri +// record_preprocessor_source_uri +// DataQualityBaselineConfig required +// DataQualityBaselineConfig optional +// DataQualityJobInput BatchTransformInput required +// DataQualityJobInput BatchTransformInput optional +// DataQualityJobOutputConfig optional +// JobResources optional +// NetworkConfig required +// NetworkConfig optional +// StoppingCondition optional + func testAccCheckDataQualityJobDefinitionDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).SageMakerConn() From 13cf0e63d7f39cc0251e2b7627eb87b4af4affb0 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Thu, 16 Mar 2023 19:10:37 -0700 Subject: [PATCH 07/49] adding more to base test --- .../data_quality_job_definition_test.go | 51 +++++++++++++++++-- 1 file changed, 46 insertions(+), 5 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index b76dca31e443..881f042054af 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go 
+++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -3,6 +3,7 @@ package sagemaker_test import ( "context" "fmt" + "regexp" "testing" "github.com/aws/aws-sdk-go/service/sagemaker" @@ -37,9 +38,13 @@ func TestAccSageMakerDataQualityJobDefinition_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.#", "1"), resource.TestCheckResourceAttrPair(resourceName, "data_quality_job_input.0.endpoint_input.0.endpoint_name", "aws_sagemaker_endpoint.test", "name"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.0.s3_data_distribution_type", "FullyReplicated"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.0.s3_input_mode", "File"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), @@ -87,7 +92,7 @@ func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { } // TO ADD: -// DataQualityAppSpecification optional +// DataQualityAppSpecification // container_arguments // container_entrypoint // environment @@ -95,13 +100,49 @@ func 
TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { // record_preprocessor_source_uri // DataQualityBaselineConfig required // DataQualityBaselineConfig optional -// DataQualityJobInput BatchTransformInput required -// DataQualityJobInput BatchTransformInput optional +// baselining_job_name +// constraints_resource +// statistics_resource +// DataQualityJobInput +// batch_transform_input (required) +// dataset_format +// csv +// header +// json +// line +// parquet +// end_time_offset +// features_attribute +// inference_attribute +// local_path +// probability_attribute +// s3_data_distribution_type +// s3_input_mode +// start_time_offset +// endpoint_input (required) +// end_time_offset +// features_attribute +// inference_attribute +// local_path +// probability_attribute +// s3_data_distribution_type +// s3_input_mode +// start_time_offset // DataQualityJobOutputConfig optional -// JobResources optional +// kms_key_id +// monitoring_outputs (multiple) +// s3_output +// local_path +// s3_upload_mode +// JobResources +// cluster_config +// volume_kms_key_id // NetworkConfig required // NetworkConfig optional -// StoppingCondition optional +// enable_inter_container_traffic_encryption +// enable_network_isolation +// StoppingCondition required +// func testAccCheckDataQualityJobDefinitionDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { From cb58862cab23f868e032df1ee3e1c2c0ee5af0cd Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Thu, 16 Mar 2023 20:48:23 -0700 Subject: [PATCH 08/49] add appspc optional test --- .../data_quality_job_definition_test.go | 100 ++++++++++++++++-- 1 file changed, 93 insertions(+), 7 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index 881f042054af..28cae11c4b84 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ 
b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -67,6 +67,60 @@ func TestAccSageMakerDataQualityJobDefinition_basic(t *testing.T) { }) } +func TestAccSageMakerDataQualityJobDefinition_appSpecification_optional(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_data_quality_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEndpoint_appSpecification_optional(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), + resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.0.environment.%", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.0.environment.foo", "bar"), + resource.TestMatchResourceAttr(resourceName, "data_quality_app_specification.0.record_preprocessor_source_uri", regexp.MustCompile("pre.sh")), + resource.TestMatchResourceAttr(resourceName, "data_quality_app_specification.0.post_analytics_processor_source_uri", regexp.MustCompile("post.sh")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), + resource.TestCheckResourceAttr(resourceName, 
"data_quality_job_input.0.endpoint_input.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_quality_job_input.0.endpoint_input.0.endpoint_name", "aws_sagemaker_endpoint.test", "name"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.0.s3_data_distribution_type", "FullyReplicated"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.0.s3_input_mode", "File"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), + resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), + resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), + 
resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -311,24 +365,56 @@ resource "aws_sagemaker_endpoint" "test" { endpoint_config_name = aws_sagemaker_endpoint_configuration.test.name name = %[1]q } + +data "aws_sagemaker_prebuilt_ecr_image" "monitor" { + repository_name = "sagemaker-model-monitor-analyzer" + image_tag = "" +} `, rName) } func testAccEndpoint_basic(rName string) string { return testAccEndpoint_Base(rName) + fmt.Sprintf(` -locals { - output_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = %[1]q + data_quality_app_specification { + image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_job_input { + endpoint_input { + endpoint_name = aws_sagemaker_endpoint.test.name + } + } + data_quality_job_output_config { + monitoring_outputs { + s3_output { + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + } + } + role_arn = aws_iam_role.test.arn } - -data "aws_sagemaker_prebuilt_ecr_image" "monitor" { - repository_name = "sagemaker-model-monitor-analyzer" - image_tag = "" +`, rName) } +func testAccEndpoint_appSpecification_optional(rName string) string { + return testAccEndpoint_Base(rName) + fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q data_quality_app_specification { image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + environment = { + foo = "bar" + } + record_preprocessor_source_uri = 
"https://${aws_s3_bucket.test.bucket_regional_domain_name}/pre.sh" + post_analytics_processor_source_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/post.sh" } data_quality_job_input { endpoint_input { @@ -338,7 +424,7 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = local.output_s3_uri + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } From 8746235179dec365165521139a6f0944b2f70660 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Thu, 16 Mar 2023 21:31:25 -0700 Subject: [PATCH 09/49] add baseline config test --- .../data_quality_job_definition_test.go | 102 +++++++++++++++++- 1 file changed, 98 insertions(+), 4 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index 28cae11c4b84..41ba13095ffb 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -121,6 +121,62 @@ func TestAccSageMakerDataQualityJobDefinition_appSpecification_optional(t *testi }, }) } + +func TestAccSageMakerDataQualityJobDefinition_baselineConfig(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_data_quality_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEndpoint_baselineConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + 
acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_quality_job_input.0.endpoint_input.0.endpoint_name", "aws_sagemaker_endpoint.test", "name"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.0.s3_data_distribution_type", "FullyReplicated"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.0.s3_input_mode", "File"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), + resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), + 
resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), + resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.0.constraints_resource.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_baseline_config.0.constraints_resource.0.s3_uri", regexp.MustCompile("constraints")), + resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.0.statistics_resource.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_baseline_config.0.statistics_resource.0.s3_uri", regexp.MustCompile("statistics")), + resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -145,13 +201,12 @@ func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { }) } -// TO ADD: +// Issues: // DataQualityAppSpecification // container_arguments // container_entrypoint -// environment -// post_analytics_processor_source_uri -// record_preprocessor_source_uri + +// TO ADD: // DataQualityBaselineConfig required // DataQualityBaselineConfig optional // baselining_job_name @@ -439,3 +494,42 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { } `, rName) } + +func testAccEndpoint_baselineConfig(rName string) string { + return testAccEndpoint_Base(rName) + 
fmt.Sprintf(` +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = %[1]q + data_quality_app_specification { + image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_baseline_config { + constraints_resource { + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/constraints" + } + statistics_resource { + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/statistics" + } + } + data_quality_job_input { + endpoint_input { + endpoint_name = aws_sagemaker_endpoint.test.name + } + } + data_quality_job_output_config { + monitoring_outputs { + s3_output { + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + } + } + role_arn = aws_iam_role.test.arn +} +`, rName) +} From b5db420c6d3da2115d8dbac7bfc160a70fe8cd59 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Fri, 17 Mar 2023 11:25:27 -0700 Subject: [PATCH 10/49] fixed a couple of bugs with csv field --- .../service/sagemaker/data_quality_job_definition.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition.go b/internal/service/sagemaker/data_quality_job_definition.go index 1bba4079b056..4c56db3eda08 100644 --- a/internal/service/sagemaker/data_quality_job_definition.go +++ b/internal/service/sagemaker/data_quality_job_definition.go @@ -773,7 +773,7 @@ func flattenBatchTransformInput(transformInput *sagemaker.BatchTransformInput_) } if transformInput.DataCapturedDestinationS3Uri != nil { - fInput["local_path"] = aws.StringValue(transformInput.DataCapturedDestinationS3Uri) + fInput["data_captured_destination_s3_uri"] = aws.StringValue(transformInput.DataCapturedDestinationS3Uri) } if transformInput.DatasetFormat != nil { @@ -1362,10 +1362,13 @@ func expandCsv(configured []interface{}) 
*sagemaker.MonitoringCsvDatasetFormat { return nil } - m := configured[0].(map[string]interface{}) - c := &sagemaker.MonitoringCsvDatasetFormat{} + if configured[0] == nil { + return c + } + + m := configured[0].(map[string]interface{}) if v, ok := m["header"]; ok { c.Header = aws.Bool(v.(bool)) } From f6b54d70f75f905ff3c47e9da3f8af1d46f02917 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Fri, 17 Mar 2023 11:25:54 -0700 Subject: [PATCH 11/49] adding batch transform test --- .../data_quality_job_definition_test.go | 178 +++++++++++++++++- 1 file changed, 170 insertions(+), 8 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index 41ba13095ffb..97e9c07cecaa 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -177,6 +177,56 @@ func TestAccSageMakerDataQualityJobDefinition_baselineConfig(t *testing.T) { }) } +func TestAccSageMakerDataQualityJobDefinition_batchTransform(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_data_quality_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEndpoint_batchTransform(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", 
"1"), + resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.data_captured_destination_s3_uri", regexp.MustCompile("captured")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.0.csv.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), + resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), + resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), + 
resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -205,13 +255,10 @@ func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { // DataQualityAppSpecification // container_arguments // container_entrypoint - -// TO ADD: -// DataQualityBaselineConfig required // DataQualityBaselineConfig optional // baselining_job_name -// constraints_resource -// statistics_resource + +// TO ADD: // DataQualityJobInput // batch_transform_input (required) // dataset_format @@ -296,6 +343,87 @@ func testAccCheckDataQualityJobDefinitionExists(ctx context.Context, n string) r } } +func testAccBatchTransform_Base(rName string) string { + return fmt.Sprintf(` + +provider "aws" { + region = "us-west-2" + + default_tags { + tags = { + "adsk:moniker" = "AMPSDEMO-C-UW2" + } + } +} + +data "aws_caller_identity" "current" {} + +data "aws_iam_policy" "boundary" { + arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/ADSK-Boundary" +} + +data "aws_iam_policy_document" "access" { + statement { + effect = "Allow" + + actions = [ + "cloudwatch:PutMetricData", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:CreateLogGroup", + "logs:DescribeLogStreams", + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:BatchGetImage", + "s3:GetObject", + ] + + resources = ["*"] + } +} + +data 
"aws_partition" "current" {} + +data "aws_iam_policy_document" "assume_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["sagemaker.${data.aws_partition.current.dns_suffix}"] + } + } +} + +resource "aws_iam_role" "test" { + name = %[1]q + path = "/" + assume_role_policy = data.aws_iam_policy_document.assume_role.json + permissions_boundary = data.aws_iam_policy.boundary.arn +} + +resource "aws_iam_role_policy" "test" { + role = aws_iam_role.test.name + policy = data.aws_iam_policy_document.access.json +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_bucket_acl" "test" { + bucket = aws_s3_bucket.test.id + acl = "private" +} + +data "aws_sagemaker_prebuilt_ecr_image" "monitor" { + repository_name = "sagemaker-model-monitor-analyzer" + image_tag = "" +} +`, rName) +} + func testAccEndpoint_Base(rName string) string { return fmt.Sprintf(` @@ -443,7 +571,7 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } @@ -479,7 +607,7 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } @@ -518,7 +646,41 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + } + } + 
role_arn = aws_iam_role.test.arn +} +`, rName) +} + +func testAccEndpoint_batchTransform(rName string) string { + return testAccBatchTransform_Base(rName) + fmt.Sprintf(` +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = %[1]q + data_quality_app_specification { + image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_job_input { + batch_transform_input { + data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" + dataset_format { + csv {} + } + } + } + data_quality_job_output_config { + monitoring_outputs { + s3_output { + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } From 8b6ff11f772aacf6bd53ec798192e847dba87711 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Fri, 17 Mar 2023 15:42:21 -0700 Subject: [PATCH 12/49] fixing issues with transform job input --- .../sagemaker/data_quality_job_definition.go | 104 +----------------- 1 file changed, 5 insertions(+), 99 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition.go b/internal/service/sagemaker/data_quality_job_definition.go index 4c56db3eda08..841711cf8dd3 100644 --- a/internal/service/sagemaker/data_quality_job_definition.go +++ b/internal/service/sagemaker/data_quality_job_definition.go @@ -203,33 +203,9 @@ func ResourceDataQualityJobDefinition() *schema.Resource { }, }, }, - "parquet": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{}, - }, - }, }, }, }, - "end_time_offset": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, - "features_attribute": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, - "inference_attribute": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, "local_path": { Type: schema.TypeString, Optional: true, @@ -240,18 +216,6 @@ func ResourceDataQualityJobDefinition() 
*schema.Resource { validation.StringMatch(regexp.MustCompile(`^\/opt\/ml\/processing\/.*`), "Must start with `/opt/ml/processing`."), ), }, - "probability_attribute": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, - "probability_threshold_attribute": { - Type: schema.TypeFloat, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.FloatAtLeast(0), - }, "s3_data_distribution_type": { Type: schema.TypeString, ForceNew: true, @@ -266,11 +230,6 @@ func ResourceDataQualityJobDefinition() *schema.Resource { Computed: true, ValidateFunc: validation.StringInSlice(sagemaker.ProcessingS3InputMode_Values(), false), }, - "start_time_offset": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, }, }, }, @@ -780,26 +739,6 @@ func flattenBatchTransformInput(transformInput *sagemaker.BatchTransformInput_) fInput["dataset_format"] = flattenDatasetFormat(transformInput.DatasetFormat) } - if transformInput.EndTimeOffset != nil { - fInput["end_time_offset"] = aws.StringValue(transformInput.EndTimeOffset) - } - - if transformInput.FeaturesAttribute != nil { - fInput["features_attribute"] = aws.StringValue(transformInput.FeaturesAttribute) - } - - if transformInput.InferenceAttribute != nil { - fInput["inference_attribute"] = aws.StringValue(transformInput.InferenceAttribute) - } - - if transformInput.ProbabilityAttribute != nil { - fInput["probability_attribute"] = aws.StringValue(transformInput.ProbabilityAttribute) - } - - if transformInput.ProbabilityThresholdAttribute != nil { - fInput["probability_threshold_attribute"] = aws.Float64Value(transformInput.ProbabilityThresholdAttribute) - } - if transformInput.S3DataDistributionType != nil { fInput["s3_data_distribution_type"] = aws.StringValue(transformInput.S3DataDistributionType) } @@ -808,10 +747,6 @@ func flattenBatchTransformInput(transformInput *sagemaker.BatchTransformInput_) fInput["s3_input_mode"] = aws.StringValue(transformInput.S3InputMode) } - if 
transformInput.StartTimeOffset != nil { - fInput["start_time_offset"] = aws.StringValue(transformInput.StartTimeOffset) - } - return []map[string]interface{}{fInput} } @@ -830,10 +765,6 @@ func flattenDatasetFormat(datasetFormat *sagemaker.MonitoringDatasetFormat) []ma fFormat["json"] = flattenJson(datasetFormat.Json) } - if datasetFormat.Parquet != nil { - fFormat["parquet"] = []map[string]interface{}{} - } - return []map[string]interface{}{fFormat} } @@ -1278,30 +1209,10 @@ func expandBatchTransformInput(configured []interface{}) *sagemaker.BatchTransfo c.DatasetFormat = expandDatasetFormat(v) } - if v, ok := m["end_time_offset"].(string); ok && v != "" { - c.EndTimeOffset = aws.String(v) - } - - if v, ok := m["features_attribute"].(string); ok && v != "" { - c.FeaturesAttribute = aws.String(v) - } - - if v, ok := m["inference_attribute"].(string); ok && v != "" { - c.InferenceAttribute = aws.String(v) - } - if v, ok := m["local_path"].(string); ok && v != "" { c.LocalPath = aws.String(v) } - if v, ok := m["probability_attribute"].(string); ok && v != "" { - c.ProbabilityAttribute = aws.String(v) - } - - if v, ok := m["probability_threshold_attribute"].(float64); ok && v > 0 { - c.ProbabilityThresholdAttribute = aws.Float64(v) - } - if v, ok := m["s3_data_distribution_type"].(string); ok && v != "" { c.S3DataDistributionType = aws.String(v) } @@ -1310,10 +1221,6 @@ func expandBatchTransformInput(configured []interface{}) *sagemaker.BatchTransfo c.S3InputMode = aws.String(v) } - if v, ok := m["start_time_offset"].(string); ok && v != "" { - c.StartTimeOffset = aws.String(v) - } - return c } @@ -1334,10 +1241,6 @@ func expandDatasetFormat(configured []interface{}) *sagemaker.MonitoringDatasetF c.Json = expandJson(v) } - if v, ok := m["parquet"].([]interface{}); ok && len(v) > 0 { - c.Parquet = &sagemaker.MonitoringParquetDatasetFormat{} - } - return c } @@ -1346,10 +1249,13 @@ func expandJson(configured []interface{}) *sagemaker.MonitoringJsonDatasetFormat return 
nil } - m := configured[0].(map[string]interface{}) - c := &sagemaker.MonitoringJsonDatasetFormat{} + if configured[0] == nil { + return c + } + + m := configured[0].(map[string]interface{}) if v, ok := m["line"]; ok { c.Line = aws.Bool(v.(bool)) } From 2e0b3a7a8f28b61c0403f9989286d98d9a8dcc92 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Fri, 17 Mar 2023 15:42:35 -0700 Subject: [PATCH 13/49] adding transform job tests --- .../data_quality_job_definition_test.go | 405 +++++++++++++++++- 1 file changed, 387 insertions(+), 18 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index 97e9c07cecaa..02ce93c4056d 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -189,7 +189,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform(t *testing.T) { CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccEndpoint_batchTransform(rName), + Config: testAccBatchTransform_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "name", rName), @@ -227,6 +227,215 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform(t *testing.T) { }, }) } + +func TestAccSageMakerDataQualityJobDefinition_batchTransform_csvHeader(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_data_quality_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: 
testAccBatchTransform_csvHeader(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.data_captured_destination_s3_uri", regexp.MustCompile("captured")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.0.csv.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.0.csv.0.header", "true"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), + resource.TestCheckResourceAttr(resourceName, 
"job_resources.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), + resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSageMakerDataQualityJobDefinition_batchTransform_json(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_data_quality_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBatchTransform_json(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), + resource.TestCheckResourceAttr(resourceName, 
"data_quality_app_specification.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.data_captured_destination_s3_uri", regexp.MustCompile("captured")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.0.json.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), + resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), + resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", 
"0"), + resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSageMakerDataQualityJobDefinition_batchTransform_jsonLine(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_data_quality_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBatchTransform_jsonLine(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), + resource.TestMatchResourceAttr(resourceName, 
"data_quality_job_input.0.batch_transform_input.0.data_captured_destination_s3_uri", regexp.MustCompile("captured")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.0.json.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.0.json.0.line", "true"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), + resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), + resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), + resource.TestCheckResourceAttr(resourceName, 
"stopping_condition.0.max_runtime_in_seconds", "3600"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSageMakerDataQualityJobDefinition_batchTransform_optional(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_data_quality_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBatchTransform_optional(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.data_captured_destination_s3_uri", regexp.MustCompile("captured")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.0.csv.#", "1"), + 
resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.local_path", "/opt/ml/processing/local_path"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.s3_data_distribution_type", "ShardedByS3Key"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.s3_input_mode", "Pipe"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), + resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), + resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), + resource.TestCheckResourceAttr(resourceName, 
"tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -260,21 +469,6 @@ func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { // TO ADD: // DataQualityJobInput -// batch_transform_input (required) -// dataset_format -// csv -// header -// json -// line -// parquet -// end_time_offset -// features_attribute -// inference_attribute -// local_path -// probability_attribute -// s3_data_distribution_type -// s3_input_mode -// start_time_offset // endpoint_input (required) // end_time_offset // features_attribute @@ -662,8 +856,40 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName) } -func testAccEndpoint_batchTransform(rName string) string { +func testAccBatchTransform_basicTransformTemplate(rName string, dFormat string) string { return testAccBatchTransform_Base(rName) + fmt.Sprintf(` +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = %[1]q + data_quality_app_specification { + image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_job_input { + batch_transform_input { + data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" + %[2]s + } + } + data_quality_job_output_config { + monitoring_outputs { + s3_output { + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + } + } + role_arn = aws_iam_role.test.arn +} +`, rName, dFormat) +} + +func testAccBatchTransform_basic(rName string) string { + return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q 
data_quality_app_specification { @@ -693,5 +919,148 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { } role_arn = aws_iam_role.test.arn } -`, rName) +`, rName)) +} + +func testAccBatchTransform_csvHeader(rName string) string { + return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = %[1]q + data_quality_app_specification { + image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_job_input { + batch_transform_input { + data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" + dataset_format { + csv { + header = true + } + } + } + } + data_quality_job_output_config { + monitoring_outputs { + s3_output { + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + } + } + role_arn = aws_iam_role.test.arn +} +`, rName)) +} + +func testAccBatchTransform_json(rName string) string { + return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = %[1]q + data_quality_app_specification { + image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_job_input { + batch_transform_input { + data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" + dataset_format { + json {} + } + } + } + data_quality_job_output_config { + monitoring_outputs { + s3_output { + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + } + } + role_arn = aws_iam_role.test.arn +} +`, rName)) +} + +func testAccBatchTransform_jsonLine(rName string) string 
{ + return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = %[1]q + data_quality_app_specification { + image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_job_input { + batch_transform_input { + data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" + dataset_format { + json { + line = true + } + } + } + } + data_quality_job_output_config { + monitoring_outputs { + s3_output { + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + } + } + role_arn = aws_iam_role.test.arn +} +`, rName)) +} + +func testAccBatchTransform_optional(rName string) string { + return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = %[1]q + data_quality_app_specification { + image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_job_input { + batch_transform_input { + data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" + dataset_format { + csv {} + } + local_path = "/opt/ml/processing/local_path" + s3_data_distribution_type = "ShardedByS3Key" + s3_input_mode = "Pipe" + } + } + data_quality_job_output_config { + monitoring_outputs { + s3_output { + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + } + } + role_arn = aws_iam_role.test.arn +} +`, rName)) } From eabcd249c383458ea157efebc49ffd4bff034bd6 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Fri, 17 Mar 2023 16:14:17 -0700 Subject: [PATCH 14/49] fixes to endpoint optional 
parameters --- .../sagemaker/data_quality_job_definition.go | 79 ------------------- 1 file changed, 79 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition.go b/internal/service/sagemaker/data_quality_job_definition.go index 841711cf8dd3..dba57dd3a75d 100644 --- a/internal/service/sagemaker/data_quality_job_definition.go +++ b/internal/service/sagemaker/data_quality_job_definition.go @@ -240,27 +240,12 @@ func ResourceDataQualityJobDefinition() *schema.Resource { ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "end_time_offset": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, "endpoint_name": { Type: schema.TypeString, Required: true, ForceNew: true, ValidateFunc: validName, }, - "features_attribute": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, - "inference_attribute": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, "local_path": { Type: schema.TypeString, Optional: true, @@ -271,17 +256,6 @@ func ResourceDataQualityJobDefinition() *schema.Resource { validation.StringMatch(regexp.MustCompile(`^\/opt\/ml\/processing\/.*`), "Must start with `/opt/ml/processing`."), ), }, - "probability_attribute": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, - "probability_threshold_attribute": { - Type: schema.TypeFloat, - Optional: true, - ForceNew: true, - ValidateFunc: validation.FloatAtLeast(0), - }, "s3_data_distribution_type": { Type: schema.TypeString, ForceNew: true, @@ -296,11 +270,6 @@ func ResourceDataQualityJobDefinition() *schema.Resource { Computed: true, ValidateFunc: validation.StringInSlice(sagemaker.ProcessingS3InputMode_Values(), false), }, - "start_time_offset": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, }, }, }, @@ -811,26 +780,6 @@ func flattenEndpointInput(endpointInput *sagemaker.EndpointInput) []map[string]i spec["local_path"] = aws.StringValue(endpointInput.LocalPath) } - if 
endpointInput.EndTimeOffset != nil { - spec["end_time_offset"] = aws.StringValue(endpointInput.EndTimeOffset) - } - - if endpointInput.FeaturesAttribute != nil { - spec["features_attribute"] = aws.StringValue(endpointInput.FeaturesAttribute) - } - - if endpointInput.InferenceAttribute != nil { - spec["inference_attribute"] = aws.StringValue(endpointInput.InferenceAttribute) - } - - if endpointInput.ProbabilityAttribute != nil { - spec["probability_attribute"] = aws.StringValue(endpointInput.ProbabilityAttribute) - } - - if endpointInput.ProbabilityThresholdAttribute != nil { - spec["probability_threshold_attribute"] = aws.Float64Value(endpointInput.ProbabilityThresholdAttribute) - } - if endpointInput.S3DataDistributionType != nil { spec["s3_data_distribution_type"] = aws.StringValue(endpointInput.S3DataDistributionType) } @@ -839,10 +788,6 @@ func flattenEndpointInput(endpointInput *sagemaker.EndpointInput) []map[string]i spec["s3_input_mode"] = aws.StringValue(endpointInput.S3InputMode) } - if endpointInput.StartTimeOffset != nil { - spec["start_time_offset"] = aws.StringValue(endpointInput.StartTimeOffset) - } - return []map[string]interface{}{spec} } @@ -1153,30 +1098,10 @@ func expandEndpointInput(configured []interface{}) *sagemaker.EndpointInput { c.EndpointName = aws.String(v) } - if v, ok := m["end_time_offset"].(string); ok && v != "" { - c.EndTimeOffset = aws.String(v) - } - - if v, ok := m["features_attribute"].(string); ok && v != "" { - c.FeaturesAttribute = aws.String(v) - } - - if v, ok := m["inference_attribute"].(string); ok && v != "" { - c.InferenceAttribute = aws.String(v) - } - if v, ok := m["local_path"].(string); ok && v != "" { c.LocalPath = aws.String(v) } - if v, ok := m["probability_attribute"].(string); ok && v != "" { - c.ProbabilityAttribute = aws.String(v) - } - - if v, ok := m["probability_threshold_attribute"].(float64); ok && v > 0 { - c.ProbabilityThresholdAttribute = aws.Float64(v) - } - if v, ok := 
m["s3_data_distribution_type"].(string); ok && v != "" { c.S3DataDistributionType = aws.String(v) } @@ -1185,10 +1110,6 @@ func expandEndpointInput(configured []interface{}) *sagemaker.EndpointInput { c.S3InputMode = aws.String(v) } - if v, ok := m["start_time_offset"].(string); ok && v != "" { - c.StartTimeOffset = aws.String(v) - } - return c } From 9d45d3db4766909c1d8c2e5efff0bb36c902109e Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Fri, 17 Mar 2023 16:14:40 -0700 Subject: [PATCH 15/49] adding endpoint optional test --- .../data_quality_job_definition_test.go | 99 ++++++++++++++++--- 1 file changed, 88 insertions(+), 11 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index 02ce93c4056d..c74dff7e2c15 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -16,7 +16,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func TestAccSageMakerDataQualityJobDefinition_basic(t *testing.T) { +func TestAccSageMakerDataQualityJobDefinition_endpoint(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_sagemaker_data_quality_job_definition.test" @@ -436,6 +436,59 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_optional(t *testing }, }) } + +func TestAccSageMakerDataQualityJobDefinition_endpoint_optional(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_data_quality_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), + Steps: 
[]resource.TestStep{ + { + Config: testAccEndpoint_optional(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_quality_job_input.0.endpoint_input.0.endpoint_name", "aws_sagemaker_endpoint.test", "name"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.0.local_path", "/opt/ml/processing/local_path"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.0.s3_data_distribution_type", "ShardedByS3Key"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.0.s3_input_mode", "Pipe"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), + 
resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), + resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -468,16 +521,6 @@ func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { // baselining_job_name // TO ADD: -// DataQualityJobInput -// endpoint_input (required) -// end_time_offset -// features_attribute -// inference_attribute -// local_path -// probability_attribute -// s3_data_distribution_type -// s3_input_mode -// start_time_offset // DataQualityJobOutputConfig optional // kms_key_id // monitoring_outputs (multiple) @@ -1064,3 +1107,37 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { } `, rName)) } + +func testAccEndpoint_optional(rName string) string { + return testAccEndpoint_Base(rName) + fmt.Sprintf(` +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = %[1]q + 
data_quality_app_specification { + image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_job_input { + endpoint_input { + endpoint_name = aws_sagemaker_endpoint.test.name + local_path = "/opt/ml/processing/local_path" + s3_data_distribution_type = "ShardedByS3Key" + s3_input_mode = "Pipe" + } + } + data_quality_job_output_config { + monitoring_outputs { + s3_output { + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + } + } + role_arn = aws_iam_role.test.arn +} +`, rName) +} From b988e74c6389c6ee74bcbb9eda21c6c0384b21c4 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Fri, 17 Mar 2023 16:23:07 -0700 Subject: [PATCH 16/49] add outputconfig kmskeyid test --- .../data_quality_job_definition_test.go | 92 +++++++++++++++++++ 1 file changed, 92 insertions(+) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index c74dff7e2c15..1d67b38fee54 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -489,6 +489,58 @@ func TestAccSageMakerDataQualityJobDefinition_endpoint_optional(t *testing.T) { }) } +func TestAccSageMakerDataQualityJobDefinition_batchTransform_ouputConfig_kmsKeyId(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_data_quality_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: 
testAccBatchTransform_outputConfig_kmsKeyId(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.data_captured_destination_s3_uri", regexp.MustCompile("captured")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.0.csv.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_quality_job_output_config.0.kms_key_id", "aws_kms_key.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), + resource.TestCheckResourceAttr(resourceName, 
"job_resources.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), + resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1141,3 +1193,43 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { } `, rName) } + +func testAccBatchTransform_outputConfig_kmsKeyId(rName string) string { + return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +resource "aws_kms_key" "test" { + description = %[1]q + deletion_window_in_days = 10 +} + +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = %[1]q + data_quality_app_specification { + image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_job_input { + batch_transform_input { + data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" + dataset_format { + csv {} + } + } + } + data_quality_job_output_config { + kms_key_id = 
aws_kms_key.test.arn + monitoring_outputs { + s3_output { + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + } + } + role_arn = aws_iam_role.test.arn +} +`, rName)) +} From f6157a70bed157f443a58616b1a07cc844da9fe9 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Fri, 17 Mar 2023 16:49:35 -0700 Subject: [PATCH 17/49] fix monitoring_outputs bug --- internal/service/sagemaker/data_quality_job_definition.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/sagemaker/data_quality_job_definition.go b/internal/service/sagemaker/data_quality_job_definition.go index dba57dd3a75d..75682998f2ab 100644 --- a/internal/service/sagemaker/data_quality_job_definition.go +++ b/internal/service/sagemaker/data_quality_job_definition.go @@ -291,7 +291,7 @@ func ResourceDataQualityJobDefinition() *schema.Resource { }, "monitoring_outputs": { Type: schema.TypeList, - MinItems: 1, + MaxItems: 1, Required: true, ForceNew: true, Elem: &schema.Resource{ From 387af00f9daae5649d06ee82e72971698678114e Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Fri, 17 Mar 2023 16:49:49 -0700 Subject: [PATCH 18/49] add outconfig optional test --- .../data_quality_job_definition_test.go | 94 +++++++++++++++++-- 1 file changed, 88 insertions(+), 6 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index 1d67b38fee54..96a7f06dc3d5 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -541,6 +541,58 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_ouputConfig_kmsKeyI }) } +func TestAccSageMakerDataQualityJobDefinition_batchTransform_outputConfig_optional(t *testing.T) { + ctx := acctest.Context(t) + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_data_quality_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBatchTransform_outputConfig_optional(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.data_captured_destination_s3_uri", regexp.MustCompile("captured")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.0.csv.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), + 
resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.local_path", "/opt/ml/processing/local_path"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "Continuous"), + resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), + resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -573,12 +625,6 @@ func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { // baselining_job_name // TO ADD: -// DataQualityJobOutputConfig optional -// kms_key_id -// monitoring_outputs (multiple) -// s3_output -// local_path -// s3_upload_mode // JobResources // 
cluster_config // volume_kms_key_id @@ -1233,3 +1279,39 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { } `, rName)) } + +func testAccBatchTransform_outputConfig_optional(rName string) string { + return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = %[1]q + data_quality_app_specification { + image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_job_input { + batch_transform_input { + data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" + dataset_format { + csv {} + } + } + } + data_quality_job_output_config { + monitoring_outputs { + s3_output { + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_upload_mode = "Continuous" + local_path = "/opt/ml/processing/local_path" + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + } + } + role_arn = aws_iam_role.test.arn +} +`, rName)) +} From efa79038af33600651bae70e6a2765d5848955ae Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Fri, 17 Mar 2023 19:27:44 -0700 Subject: [PATCH 19/49] adding job resources kms key test --- .../data_quality_job_definition_test.go | 92 +++++++++++++++++++ 1 file changed, 92 insertions(+) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index 96a7f06dc3d5..095c67757af8 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -593,6 +593,58 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_outputConfig_option }) } +func TestAccSageMakerDataQualityJobDefinition_batchTransform_jobResources_volumeKey(t *testing.T) { + ctx := acctest.Context(t) + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_data_quality_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBatchTransform_jobResources_volumeKey(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.data_captured_destination_s3_uri", regexp.MustCompile("captured")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.0.csv.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), + 
resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), + resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), + resource.TestCheckResourceAttrPair(resourceName, "job_resources.0.cluster_config.0.volume_kms_key_id", "aws_kms_key.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1315,3 +1367,43 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { } `, rName)) } + +func testAccBatchTransform_jobResources_volumeKey(rName string) string { + return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +resource "aws_kms_key" "test" { + description = %[1]q + 
deletion_window_in_days = 10 +} + +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = %[1]q + data_quality_app_specification { + image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_job_input { + batch_transform_input { + data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" + dataset_format { + csv {} + } + } + } + data_quality_job_output_config { + monitoring_outputs { + s3_output { + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + volume_kms_key_id = aws_kms_key.test.arn + } + } + role_arn = aws_iam_role.test.arn +} +`, rName)) +} From 6faeb6c092acb5165b014d56975c084def0c4cd1 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Fri, 17 Mar 2023 20:10:09 -0700 Subject: [PATCH 20/49] add stopping condition test --- .../data_quality_job_definition_test.go | 92 ++++++++++++++++++- 1 file changed, 89 insertions(+), 3 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index 095c67757af8..7f2290dcf4d1 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -645,6 +645,57 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_jobResources_volume }) } +func TestAccSageMakerDataQualityJobDefinition_batchTransform_stoppingCondition(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_data_quality_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBatchTransform_stoppingCondition(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.data_captured_destination_s3_uri", regexp.MustCompile("captured")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.0.csv.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), + resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), + resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), + resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), + 
resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), + resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), + resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "600"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -669,6 +720,7 @@ func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { }) } +// TODO: // Issues: // DataQualityAppSpecification // container_arguments @@ -677,9 +729,6 @@ func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { // baselining_job_name // TO ADD: -// JobResources -// cluster_config -// volume_kms_key_id // NetworkConfig required // NetworkConfig optional // enable_inter_container_traffic_encryption @@ -1407,3 +1456,40 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { } `, rName)) } + +func testAccBatchTransform_stoppingCondition(rName string) string { + return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = %[1]q + data_quality_app_specification { + 
image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_job_input { + batch_transform_input { + data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" + dataset_format { + csv {} + } + } + } + data_quality_job_output_config { + monitoring_outputs { + s3_output { + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + } + } + stopping_condition { + max_runtime_in_seconds = 600 + } + role_arn = aws_iam_role.test.arn +} +`, rName)) +} From c6b4c9ffc60230aff9966689c724367b9195401d Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Fri, 17 Mar 2023 20:31:04 -0700 Subject: [PATCH 21/49] add tags test --- .../data_quality_job_definition_test.go | 122 ++++++++++++++++++ 1 file changed, 122 insertions(+) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index 7f2290dcf4d1..10bc4840d583 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -696,6 +696,51 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_stoppingCondition(t }) } +func TestAccSageMakerDataQualityJobDefinition_tags(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_data_quality_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBatchTransform_tags1(rName, "key1", "value1"), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccBatchTransform_tags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccBatchTransform_tags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1493,3 +1538,80 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { } `, rName)) } + +func testAccBatchTransform_tags1(rName string, tagKey1, tagValue1 string) string { + return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = %[1]q + data_quality_app_specification { + image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_job_input { + batch_transform_input { + data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" + dataset_format { + csv {} + } + } + } + data_quality_job_output_config { + monitoring_outputs { + s3_output { + s3_uri = 
"https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + } + } + role_arn = aws_iam_role.test.arn + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1)) +} + +func testAccBatchTransform_tags2(rName string, tagKey1, tagValue1 string, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = %[1]q + data_quality_app_specification { + image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_job_input { + batch_transform_input { + data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" + dataset_format { + csv {} + } + } + } + data_quality_job_output_config { + monitoring_outputs { + s3_output { + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + } + } + role_arn = aws_iam_role.test.arn + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) +} From 0e1aacfc3b86e999d15906875c27c6ab5758a62c Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Fri, 17 Mar 2023 20:50:35 -0700 Subject: [PATCH 22/49] clean up tests --- .../data_quality_job_definition_test.go | 265 ------------------ 1 file changed, 265 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index 10bc4840d583..94382e4b14f5 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -82,35 +82,12 @@ func TestAccSageMakerDataQualityJobDefinition_appSpecification_optional(t 
*testi Config: testAccEndpoint_appSpecification_optional(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "name", rName), - acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.0.environment.%", "1"), resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.0.environment.foo", "bar"), resource.TestMatchResourceAttr(resourceName, "data_quality_app_specification.0.record_preprocessor_source_uri", regexp.MustCompile("pre.sh")), resource.TestMatchResourceAttr(resourceName, "data_quality_app_specification.0.post_analytics_processor_source_uri", regexp.MustCompile("post.sh")), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "data_quality_job_input.0.endpoint_input.0.endpoint_name", "aws_sagemaker_endpoint.test", "name"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.0.s3_data_distribution_type", "FullyReplicated"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.0.s3_input_mode", "File"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), - resource.TestCheckResourceAttr(resourceName, 
"data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), - resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), - resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), - resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), - resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), - resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), - resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), - resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { @@ -138,34 +115,11 @@ func TestAccSageMakerDataQualityJobDefinition_baselineConfig(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "name", rName), - acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", 
"registry_path"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "data_quality_job_input.0.endpoint_input.0.endpoint_name", "aws_sagemaker_endpoint.test", "name"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.0.s3_data_distribution_type", "FullyReplicated"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.0.s3_input_mode", "File"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), - resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), - resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "1"), resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.0.constraints_resource.#", "1"), resource.TestMatchResourceAttr(resourceName, 
"data_quality_baseline_config.0.constraints_resource.0.s3_uri", regexp.MustCompile("constraints")), resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.0.statistics_resource.#", "1"), resource.TestMatchResourceAttr(resourceName, "data_quality_baseline_config.0.statistics_resource.0.s3_uri", regexp.MustCompile("statistics")), - resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), - resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), - resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), - resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { @@ -192,31 +146,11 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform(t *testing.T) { Config: testAccBatchTransform_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "name", rName), - acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), resource.TestMatchResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.data_captured_destination_s3_uri", regexp.MustCompile("captured")), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.#", "1"), resource.TestCheckResourceAttr(resourceName, 
"data_quality_job_input.0.batch_transform_input.0.dataset_format.0.csv.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), - resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), - resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), - resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), - resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), - resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), - resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), - resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { @@ -243,32 +177,10 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_csvHeader(t *testin Config: testAccBatchTransform_csvHeader(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "name", 
rName), - acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), - resource.TestMatchResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.data_captured_destination_s3_uri", regexp.MustCompile("captured")), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.#", "1"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.0.csv.#", "1"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.0.csv.0.header", "true"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), - resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), - resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", 
"1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), - resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), - resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), - resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), - resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), - resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { @@ -295,31 +207,9 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_json(t *testing.T) Config: testAccBatchTransform_json(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "name", rName), - acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), - resource.TestMatchResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.data_captured_destination_s3_uri", regexp.MustCompile("captured")), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.#", "1"), resource.TestCheckResourceAttr(resourceName, 
"data_quality_job_input.0.batch_transform_input.0.dataset_format.0.json.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), - resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), - resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), - resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), - resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), - resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), - resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), - resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { @@ -346,32 +236,12 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_jsonLine(t *testing Config: testAccBatchTransform_jsonLine(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "name", 
rName), - acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), resource.TestMatchResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.data_captured_destination_s3_uri", regexp.MustCompile("captured")), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.#", "1"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.0.json.#", "1"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.0.json.0.line", "true"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), - resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), - resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), 
- resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), - resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), - resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), - resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), - resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), - resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { @@ -398,34 +268,11 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_optional(t *testing Config: testAccBatchTransform_optional(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "name", rName), - acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), - resource.TestMatchResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.data_captured_destination_s3_uri", regexp.MustCompile("captured")), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.#", "1"), - resource.TestCheckResourceAttr(resourceName, 
"data_quality_job_input.0.batch_transform_input.0.dataset_format.0.csv.#", "1"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.local_path", "/opt/ml/processing/local_path"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.s3_data_distribution_type", "ShardedByS3Key"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.s3_input_mode", "Pipe"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), - resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), - resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), - resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), - resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), - resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), - resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), - resource.TestCheckResourceAttr(resourceName, 
"stopping_condition.0.max_runtime_in_seconds", "3600"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { @@ -452,32 +299,11 @@ func TestAccSageMakerDataQualityJobDefinition_endpoint_optional(t *testing.T) { Config: testAccEndpoint_optional(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "name", rName), - acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "data_quality_job_input.0.endpoint_input.0.endpoint_name", "aws_sagemaker_endpoint.test", "name"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.0.local_path", "/opt/ml/processing/local_path"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.0.s3_data_distribution_type", "ShardedByS3Key"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.endpoint_input.0.s3_input_mode", "Pipe"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), - resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), - 
resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), - resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), - resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), - resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), - resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), - resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), - resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { @@ -504,32 +330,8 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_ouputConfig_kmsKeyI Config: testAccBatchTransform_outputConfig_kmsKeyId(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "name", rName), - acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), - resource.TestCheckResourceAttr(resourceName, 
"data_quality_job_input.0.batch_transform_input.#", "1"), - resource.TestMatchResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.data_captured_destination_s3_uri", regexp.MustCompile("captured")), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.0.csv.#", "1"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), resource.TestCheckResourceAttrPair(resourceName, "data_quality_job_output_config.0.kms_key_id", "aws_kms_key.test", "arn"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), - resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), - resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), - resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), - resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), - resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), - resource.TestCheckResourceAttr(resourceName, 
"stopping_condition.#", "1"), - resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { @@ -556,32 +358,11 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_outputConfig_option Config: testAccBatchTransform_outputConfig_optional(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "name", rName), - acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), - resource.TestMatchResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.data_captured_destination_s3_uri", regexp.MustCompile("captured")), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.0.csv.#", "1"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), - resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), 
resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.local_path", "/opt/ml/processing/local_path"), resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "Continuous"), - resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), - resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), - resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), - resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), - resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), - resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { @@ -608,32 +389,9 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_jobResources_volume Config: testAccBatchTransform_jobResources_volumeKey(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "name", rName), - acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", 
"registry_path"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), - resource.TestMatchResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.data_captured_destination_s3_uri", regexp.MustCompile("captured")), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.0.csv.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), - resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), resource.TestCheckResourceAttrPair(resourceName, "job_resources.0.cluster_config.0.volume_kms_key_id", "aws_kms_key.test", "arn"), - resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), - resource.TestCheckResourceAttr(resourceName, 
"network_config.#", "0"), - resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), - resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), - resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "3600"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { @@ -660,31 +418,8 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_stoppingCondition(t Config: testAccBatchTransform_stoppingCondition(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "name", rName), - acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("data-quality-job-definition/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "data_quality_app_specification.0.image_uri", "data.aws_sagemaker_prebuilt_ecr_image.monitor", "registry_path"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), - resource.TestMatchResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.data_captured_destination_s3_uri", regexp.MustCompile("captured")), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.0.dataset_format.0.csv.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.#", "1"), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.#", "1"), - 
resource.TestMatchResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_uri", regexp.MustCompile("output")), - resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.0.monitoring_outputs.0.s3_output.0.s3_upload_mode", "EndOfJob"), - resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_count", "1"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.instance_type", "ml.t3.medium"), - resource.TestCheckResourceAttr(resourceName, "job_resources.0.cluster_config.0.volume_size_in_gb", "20"), - resource.TestCheckResourceAttr(resourceName, "data_quality_baseline_config.#", "0"), - resource.TestCheckResourceAttr(resourceName, "network_config.#", "0"), - resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), resource.TestCheckResourceAttr(resourceName, "stopping_condition.0.max_runtime_in_seconds", "600"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { From 10352c5970fdec5019925ec04436eedc9cfacd81 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Fri, 17 Mar 2023 22:17:26 -0700 Subject: [PATCH 23/49] test name cleanup --- .../data_quality_job_definition_test.go | 132 +++++++----------- 1 file changed, 49 insertions(+), 83 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index 94382e4b14f5..fd868585eff9 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -28,7 +28,7 @@ func TestAccSageMakerDataQualityJobDefinition_endpoint(t *testing.T) { CheckDestroy: 
testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccEndpoint_basic(rName), + Config: testAccDataQualityJobDefinitionConfig_endpointBasic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "name", rName), @@ -79,7 +79,7 @@ func TestAccSageMakerDataQualityJobDefinition_appSpecification_optional(t *testi CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccEndpoint_appSpecification_optional(rName), + Config: testAccDataQualityJobDefinitionConfig_endpointAppSpecificationOptional(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), @@ -111,7 +111,7 @@ func TestAccSageMakerDataQualityJobDefinition_baselineConfig(t *testing.T) { CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccEndpoint_baselineConfig(rName), + Config: testAccDataQualityJobDefinitionConfig_endpointBaselineConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "name", rName), @@ -143,7 +143,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform(t *testing.T) { CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBatchTransform_basic(rName), + Config: testAccDataQualityJobDefinitionConfig_batchTransformBasic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), @@ -174,7 +174,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_csvHeader(t *testin CheckDestroy: 
testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBatchTransform_csvHeader(rName), + Config: testAccDataQualityJobDefinitionConfig_batchTransformCsvHeader(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), @@ -204,7 +204,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_json(t *testing.T) CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBatchTransform_json(rName), + Config: testAccDataQualityJobDefinitionConfig_batchTransformJson(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), @@ -233,7 +233,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_jsonLine(t *testing CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBatchTransform_jsonLine(rName), + Config: testAccDataQualityJobDefinitionConfig_batchTransformJsonLine(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), @@ -265,7 +265,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_optional(t *testing CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBatchTransform_optional(rName), + Config: testAccDataQualityJobDefinitionConfig_batchTransformOptional(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), @@ -296,7 +296,7 @@ func 
TestAccSageMakerDataQualityJobDefinition_endpoint_optional(t *testing.T) { CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccEndpoint_optional(rName), + Config: testAccDataQualityJobDefinitionConfig_endpointOptional(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), @@ -327,7 +327,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_ouputConfig_kmsKeyI CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBatchTransform_outputConfig_kmsKeyId(rName), + Config: testAccDataQualityJobDefinitionConfig_outputConfigKmsKeyId(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), @@ -355,7 +355,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_outputConfig_option CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBatchTransform_outputConfig_optional(rName), + Config: testAccDataQualityJobDefinitionConfig_outputConfigOptional(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), @@ -386,7 +386,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_jobResources_volume CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBatchTransform_jobResources_volumeKey(rName), + Config: testAccDataQualityJobDefinitionConfig_jobResourcesVolumeKey(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, 
"job_resources.#", "1"), @@ -415,7 +415,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_stoppingCondition(t CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBatchTransform_stoppingCondition(rName), + Config: testAccDataQualityJobDefinitionConfig_stoppingCondition(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "stopping_condition.#", "1"), @@ -443,7 +443,7 @@ func TestAccSageMakerDataQualityJobDefinition_tags(t *testing.T) { CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBatchTransform_tags1(rName, "key1", "value1"), + Config: testAccDataQualityJobDefinitionConfig_tags1(rName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), @@ -456,7 +456,7 @@ func TestAccSageMakerDataQualityJobDefinition_tags(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccBatchTransform_tags2(rName, "key1", "value1updated", "key2", "value2"), + Config: testAccDataQualityJobDefinitionConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), @@ -465,7 +465,7 @@ func TestAccSageMakerDataQualityJobDefinition_tags(t *testing.T) { ), }, { - Config: testAccBatchTransform_tags1(rName, "key2", "value2"), + Config: testAccDataQualityJobDefinitionConfig_tags1(rName, "key2", "value2"), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), @@ -488,7 +488,7 @@ func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { CheckDestroy: 
testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccEndpoint_basic(rName), + Config: testAccDataQualityJobDefinitionConfig_batchTransformBasic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfsagemaker.ResourceDataQualityJobDefinition(), resourceName), @@ -513,8 +513,6 @@ func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { // NetworkConfig optional // enable_inter_container_traffic_encryption // enable_network_isolation -// StoppingCondition required -// func testAccCheckDataQualityJobDefinitionDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -559,7 +557,7 @@ func testAccCheckDataQualityJobDefinitionExists(ctx context.Context, n string) r } } -func testAccBatchTransform_Base(rName string) string { +func testAccDataQualityJobDefinitionConfig_batchTransformBase(rName string) string { return fmt.Sprintf(` provider "aws" { @@ -640,7 +638,7 @@ data "aws_sagemaker_prebuilt_ecr_image" "monitor" { `, rName) } -func testAccEndpoint_Base(rName string) string { +func testAccDataQualityJobDefinitionConfig_endpointBase(rName string) string { return fmt.Sprintf(` provider "aws" { @@ -772,8 +770,8 @@ data "aws_sagemaker_prebuilt_ecr_image" "monitor" { `, rName) } -func testAccEndpoint_basic(rName string) string { - return testAccEndpoint_Base(rName) + fmt.Sprintf(` +func testAccDataQualityJobDefinitionConfig_endpointBasic(rName string) string { + return testAccDataQualityJobDefinitionConfig_endpointBase(rName) + fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q data_quality_app_specification { @@ -803,8 +801,8 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName) } -func testAccEndpoint_appSpecification_optional(rName string) string { - return testAccEndpoint_Base(rName) + fmt.Sprintf(` +func 
testAccDataQualityJobDefinitionConfig_endpointAppSpecificationOptional(rName string) string { + return testAccDataQualityJobDefinitionConfig_endpointBase(rName) + fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q data_quality_app_specification { @@ -839,8 +837,8 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName) } -func testAccEndpoint_baselineConfig(rName string) string { - return testAccEndpoint_Base(rName) + fmt.Sprintf(` +func testAccDataQualityJobDefinitionConfig_endpointBaselineConfig(rName string) string { + return testAccDataQualityJobDefinitionConfig_endpointBase(rName) + fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q data_quality_app_specification { @@ -878,40 +876,8 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName) } -func testAccBatchTransform_basicTransformTemplate(rName string, dFormat string) string { - return testAccBatchTransform_Base(rName) + fmt.Sprintf(` -resource "aws_sagemaker_data_quality_job_definition" "test" { - name = %[1]q - data_quality_app_specification { - image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path - } - data_quality_job_input { - batch_transform_input { - data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" - %[2]s - } - } - data_quality_job_output_config { - monitoring_outputs { - s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" - } - } - } - job_resources { - cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" - volume_size_in_gb = 20 - } - } - role_arn = aws_iam_role.test.arn -} -`, rName, dFormat) -} - -func testAccBatchTransform_basic(rName string) string { - return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +func testAccDataQualityJobDefinitionConfig_batchTransformBasic(rName string) string { + return 
acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q data_quality_app_specification { @@ -944,8 +910,8 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName)) } -func testAccBatchTransform_csvHeader(rName string) string { - return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +func testAccDataQualityJobDefinitionConfig_batchTransformCsvHeader(rName string) string { + return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q data_quality_app_specification { @@ -980,8 +946,8 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName)) } -func testAccBatchTransform_json(rName string) string { - return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +func testAccDataQualityJobDefinitionConfig_batchTransformJson(rName string) string { + return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q data_quality_app_specification { @@ -1014,8 +980,8 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName)) } -func testAccBatchTransform_jsonLine(rName string) string { - return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +func testAccDataQualityJobDefinitionConfig_batchTransformJsonLine(rName string) string { + return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q data_quality_app_specification { @@ -1050,8 +1016,8 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName)) } -func testAccBatchTransform_optional(rName string) string { - return 
acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +func testAccDataQualityJobDefinitionConfig_batchTransformOptional(rName string) string { + return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q data_quality_app_specification { @@ -1087,8 +1053,8 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName)) } -func testAccEndpoint_optional(rName string) string { - return testAccEndpoint_Base(rName) + fmt.Sprintf(` +func testAccDataQualityJobDefinitionConfig_endpointOptional(rName string) string { + return testAccDataQualityJobDefinitionConfig_endpointBase(rName) + fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q data_quality_app_specification { @@ -1121,8 +1087,8 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName) } -func testAccBatchTransform_outputConfig_kmsKeyId(rName string) string { - return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +func testAccDataQualityJobDefinitionConfig_outputConfigKmsKeyId(rName string) string { + return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_kms_key" "test" { description = %[1]q deletion_window_in_days = 10 @@ -1161,8 +1127,8 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName)) } -func testAccBatchTransform_outputConfig_optional(rName string) string { - return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +func testAccDataQualityJobDefinitionConfig_outputConfigOptional(rName string) string { + return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q data_quality_app_specification { @@ -1197,8 +1163,8 @@ resource 
"aws_sagemaker_data_quality_job_definition" "test" { `, rName)) } -func testAccBatchTransform_jobResources_volumeKey(rName string) string { - return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +func testAccDataQualityJobDefinitionConfig_jobResourcesVolumeKey(rName string) string { + return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_kms_key" "test" { description = %[1]q deletion_window_in_days = 10 @@ -1237,8 +1203,8 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName)) } -func testAccBatchTransform_stoppingCondition(rName string) string { - return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +func testAccDataQualityJobDefinitionConfig_stoppingCondition(rName string) string { + return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q data_quality_app_specification { @@ -1274,8 +1240,8 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName)) } -func testAccBatchTransform_tags1(rName string, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +func testAccDataQualityJobDefinitionConfig_tags1(rName string, tagKey1, tagValue1 string) string { + return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q data_quality_app_specification { @@ -1312,8 +1278,8 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName, tagKey1, tagValue1)) } -func testAccBatchTransform_tags2(rName string, tagKey1, tagValue1 string, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose(testAccBatchTransform_Base(rName), fmt.Sprintf(` +func testAccDataQualityJobDefinitionConfig_tags2(rName string, 
tagKey1, tagValue1 string, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q data_quality_app_specification { From 2c08d43479c2f05e0d7fb4327dcfa2ee6db1260e Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Fri, 17 Mar 2023 22:37:57 -0700 Subject: [PATCH 24/49] more test cleanup --- .../data_quality_job_definition_test.go | 46 +++++++++++-------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index fd868585eff9..6a64c2c82e35 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -67,7 +67,7 @@ func TestAccSageMakerDataQualityJobDefinition_endpoint(t *testing.T) { }) } -func TestAccSageMakerDataQualityJobDefinition_appSpecification_optional(t *testing.T) { +func TestAccSageMakerDataQualityJobDefinition_appSpecificationOptional(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_sagemaker_data_quality_job_definition.test" @@ -79,7 +79,7 @@ func TestAccSageMakerDataQualityJobDefinition_appSpecification_optional(t *testi CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataQualityJobDefinitionConfig_endpointAppSpecificationOptional(rName), + Config: testAccDataQualityJobDefinitionConfig_appSpecificationOptional(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "data_quality_app_specification.#", "1"), @@ -111,7 +111,7 @@ func TestAccSageMakerDataQualityJobDefinition_baselineConfig(t *testing.T) { CheckDestroy: 
testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataQualityJobDefinitionConfig_endpointBaselineConfig(rName), + Config: testAccDataQualityJobDefinitionConfig_baselineConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "name", rName), @@ -162,7 +162,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform(t *testing.T) { }) } -func TestAccSageMakerDataQualityJobDefinition_batchTransform_csvHeader(t *testing.T) { +func TestAccSageMakerDataQualityJobDefinition_batchTransformCsvHeader(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_sagemaker_data_quality_job_definition.test" @@ -192,7 +192,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_csvHeader(t *testin }) } -func TestAccSageMakerDataQualityJobDefinition_batchTransform_json(t *testing.T) { +func TestAccSageMakerDataQualityJobDefinition_batchTransformJson(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_sagemaker_data_quality_job_definition.test" @@ -221,7 +221,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_json(t *testing.T) }) } -func TestAccSageMakerDataQualityJobDefinition_batchTransform_jsonLine(t *testing.T) { +func TestAccSageMakerDataQualityJobDefinition_batchTransformJsonLine(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_sagemaker_data_quality_job_definition.test" @@ -253,7 +253,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_jsonLine(t *testing }) } -func TestAccSageMakerDataQualityJobDefinition_batchTransform_optional(t *testing.T) { +func TestAccSageMakerDataQualityJobDefinition_batchTransformOptional(t *testing.T) { ctx := acctest.Context(t) rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_sagemaker_data_quality_job_definition.test" @@ -284,7 +284,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_optional(t *testing }) } -func TestAccSageMakerDataQualityJobDefinition_endpoint_optional(t *testing.T) { +func TestAccSageMakerDataQualityJobDefinition_endpointOptional(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_sagemaker_data_quality_job_definition.test" @@ -315,7 +315,7 @@ func TestAccSageMakerDataQualityJobDefinition_endpoint_optional(t *testing.T) { }) } -func TestAccSageMakerDataQualityJobDefinition_batchTransform_ouputConfig_kmsKeyId(t *testing.T) { +func TestAccSageMakerDataQualityJobDefinition_outputConfigKmsKeyId(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_sagemaker_data_quality_job_definition.test" @@ -343,7 +343,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_ouputConfig_kmsKeyI }) } -func TestAccSageMakerDataQualityJobDefinition_batchTransform_outputConfig_optional(t *testing.T) { +func TestAccSageMakerDataQualityJobDefinition_outputConfigOptional(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_sagemaker_data_quality_job_definition.test" @@ -374,7 +374,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_outputConfig_option }) } -func TestAccSageMakerDataQualityJobDefinition_batchTransform_jobResources_volumeKey(t *testing.T) { +func TestAccSageMakerDataQualityJobDefinition_jobResourcesVolumeKmsKeyId(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_sagemaker_data_quality_job_definition.test" @@ -403,7 +403,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform_jobResources_volume }) } -func 
TestAccSageMakerDataQualityJobDefinition_batchTransform_stoppingCondition(t *testing.T) { +func TestAccSageMakerDataQualityJobDefinition_stoppingCondition(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_sagemaker_data_quality_job_definition.test" @@ -801,8 +801,8 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName) } -func testAccDataQualityJobDefinitionConfig_endpointAppSpecificationOptional(rName string) string { - return testAccDataQualityJobDefinitionConfig_endpointBase(rName) + fmt.Sprintf(` +func testAccDataQualityJobDefinitionConfig_appSpecificationOptional(rName string) string { + return testAccDataQualityJobDefinitionConfig_batchTransformBase(rName) + fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q data_quality_app_specification { @@ -814,8 +814,11 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { post_analytics_processor_source_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/post.sh" } data_quality_job_input { - endpoint_input { - endpoint_name = aws_sagemaker_endpoint.test.name + batch_transform_input { + data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" + dataset_format { + csv {} + } } } data_quality_job_output_config { @@ -837,8 +840,8 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName) } -func testAccDataQualityJobDefinitionConfig_endpointBaselineConfig(rName string) string { - return testAccDataQualityJobDefinitionConfig_endpointBase(rName) + fmt.Sprintf(` +func testAccDataQualityJobDefinitionConfig_baselineConfig(rName string) string { + return testAccDataQualityJobDefinitionConfig_batchTransformBase(rName) + fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q data_quality_app_specification { @@ -853,8 +856,11 @@ resource "aws_sagemaker_data_quality_job_definition" 
"test" { } } data_quality_job_input { - endpoint_input { - endpoint_name = aws_sagemaker_endpoint.test.name + batch_transform_input { + data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" + dataset_format { + csv {} + } } } data_quality_job_output_config { From d5cc8b2385638c27db7ee0f8866453f806d96d9f Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Fri, 17 Mar 2023 23:15:11 -0700 Subject: [PATCH 25/49] adding network config tests --- .../data_quality_job_definition_test.go | 238 ++++++++++++++++++ 1 file changed, 238 insertions(+) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index 6a64c2c82e35..320d68640ec2 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -431,6 +431,92 @@ func TestAccSageMakerDataQualityJobDefinition_stoppingCondition(t *testing.T) { }) } +func TestAccSageMakerDataQualityJobDefinition_networkConfig(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_data_quality_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataQualityJobDefinitionConfig_networkConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "network_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "network_config.0.vpc_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "network_config.0.vpc_config.0.security_group_ids.#", 
"1"), + resource.TestCheckResourceAttr(resourceName, "network_config.0.vpc_config.0.subnets.#", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSageMakerDataQualityJobDefinition_networkConfigTrafficEncryption(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_data_quality_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataQualityJobDefinitionConfig_networkConfigTrafficEncryption(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "network_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "network_config.0.enable_inter_container_traffic_encryption", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSageMakerDataQualityJobDefinition_networkConfigEnableNetworkIsolation(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_data_quality_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataQualityJobDefinitionConfig_networkConfigEnableNetworkIsolation(rName), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "network_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "network_config.0.enable_network_isolation", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccSageMakerDataQualityJobDefinition_tags(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1322,3 +1408,155 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { } `, rName, tagKey1, tagValue1, tagKey2, tagValue2)) } + +func testAccDataQualityJobDefinitionConfig_networkConfig(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigVPCWithSubnets(rName, 1), + testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), + fmt.Sprintf(` + +resource "aws_security_group" "test" { + count = 1 + + name = "%[1]s-${count.index}" +} + +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = %[1]q + data_quality_app_specification { + image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_job_input { + batch_transform_input { + data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" + dataset_format { + csv {} + } + } + } + data_quality_job_output_config { + monitoring_outputs { + s3_output { + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + } + } + network_config { + vpc_config { + subnets = aws_subnet.test[*].id + security_group_ids = aws_security_group.test[*].id + } + } + role_arn = aws_iam_role.test.arn +} +`, rName)) +} + +func testAccDataQualityJobDefinitionConfig_networkConfigTrafficEncryption(rName string) string { + return acctest.ConfigCompose( + 
acctest.ConfigVPCWithSubnets(rName, 1), + testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), + fmt.Sprintf(` + +resource "aws_security_group" "test" { + count = 1 + + name = "%[1]s-${count.index}" +} + +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = %[1]q + data_quality_app_specification { + image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_job_input { + batch_transform_input { + data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" + dataset_format { + csv {} + } + } + } + data_quality_job_output_config { + monitoring_outputs { + s3_output { + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + } + } + network_config { + enable_inter_container_traffic_encryption = true + vpc_config { + subnets = aws_subnet.test[*].id + security_group_ids = aws_security_group.test[*].id + } + } + role_arn = aws_iam_role.test.arn +} +`, rName)) +} + +func testAccDataQualityJobDefinitionConfig_networkConfigEnableNetworkIsolation(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigVPCWithSubnets(rName, 1), + testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), + fmt.Sprintf(` + +resource "aws_security_group" "test" { + count = 1 + + name = "%[1]s-${count.index}" +} + +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = %[1]q + data_quality_app_specification { + image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_job_input { + batch_transform_input { + data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" + dataset_format { + csv {} + } + } + } + data_quality_job_output_config { + monitoring_outputs { + s3_output { + s3_uri = 
"https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + } + } + network_config { + enable_network_isolation = true + vpc_config { + subnets = aws_subnet.test[*].id + security_group_ids = aws_security_group.test[*].id + } + } + role_arn = aws_iam_role.test.arn +} +`, rName)) +} From 5b61d30df46c3723bb5fd4e91ab4e624557fba84 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Tue, 21 Mar 2023 10:12:13 -0700 Subject: [PATCH 26/49] first pass at ading monitoring_schedule resource --- internal/service/sagemaker/find.go | 25 ++ .../service/sagemaker/monitoring_schedule.go | 243 ++++++++++++++++++ .../sagemaker/monitoring_schedule_test.go | 214 +++++++++++++++ .../service/sagemaker/service_package_gen.go | 1 + internal/service/sagemaker/status.go | 16 ++ internal/service/sagemaker/wait.go | 94 +++++-- 6 files changed, 568 insertions(+), 25 deletions(-) create mode 100644 internal/service/sagemaker/monitoring_schedule.go create mode 100644 internal/service/sagemaker/monitoring_schedule_test.go diff --git a/internal/service/sagemaker/find.go b/internal/service/sagemaker/find.go index ab003ec98f0d..662e1bc58042 100644 --- a/internal/service/sagemaker/find.go +++ b/internal/service/sagemaker/find.go @@ -522,6 +522,31 @@ func FindDataQualityJobDefinitionByName(ctx context.Context, conn *sagemaker.Sag return output, nil } +func FindMonitoringScheduleByName(ctx context.Context, conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeMonitoringScheduleOutput, error) { + input := &sagemaker.DescribeMonitoringScheduleInput{ + MonitoringScheduleName: aws.String(name), + } + + output, err := conn.DescribeMonitoringScheduleWithContext(ctx, input) + + if tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { + return nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + 
if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + func FindFlowDefinitionByName(ctx context.Context, conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeFlowDefinitionOutput, error) { input := &sagemaker.DescribeFlowDefinitionInput{ FlowDefinitionName: aws.String(name), diff --git a/internal/service/sagemaker/monitoring_schedule.go b/internal/service/sagemaker/monitoring_schedule.go new file mode 100644 index 000000000000..00def37bcce9 --- /dev/null +++ b/internal/service/sagemaker/monitoring_schedule.go @@ -0,0 +1,243 @@ +package sagemaker + +import ( + "context" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/sagemaker" + "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/verify" +) + +// @SDKResource("aws_sagemaker_monitoring_schedule") +func ResourceMonitoringSchedule() *schema.Resource { + return &schema.Resource{ + CreateWithoutTimeout: resourceMonitoringScheduleCreate, + ReadWithoutTimeout: resourceMonitoringScheduleRead, + UpdateWithoutTimeout: resourceMonitoringScheduleUpdate, + DeleteWithoutTimeout: resourceMonitoringScheduleDelete, + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validName, + }, + 
"monitoring_schedule_config": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "monitoring_job_definition_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validName, + }, + "monitoring_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(sagemaker.MonitoringType_Values(), false), + }, + }, + }, + }, + "tags": tftags.TagsSchema(), + "tags_all": tftags.TagsSchemaComputed(), + }, + CustomizeDiff: verify.SetTagsDiff, + } +} + +func resourceMonitoringScheduleCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).SageMakerConn() + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + tags := defaultTagsConfig.MergeTags(tftags.New(ctx, d.Get("tags").(map[string]interface{}))) + + var name string + if v, ok := d.GetOk("name"); ok { + name = v.(string) + } else { + name = resource.UniqueId() + } + + createOpts := &sagemaker.CreateMonitoringScheduleInput{ + MonitoringScheduleName: aws.String(name), + MonitoringScheduleConfig: expandMonitoringScheduleConfig(d.Get("monitoring_schedule_config").([]interface{})), + } + + if len(tags) > 0 { + createOpts.Tags = Tags(tags.IgnoreAWS()) + } + + log.Printf("[DEBUG] SageMaker Monitoring Schedule create config: %#v", *createOpts) + _, err := conn.CreateMonitoringScheduleWithContext(ctx, createOpts) + if err != nil { + return sdkdiag.AppendErrorf(diags, "creating SageMaker Monitoring Schedule: %s", err) + } + d.SetId(name) + if _, err := WaitMonitoringScheduleScheduled(ctx, conn, d.Id()); err != nil { + return sdkdiag.AppendErrorf(diags, "creating SageMaker Monitoring Schedule (%s): waiting for completion: %s", d.Id(), err) + } + + return append(diags, resourceMonitoringScheduleRead(ctx, d, meta)...) 
+} + +func expandMonitoringScheduleConfig(configured []interface{}) *sagemaker.MonitoringScheduleConfig { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.MonitoringScheduleConfig{} + + if v, ok := m["monitoring_job_definition_name"].(string); ok && v != "" { + c.MonitoringJobDefinitionName = aws.String(v) + } + + if v, ok := m["monitoring_type"].(string); ok && v != "" { + c.MonitoringType = aws.String(v) + } + + return c +} + +func resourceMonitoringScheduleRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).SageMakerConn() + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig + + monitoringSchedule, err := FindMonitoringScheduleByName(ctx, conn, d.Id()) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { + log.Printf("[WARN] SageMaker Monitoring Schedule (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading SageMaker Monitoring Schedule (%s): %s", d.Id(), err) + } + + d.Set("arn", monitoringSchedule.MonitoringScheduleArn) + d.Set("name", monitoringSchedule.MonitoringScheduleName) + + if err := d.Set("monitoring_schedule_config", flattenMonitoringScheduleConfig(monitoringSchedule.MonitoringScheduleConfig)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting monitoring_schedule_config for SageMaker Monitoring Schedule (%s): %s", d.Id(), err) + } + + tags, err := ListTags(ctx, conn, aws.StringValue(monitoringSchedule.MonitoringScheduleArn)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "listing tags for SageMaker Monitoring Schedule (%s): %s", d.Id(), err) + } + + tags = tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig) + + //lintignore:AWSR002 + if err := d.Set("tags", 
tags.RemoveDefaultConfig(defaultTagsConfig).Map()); err != nil { + return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) + } + + if err := d.Set("tags_all", tags.Map()); err != nil { + return sdkdiag.AppendErrorf(diags, "setting tags_all: %s", err) + } + + return diags +} + +func flattenMonitoringScheduleConfig(monitoringScheduleConfig *sagemaker.MonitoringScheduleConfig) []map[string]interface{} { + if monitoringScheduleConfig == nil { + return []map[string]interface{}{} + } + + spec := map[string]interface{}{} + + if monitoringScheduleConfig.MonitoringJobDefinitionName != nil { + spec["monitoring_job_definition_name"] = aws.StringValue(monitoringScheduleConfig.MonitoringJobDefinitionName) + } + + if monitoringScheduleConfig.MonitoringType != nil { + spec["monitoring_type"] = aws.StringValue(monitoringScheduleConfig.MonitoringType) + } + + return []map[string]interface{}{spec} +} + +func resourceMonitoringScheduleUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).SageMakerConn() + + if d.HasChange("tags_all") { + o, n := d.GetChange("tags_all") + + if err := UpdateTags(ctx, conn, d.Get("arn").(string), o, n); err != nil { + return sdkdiag.AppendErrorf(diags, "updating SageMaker Monitoring Schedule (%s) tags: %s", d.Id(), err) + } + } + + if d.HasChanges("monitoring_schedule_config") { + modifyOpts := &sagemaker.UpdateMonitoringScheduleInput{ + MonitoringScheduleName: aws.String(d.Id()), + } + + if v, ok := d.GetOk("monitoring_schedule_config"); ok && (len(v.([]interface{})) > 0) { + modifyOpts.MonitoringScheduleConfig = expandMonitoringScheduleConfig(v.([]interface{})) + } + + log.Printf("[INFO] Modifying monitoring_schedule_config attribute for %s: %#v", d.Id(), modifyOpts) + if _, err := conn.UpdateMonitoringScheduleWithContext(ctx, modifyOpts); err != nil { + return sdkdiag.AppendErrorf(diags, "updating SageMaker Monitoring Schedule (%s): %s", d.Id(), err) 
+ } + if _, err := WaitMonitoringScheduleScheduled(ctx, conn, d.Id()); err != nil { + return sdkdiag.AppendErrorf(diags, "creating SageMaker Monitoring Schedule (%s): waiting for completion: %s", d.Id(), err) + } + } + + return append(diags, resourceMonitoringScheduleRead(ctx, d, meta)...) +} + +func resourceMonitoringScheduleDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).SageMakerConn() + + deleteOpts := &sagemaker.DeleteMonitoringScheduleInput{ + MonitoringScheduleName: aws.String(d.Id()), + } + log.Printf("[INFO] Deleting SageMaker Monitoring Schedule : %s", d.Id()) + + _, err := conn.DeleteMonitoringScheduleWithContext(ctx, deleteOpts) + + if tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting SageMaker Monitoring Schedule (%s): %s", d.Id(), err) + } + + if _, err := WaitMonitoringScheduleNotFound(ctx, conn, d.Id()); err != nil { + if !tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { + return sdkdiag.AppendErrorf(diags, "waiting for SageMaker Monitoring Schedule (%s) to stop: %s", d.Id(), err) + } + } + return diags +} diff --git a/internal/service/sagemaker/monitoring_schedule_test.go b/internal/service/sagemaker/monitoring_schedule_test.go new file mode 100644 index 000000000000..1d37bb0a8560 --- /dev/null +++ b/internal/service/sagemaker/monitoring_schedule_test.go @@ -0,0 +1,214 @@ +package sagemaker_test + +import ( + "context" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/service/sagemaker" + sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfsagemaker 
"github.com/hashicorp/terraform-provider-aws/internal/service/sagemaker" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" +) + +func TestAccSageMakerMonitoringSchedule_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_monitoring_schedule.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckMonitoringScheduleDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccMonitoringScheduleConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckMonitoringScheduleExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("monitoring-schedule/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "monitoring_schedule_config.0.monitoring_job_definition_name", "aws_sagemaker_data_quality_job_definition.test", "name"), + resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.0.monitoring_type", "DataQuality"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckMonitoringScheduleDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).SageMakerConn() + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_sagemaker_monitoring_schedule" { + continue + } + + _, err := tfsagemaker.FindMonitoringScheduleByName(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue + } 
+ + if err != nil { + return err + } + + return fmt.Errorf("SageMaker Monitoring Schedule (%s) still exists", rs.Primary.ID) + } + return nil + } +} + +func testAccCheckMonitoringScheduleExists(ctx context.Context, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("no SageMaker Monitoring Schedule ID is set") + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).SageMakerConn() + _, err := tfsagemaker.FindMonitoringScheduleByName(ctx, conn, rs.Primary.ID) + + return err + } +} + +func testAccMonitoringScheduleConfig_base(rName string) string { + return fmt.Sprintf(` + +provider "aws" { + region = "us-west-2" + + default_tags { + tags = { + "adsk:moniker" = "AMPSDEMO-C-UW2" + } + } +} + +data "aws_caller_identity" "current" {} + +data "aws_iam_policy" "boundary" { + arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/ADSK-Boundary" +} + +data "aws_iam_policy_document" "access" { + statement { + effect = "Allow" + + actions = [ + "cloudwatch:PutMetricData", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:CreateLogGroup", + "logs:DescribeLogStreams", + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:BatchGetImage", + "s3:GetObject", + ] + + resources = ["*"] + } +} + +data "aws_partition" "current" {} + +data "aws_iam_policy_document" "assume_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["sagemaker.${data.aws_partition.current.dns_suffix}"] + } + } +} + +resource "aws_iam_role" "test" { + name = %[1]q + path = "/" + assume_role_policy = data.aws_iam_policy_document.assume_role.json + permissions_boundary = data.aws_iam_policy.boundary.arn +} + +resource "aws_iam_role_policy" "test" { + role = aws_iam_role.test.name + policy = 
data.aws_iam_policy_document.access.json +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_bucket_acl" "test" { + bucket = aws_s3_bucket.test.id + acl = "private" +} + +data "aws_sagemaker_prebuilt_ecr_image" "monitor" { + repository_name = "sagemaker-model-monitor-analyzer" + image_tag = "" +} + +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = %[1]q + data_quality_app_specification { + image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_job_input { + batch_transform_input { + data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" + dataset_format { + csv {} + } + } + } + data_quality_job_output_config { + monitoring_outputs { + s3_output { + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + } + } + role_arn = aws_iam_role.test.arn +} +`, rName) +} + +func testAccMonitoringScheduleConfig_basic(rName string) string { + return testAccMonitoringScheduleConfig_base(rName) + fmt.Sprintf(` +resource "aws_sagemaker_monitoring_schedule" "test" { + name = %[1]q + monitoring_schedule_config { + monitoring_job_definition_name = aws_sagemaker_data_quality_job_definition.test.name + monitoring_type = "DataQuality" + } +} +`, rName) +} diff --git a/internal/service/sagemaker/service_package_gen.go b/internal/service/sagemaker/service_package_gen.go index bbe8dc74eb9c..13bfba69a275 100644 --- a/internal/service/sagemaker/service_package_gen.go +++ b/internal/service/sagemaker/service_package_gen.go @@ -46,6 +46,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) map[string]func() *sc "aws_sagemaker_model": ResourceModel, "aws_sagemaker_model_package_group": ResourceModelPackageGroup, "aws_sagemaker_model_package_group_policy": ResourceModelPackageGroupPolicy, + 
"aws_sagemaker_monitoring_schedule": ResourceMonitoringSchedule, "aws_sagemaker_notebook_instance": ResourceNotebookInstance, "aws_sagemaker_notebook_instance_lifecycle_configuration": ResourceNotebookInstanceLifeCycleConfiguration, "aws_sagemaker_project": ResourceProject, diff --git a/internal/service/sagemaker/status.go b/internal/service/sagemaker/status.go index 12847414536d..820590c0dcbf 100644 --- a/internal/service/sagemaker/status.go +++ b/internal/service/sagemaker/status.go @@ -250,3 +250,19 @@ func StatusSpace(ctx context.Context, conn *sagemaker.SageMaker, domainId, name return output, aws.StringValue(output.Status), nil } } + +func StatusMonitoringSchedule(ctx context.Context, conn *sagemaker.SageMaker, name string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := FindMonitoringScheduleByName(ctx, conn, name) + + if tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.StringValue(output.MonitoringScheduleStatus), nil + } +} diff --git a/internal/service/sagemaker/wait.go b/internal/service/sagemaker/wait.go index 12c155ad0dcc..115e89465dbe 100644 --- a/internal/service/sagemaker/wait.go +++ b/internal/service/sagemaker/wait.go @@ -12,31 +12,33 @@ import ( ) const ( - NotebookInstanceInServiceTimeout = 60 * time.Minute - NotebookInstanceStoppedTimeout = 10 * time.Minute - NotebookInstanceDeletedTimeout = 10 * time.Minute - ModelPackageGroupCompletedTimeout = 10 * time.Minute - ModelPackageGroupDeletedTimeout = 10 * time.Minute - ImageCreatedTimeout = 10 * time.Minute - ImageDeletedTimeout = 10 * time.Minute - ImageVersionCreatedTimeout = 10 * time.Minute - ImageVersionDeletedTimeout = 10 * time.Minute - DomainInServiceTimeout = 10 * time.Minute - DomainDeletedTimeout = 10 * time.Minute - FeatureGroupCreatedTimeout = 10 * time.Minute - FeatureGroupDeletedTimeout = 10 * time.Minute - 
UserProfileInServiceTimeout = 10 * time.Minute - UserProfileDeletedTimeout = 10 * time.Minute - AppInServiceTimeout = 10 * time.Minute - AppDeletedTimeout = 10 * time.Minute - FlowDefinitionActiveTimeout = 2 * time.Minute - FlowDefinitionDeletedTimeout = 2 * time.Minute - ProjectCreatedTimeout = 15 * time.Minute - ProjectDeletedTimeout = 15 * time.Minute - WorkforceActiveTimeout = 10 * time.Minute - WorkforceDeletedTimeout = 10 * time.Minute - SpaceDeletedTimeout = 10 * time.Minute - SpaceInServiceTimeout = 10 * time.Minute + NotebookInstanceInServiceTimeout = 60 * time.Minute + NotebookInstanceStoppedTimeout = 10 * time.Minute + NotebookInstanceDeletedTimeout = 10 * time.Minute + ModelPackageGroupCompletedTimeout = 10 * time.Minute + ModelPackageGroupDeletedTimeout = 10 * time.Minute + ImageCreatedTimeout = 10 * time.Minute + ImageDeletedTimeout = 10 * time.Minute + ImageVersionCreatedTimeout = 10 * time.Minute + ImageVersionDeletedTimeout = 10 * time.Minute + DomainInServiceTimeout = 10 * time.Minute + DomainDeletedTimeout = 10 * time.Minute + FeatureGroupCreatedTimeout = 10 * time.Minute + FeatureGroupDeletedTimeout = 10 * time.Minute + UserProfileInServiceTimeout = 10 * time.Minute + UserProfileDeletedTimeout = 10 * time.Minute + AppInServiceTimeout = 10 * time.Minute + AppDeletedTimeout = 10 * time.Minute + FlowDefinitionActiveTimeout = 2 * time.Minute + FlowDefinitionDeletedTimeout = 2 * time.Minute + ProjectCreatedTimeout = 15 * time.Minute + ProjectDeletedTimeout = 15 * time.Minute + WorkforceActiveTimeout = 10 * time.Minute + WorkforceDeletedTimeout = 10 * time.Minute + SpaceDeletedTimeout = 10 * time.Minute + SpaceInServiceTimeout = 10 * time.Minute + MonitoringScheduleScheduledTimeout = 2 * time.Minute + MonitoringScheduleStoppedTimeout = 2 * time.Minute ) // WaitNotebookInstanceInService waits for a NotebookInstance to return InService @@ -644,3 +646,45 @@ func WaitSpaceDeleted(ctx context.Context, conn *sagemaker.SageMaker, domainId, return nil, err } 
+ +func WaitMonitoringScheduleScheduled(ctx context.Context, conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeMonitoringScheduleOutput, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{sagemaker.ScheduleStatusPending}, + Target: []string{sagemaker.ScheduleStatusScheduled}, + Refresh: StatusMonitoringSchedule(ctx, conn, name), + Timeout: MonitoringScheduleScheduledTimeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*sagemaker.DescribeMonitoringScheduleOutput); ok { + if status, reason := aws.StringValue(output.MonitoringScheduleStatus), aws.StringValue(output.FailureReason); status == sagemaker.ScheduleStatusFailed && reason != "" { + tfresource.SetLastError(err, errors.New(reason)) + } + + return output, err + } + + return nil, err +} + +func WaitMonitoringScheduleNotFound(ctx context.Context, conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeMonitoringScheduleOutput, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{sagemaker.ScheduleStatusScheduled, sagemaker.ScheduleStatusPending, sagemaker.ScheduleStatusStopped}, + Target: []string{}, + Refresh: StatusMonitoringSchedule(ctx, conn, name), + Timeout: MonitoringScheduleStoppedTimeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*sagemaker.DescribeMonitoringScheduleOutput); ok { + if status, reason := aws.StringValue(output.MonitoringScheduleStatus), aws.StringValue(output.FailureReason); status == sagemaker.ScheduleStatusFailed && reason != "" { + tfresource.SetLastError(err, errors.New(reason)) + } + + return output, err + } + + return nil, err +} From 0b073edde815d6cfae5427e665e9c80a24854048 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Tue, 21 Mar 2023 16:54:29 -0700 Subject: [PATCH 27/49] add monitring schedule disappears test --- .../sagemaker/monitoring_schedule_test.go | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git 
a/internal/service/sagemaker/monitoring_schedule_test.go b/internal/service/sagemaker/monitoring_schedule_test.go index 1d37bb0a8560..116b9a8ff0a1 100644 --- a/internal/service/sagemaker/monitoring_schedule_test.go +++ b/internal/service/sagemaker/monitoring_schedule_test.go @@ -47,6 +47,30 @@ func TestAccSageMakerMonitoringSchedule_basic(t *testing.T) { }) } +func TestAccSageMakerMonitoringSchedule_disappears(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_monitoring_schedule.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckMonitoringScheduleDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccMonitoringScheduleConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckMonitoringScheduleExists(ctx, resourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfsagemaker.ResourceMonitoringSchedule(), resourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfsagemaker.ResourceMonitoringSchedule(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func testAccCheckMonitoringScheduleDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).SageMakerConn() From fdfc3f473ed36ee77af764626d05cb6459483a33 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Tue, 21 Mar 2023 17:10:18 -0700 Subject: [PATCH 28/49] adding monitoring schedule tags test --- .../sagemaker/monitoring_schedule_test.go | 78 +++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/internal/service/sagemaker/monitoring_schedule_test.go b/internal/service/sagemaker/monitoring_schedule_test.go index 116b9a8ff0a1..8e86ea15b4db 100644 --- 
a/internal/service/sagemaker/monitoring_schedule_test.go +++ b/internal/service/sagemaker/monitoring_schedule_test.go @@ -47,6 +47,51 @@ func TestAccSageMakerMonitoringSchedule_basic(t *testing.T) { }) } +func TestAccSageMakerMonitoringSchedule_tags(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_monitoring_schedule.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckMonitoringScheduleDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccMonitoringScheduleConfig_tags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccMonitoringScheduleConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccMonitoringScheduleConfig_tags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + func TestAccSageMakerMonitoringSchedule_disappears(t *testing.T) { ctx := acctest.Context(t) 
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -236,3 +281,36 @@ resource "aws_sagemaker_monitoring_schedule" "test" { } `, rName) } + +func testAccMonitoringScheduleConfig_tags1(rName string, tagKey1, tagValue1 string) string { + return acctest.ConfigCompose(testAccMonitoringScheduleConfig_base(rName), fmt.Sprintf(` +resource "aws_sagemaker_monitoring_schedule" "test" { + name = %[1]q + monitoring_schedule_config { + monitoring_job_definition_name = aws_sagemaker_data_quality_job_definition.test.name + monitoring_type = "DataQuality" + } + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1)) +} + +func testAccMonitoringScheduleConfig_tags2(rName string, tagKey1, tagValue1 string, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose(testAccMonitoringScheduleConfig_base(rName), fmt.Sprintf(` +resource "aws_sagemaker_monitoring_schedule" "test" { + name = %[1]q + monitoring_schedule_config { + monitoring_job_definition_name = aws_sagemaker_data_quality_job_definition.test.name + monitoring_type = "DataQuality" + } + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) +} From 51ad83f2ff045f07c0717e5512a1dea2b7d7a04a Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Tue, 21 Mar 2023 18:00:25 -0700 Subject: [PATCH 29/49] adding schedule_config --- .../service/sagemaker/monitoring_schedule.go | 56 ++++++++++++++ .../sagemaker/monitoring_schedule_test.go | 77 +++++++++++++++++++ 2 files changed, 133 insertions(+) diff --git a/internal/service/sagemaker/monitoring_schedule.go b/internal/service/sagemaker/monitoring_schedule.go index 00def37bcce9..2f287264429a 100644 --- a/internal/service/sagemaker/monitoring_schedule.go +++ b/internal/service/sagemaker/monitoring_schedule.go @@ -3,6 +3,7 @@ package sagemaker import ( "context" "log" + "regexp" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/sagemaker" @@ -55,6 +56,23 @@ func ResourceMonitoringSchedule() 
*schema.Resource { Required: true, ValidateFunc: validation.StringInSlice(sagemaker.MonitoringType_Values(), false), }, + "schedule_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "schedule_expression": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringMatch(regexp.MustCompile(`^cron`), ""), + validation.StringLenBetween(1, 512), + ), + }, + }, + }, + }, }, }, }, @@ -117,6 +135,26 @@ func expandMonitoringScheduleConfig(configured []interface{}) *sagemaker.Monitor c.MonitoringType = aws.String(v) } + if v, ok := m["schedule_config"].([]interface{}); ok && len(v) > 0 { + c.ScheduleConfig = expandScheduleConfig(v) + } + + return c +} + +func expandScheduleConfig(configured []interface{}) *sagemaker.ScheduleConfig { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.ScheduleConfig{} + + if v, ok := m["schedule_expression"].(string); ok && v != "" { + c.ScheduleExpression = aws.String(v) + } + return c } @@ -179,6 +217,24 @@ func flattenMonitoringScheduleConfig(monitoringScheduleConfig *sagemaker.Monitor spec["monitoring_type"] = aws.StringValue(monitoringScheduleConfig.MonitoringType) } + if monitoringScheduleConfig.ScheduleConfig != nil { + spec["schedule_config"] = flattenScheduleConfig(monitoringScheduleConfig.ScheduleConfig) + } + + return []map[string]interface{}{spec} +} + +func flattenScheduleConfig(scheduleConfig *sagemaker.ScheduleConfig) []map[string]interface{} { + if scheduleConfig == nil { + return []map[string]interface{}{} + } + + spec := map[string]interface{}{} + + if scheduleConfig.ScheduleExpression != nil { + spec["schedule_expression"] = aws.StringValue(scheduleConfig.ScheduleExpression) + } + return []map[string]interface{}{spec} } diff --git a/internal/service/sagemaker/monitoring_schedule_test.go b/internal/service/sagemaker/monitoring_schedule_test.go 
index 8e86ea15b4db..2cd4a78c7490 100644 --- a/internal/service/sagemaker/monitoring_schedule_test.go +++ b/internal/service/sagemaker/monitoring_schedule_test.go @@ -92,6 +92,53 @@ func TestAccSageMakerMonitoringSchedule_tags(t *testing.T) { }) } +func TestAccSageMakerMonitoringSchedule_scheduleExpression(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_monitoring_schedule.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckMonitoringScheduleDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccMonitoringScheduleConfig_scheduleExpressionHourly(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.0.schedule_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.0.schedule_config.0.schedule_expression", "cron(0 * ? * * *)"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccMonitoringScheduleConfig_scheduleExpressionDaily(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.0.schedule_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.0.schedule_config.0.schedule_expression", "cron(0 0 ? 
* * *)"), + ), + }, + { + Config: testAccMonitoringScheduleConfig_scheduleExpressionHourly(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.0.schedule_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.0.schedule_config.0.schedule_expression", "cron(0 * ? * * *)"), + ), + }, + }, + }) +} + func TestAccSageMakerMonitoringSchedule_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -314,3 +361,33 @@ resource "aws_sagemaker_monitoring_schedule" "test" { } `, rName, tagKey1, tagValue1, tagKey2, tagValue2)) } + +func testAccMonitoringScheduleConfig_scheduleExpressionHourly(rName string) string { + return acctest.ConfigCompose(testAccMonitoringScheduleConfig_base(rName), fmt.Sprintf(` +resource "aws_sagemaker_monitoring_schedule" "test" { + name = %[1]q + monitoring_schedule_config { + monitoring_job_definition_name = aws_sagemaker_data_quality_job_definition.test.name + monitoring_type = "DataQuality" + schedule_config { + schedule_expression = "cron(0 * ? * * *)" + } + } +} +`, rName)) +} + +func testAccMonitoringScheduleConfig_scheduleExpressionDaily(rName string) string { + return acctest.ConfigCompose(testAccMonitoringScheduleConfig_base(rName), fmt.Sprintf(` +resource "aws_sagemaker_monitoring_schedule" "test" { + name = %[1]q + monitoring_schedule_config { + monitoring_job_definition_name = aws_sagemaker_data_quality_job_definition.test.name + monitoring_type = "DataQuality" + schedule_config { + schedule_expression = "cron(0 0 ? 
* * *)" + } + } +} +`, rName)) +} From aca67fc4952b0150ebeb86ffaf105ef431b61246 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Thu, 23 Mar 2023 17:19:42 -0700 Subject: [PATCH 30/49] first pass at data quality job def docs --- ..._data_quality_job_definition.html.markdown | 153 ++++++++++++++++++ 1 file changed, 153 insertions(+) create mode 100644 website/docs/r/sagemaker_data_quality_job_definition.html.markdown diff --git a/website/docs/r/sagemaker_data_quality_job_definition.html.markdown b/website/docs/r/sagemaker_data_quality_job_definition.html.markdown new file mode 100644 index 000000000000..c3b677b3901f --- /dev/null +++ b/website/docs/r/sagemaker_data_quality_job_definition.html.markdown @@ -0,0 +1,153 @@ +--- +subcategory: "SageMaker" +layout: "aws" +page_title: "AWS: aws_sagemaker_data_quality_job_definition" +description: |- + Provides a SageMaker Data Quality Job Definition resource. +--- + +# Resource: aws_sagemaker_data_quality_job_definition + +Provides a SageMaker data quality job definition resource. + +## Example Usage + +Basic usage: + +```terraform +resource "aws_sagemaker_data_quality_job_definition" "test" { + name = "my-data-quality-job-definition" + + data_quality_app_specification { + image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path + } + data_quality_job_input { + endpoint_input { + endpoint_name = aws_sagemaker_endpoint.my_endpoint.name + } + } + data_quality_job_output_config { + monitoring_outputs { + s3_output { + s3_uri = "https://${aws_s3_bucket.my_bucket.bucket_regional_domain_name}/output" + } + } + } + job_resources { + cluster_config { + instance_count = 1 + instance_type = "ml.t3.medium" + volume_size_in_gb = 20 + } + } + role_arn = aws_iam_role.my_role.arn +} +``` + +## Argument Reference + +The following arguments are supported: + +* `data_quality_app_specification` - (Required) Specifies the container that runs the monitoring job. Fields are documented below. 
+* `data_quality_baseline_config` - (Optional) Configures the constraints and baselines for the monitoring job. Fields are documented below. +* `data_quality_job_input` - (Required) A list of inputs for the monitoring job. Fields are documented below. +* `data_quality_job_output_config` - (Required) The output configuration for monitoring jobs. Fields are documented below. +* `job_resources` - (Required) Identifies the resources to deploy for a monitoring job. Fields are documented below. +* `name` - (Optional) The name of the data quality job definition. If omitted, Terraform will assign a random, unique name. +* `network_config` - (Optional) Specifies networking configuration for the monitoring job. Fields are documented below. +* `role_arn` - (Required) The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. +* `stopping_condition` - (Optional) A time limit for how long the monitoring job is allowed to run before stopping. Fields are documented below. +* `tags` - (Optional) A mapping of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### data_quality_app_specification + +* `environment` - (Optional) Sets the environment variables in the container that the monitoring job runs. A list of key value pairs. +* `image_uri` - (Required) The container image that the data quality monitoring job runs. +* `post_analytics_processor_source_uri` - (Optional) An Amazon S3 URI to a script that is called after analysis has been performed. Applicable only for the built-in (first party) containers. +* `record_preprocessor_source_uri` - (Optional) An Amazon S3 URI to a script that is called per row prior to running analysis. 
It can base64 decode the payload and convert it into a flatted json so that the built-in container can use the converted data. Applicable only for the built-in (first party) containers. + +### data_quality_baseline_config +* `constraints_resource` - (Optional) The constraints resource for a monitoring job. Fields are documented below. +* `statistics_resource` - (Optional) The statistics resource for a monitoring job. Fields are documented below. + +#### constraints_resource +* `s3_uri` - (Optional) The Amazon S3 URI for the constraints resource. + +#### statistics_resource +* `s3_uri` - (Optional) The Amazon S3 URI for the statistics resource. + +### data_quality_job_input +* `batch_transform_input` - (Optional) Input object for the batch transform job. Fields are documented below. +* `endpoint_input` - (Optional) Input object for the endpoint. Fields are documented below. + +#### batch_transform_input +* `data_captured_destination_s3_uri` - (Required) The Amazon S3 location being used to capture the data. +* `dataset_format` - (Required) The dataset format for your batch transform job. Fields are documented below. +* `local_path` - (Optional) Path to the filesystem where the batch transform data is available to the container. Defaults to `/opt/ml/processing/input`. +* `s3_data_distribution_type` - (Optional) Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Defaults to `FullyReplicated`. Valid values are `FullyReplicated` or `ShardedByS3Key` +* `s3_input_mode` - (Optional) Whether the `Pipe` or `File` is used as the input mode for transferring data for the monitoring job. `Pipe` mode is recommended for large datasets. `File` mode is useful for small files that fit in memory. Defaults to `File`. Valid values are `Pipe` or `File` + +##### dataset_format +* `csv` - (Optional) The CSV dataset used in the monitoring job. Fields are documented below. +* `json` - (Optional) The JSON dataset used in the monitoring job. 
Fields are documented below. + +###### csv +* `header` - (Optional) Indicates if the CSV data has a header. + +###### json +* `line` - (Optional) Indicates if the file should be read as a json object per line. + +#### endpoint_input +* `endpoint_name` - (Required) An endpoint in customer's account which has `data_capture_config` enabled. +* `local_path` - (Optional) Path to the filesystem where the endpoint data is available to the container. Defaults to `/opt/ml/processing/input`. +* `s3_data_distribution_type` - (Optional) Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Defaults to `FullyReplicated`. Valid values are `FullyReplicated` or `ShardedByS3Key` +* `s3_input_mode` - (Optional) Whether the `Pipe` or `File` is used as the input mode for transferring data for the monitoring job. `Pipe` mode is recommended for large datasets. `File` mode is useful for small files that fit in memory. Defaults to `File`. Valid values are `Pipe` or `File` + +### data_quality_job_output_config +* `kms_key_id` - (Optional) The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. +* `monitoring_outputs` - (Required) Monitoring outputs for monitoring jobs. This is where the output of the periodic monitoring jobs is uploaded. Fields are documented below. + +#### monitoring_outputs +* `s3_output` - (Required) The Amazon S3 storage location where the results of a monitoring job are saved. Fields are documented below. + +##### s3_output +* `local_path` - (Optional) The local path to the Amazon S3 storage location where Amazon SageMaker saves the results of a monitoring job. LocalPath is an absolute path for the output data. Defaults to `/opt/ml/processing/output`. +* `s3_upload_mode` - (Optional) Whether to upload the results of the monitoring job continuously or after the job completes. 
Valid values are `Continuous` or `EndOfJob` +* `s3_uri` - (Required) A URI that identifies the Amazon S3 storage location where Amazon SageMaker saves the results of a monitoring job. + +### job_resources +* `cluster_config` - (Required) The configuration for the cluster resources used to run the processing job. Fields are documented below. + +#### cluster_config +* `instance_count` - (Required) The number of ML compute instances to use in the model monitoring job. For distributed processing jobs, specify a value greater than 1. +* `instance_type` - (Required) The ML compute instance type for the processing job. +* `volume_kms_key_id` - (Optional) The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job. +* `volume_size_in_gb` - (Required) The size of the ML storage volume, in gigabytes, that you want to provision. You must specify sufficient ML storage for your scenario. + +### network_config +* `enable_inter_container_traffic_encryption` - (Optional) Whether to encrypt all communications between the instances used for the monitoring jobs. Choose `true` to encrypt communications. Encryption provides greater security for distributed jobs, but the processing might take longer. +* `enable_network_isolation` - (Optional) Whether to allow inbound and outbound network calls to and from the containers used for the monitoring job. +* `vpc_config` - (Optional) Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. Fields are documented below. + +#### vpc_config +* `security_group_ids` - (Required) The VPC security group IDs, in the form sg-xxxxxxxx. Specify the security groups for the VPC that is specified in the `subnets` field. +* `subnets` - (Required) The ID of the subnets in the VPC to which you want to connect your training job or model. 
+ +### stopping_condition +* `max_runtime_in_seconds` - (Required) The maximum runtime allowed in seconds. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `arn` - The Amazon Resource Name (ARN) assigned by AWS to this endpoint configuration. +* `name` - The name of the endpoint configuration. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Import + +Endpoint configurations can be imported using the `name`, e.g., + +``` +$ terraform import aws_sagemaker_data_quality_job_definition.test_data_quality_job_definition data-quality-job-definition-foo +``` From e19b6075517b4acd1313195a7ead2b1e1512a680 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Fri, 24 Mar 2023 11:41:15 -0700 Subject: [PATCH 31/49] fix data quality docs --- .../r/sagemaker_data_quality_job_definition.html.markdown | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/docs/r/sagemaker_data_quality_job_definition.html.markdown b/website/docs/r/sagemaker_data_quality_job_definition.html.markdown index c3b677b3901f..bd59de3b98be 100644 --- a/website/docs/r/sagemaker_data_quality_job_definition.html.markdown +++ b/website/docs/r/sagemaker_data_quality_job_definition.html.markdown @@ -140,13 +140,13 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: -* `arn` - The Amazon Resource Name (ARN) assigned by AWS to this endpoint configuration. -* `name` - The name of the endpoint configuration. +* `arn` - The Amazon Resource Name (ARN) assigned by AWS to this data quality job definition. +* `name` - The name of the data quality job definition. 
* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import -Endpoint configurations can be imported using the `name`, e.g., +Data quality job definitions can be imported using the `name`, e.g., ``` $ terraform import aws_sagemaker_data_quality_job_definition.test_data_quality_job_definition data-quality-job-definition-foo From 898d2cb26f733af7a26b88c147ed67fc6cc5eecc Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Fri, 24 Mar 2023 11:41:28 -0700 Subject: [PATCH 32/49] add monitoring schedule docs --- ...agemaker_monitoring_schedule.html.markdown | 58 +++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 website/docs/r/sagemaker_monitoring_schedule.html.markdown diff --git a/website/docs/r/sagemaker_monitoring_schedule.html.markdown b/website/docs/r/sagemaker_monitoring_schedule.html.markdown new file mode 100644 index 000000000000..00311ca42dc0 --- /dev/null +++ b/website/docs/r/sagemaker_monitoring_schedule.html.markdown @@ -0,0 +1,58 @@ +--- +subcategory: "SageMaker" +layout: "aws" +page_title: "AWS: aws_sagemaker_monitoring_schedule" +description: |- + Provides a SageMaker Monitoring Schedule resource. +--- + +# Resource: aws_sagemaker_monitoring_schedule + +Provides a SageMaker monitoring schedule resource. + +## Example Usage + +Basic usage: + +```terraform +resource "aws_sagemaker_monitoring_schedule" "test" { + name = "my-monitoring-schedule" + monitoring_schedule_config { + monitoring_job_definition_name = aws_sagemaker_data_quality_job_definition.test.name + monitoring_type = "DataQuality" + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `monitoring_schedule_config` - (Required) The configuration object that specifies the monitoring schedule and defines the monitoring job. Fields are documented below. 
+* `name` - (Optional) The name of the monitoring schedule. The name must be unique within an AWS Region within an AWS account. If omitted, Terraform will assign a random, unique name. +* `tags` - (Optional) A mapping of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### monitoring_schedule_config + +* `monitoring_job_definition_name` - (Required) The name of the monitoring job definition to schedule. +* `monitoring_type` - (Required) The type of the monitoring job definition to schedule. Valid values are `DataQuality`, `ModelQuality`, `ModelBias` or `ModelExplainability` +* `schedule_config` - (Optional) Configures the monitoring schedule. Fields are documented below. + +#### schedule_config +* `schedule_expression` - (Required) A cron expression that describes details about the monitoring schedule. For example, an hourly schedule would be `cron(0 * ? * * *)`. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `arn` - The Amazon Resource Name (ARN) assigned by AWS to this monitoring schedule. +* `name` - The name of the monitoring schedule. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
+ +## Import + +Monitoring schedules can be imported using the `name`, e.g., + +``` +$ terraform import aws_sagemaker_monitoring_schedule.test_monitoring_schedule monitoring-schedule-foo +``` From 765544f391e8f4367cce7bf0a2a6f924a39f767d Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Tue, 28 Mar 2023 15:02:26 -0700 Subject: [PATCH 33/49] bug fix with monitoring schedule plans --- internal/service/sagemaker/monitoring_schedule.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/service/sagemaker/monitoring_schedule.go b/internal/service/sagemaker/monitoring_schedule.go index 2f287264429a..4dc615f019b4 100644 --- a/internal/service/sagemaker/monitoring_schedule.go +++ b/internal/service/sagemaker/monitoring_schedule.go @@ -59,6 +59,7 @@ func ResourceMonitoringSchedule() *schema.Resource { "schedule_config": { Type: schema.TypeList, MaxItems: 1, + Computed: true, Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ From d11e4d7d61df7f3dc576a3348408159c76c0ee03 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Tue, 28 Mar 2023 15:02:47 -0700 Subject: [PATCH 34/49] remove TODO list --- .../sagemaker/data_quality_job_definition_test.go | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index fc463b79a1cc..e1f8f778e07f 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -586,20 +586,6 @@ func TestAccSageMakerDataQualityJobDefinition_disappears(t *testing.T) { }) } -// TODO: -// Issues: -// DataQualityAppSpecification -// container_arguments -// container_entrypoint -// DataQualityBaselineConfig optional -// baselining_job_name - -// TO ADD: -// NetworkConfig required -// NetworkConfig optional -// enable_inter_container_traffic_encryption -// enable_network_isolation - func 
testAccCheckDataQualityJobDefinitionDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).SageMakerConn() From e6914b18b57be98fab6ffc6bfcbba3af359b3406 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Tue, 28 Mar 2023 15:03:10 -0700 Subject: [PATCH 35/49] code cleanup --- .../sagemaker/data_quality_job_definition.go | 348 ++++++++---------- .../service/sagemaker/monitoring_schedule.go | 32 +- 2 files changed, 168 insertions(+), 212 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition.go b/internal/service/sagemaker/data_quality_job_definition.go index 75682998f2ab..b18ec1b58131 100644 --- a/internal/service/sagemaker/data_quality_job_definition.go +++ b/internal/service/sagemaker/data_quality_job_definition.go @@ -41,20 +41,6 @@ func ResourceDataQualityJobDefinition() *schema.Resource { ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "container_arguments": { - Type: schema.TypeSet, - MinItems: 1, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "container_entrypoint": { - Type: schema.TypeSet, - MinItems: 1, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, "environment": { Type: schema.TypeMap, Optional: true, @@ -96,12 +82,6 @@ func ResourceDataQualityJobDefinition() *schema.Resource { ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "baselining_job_name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validName, - }, "constraints_resource": { Type: schema.TypeList, MaxItems: 1, @@ -409,7 +389,7 @@ func ResourceDataQualityJobDefinition() *schema.Resource { "vpc_config": { Type: schema.TypeList, MaxItems: 1, - Required: true, + Optional: true, ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -587,354 +567,342 @@ func 
resourceDataQualityJobDefinitionRead(ctx context.Context, d *schema.Resourc return diags } -func flattenDataQualityAppSpecification(appSpecification *sagemaker.DataQualityAppSpecification) []map[string]interface{} { - if appSpecification == nil { +func flattenDataQualityAppSpecification(config *sagemaker.DataQualityAppSpecification) []map[string]interface{} { + if config == nil { return []map[string]interface{}{} } - spec := map[string]interface{}{} + m := map[string]interface{}{} - if appSpecification.ImageUri != nil { - spec["image_uri"] = aws.StringValue(appSpecification.ImageUri) + if config.ImageUri != nil { + m["image_uri"] = aws.StringValue(config.ImageUri) } - if appSpecification.ContainerArguments != nil { - spec["container_arguments"] = flex.FlattenStringSet(appSpecification.ContainerArguments) + if config.Environment != nil { + m["environment"] = aws.StringValueMap(config.Environment) } - if appSpecification.ContainerEntrypoint != nil { - spec["container_entrypoint"] = flex.FlattenStringSet(appSpecification.ContainerEntrypoint) + if config.PostAnalyticsProcessorSourceUri != nil { + m["post_analytics_processor_source_uri"] = aws.StringValue(config.PostAnalyticsProcessorSourceUri) } - if appSpecification.Environment != nil { - spec["environment"] = aws.StringValueMap(appSpecification.Environment) + if config.RecordPreprocessorSourceUri != nil { + m["record_preprocessor_source_uri"] = aws.StringValue(config.RecordPreprocessorSourceUri) } - if appSpecification.PostAnalyticsProcessorSourceUri != nil { - spec["post_analytics_processor_source_uri"] = aws.StringValue(appSpecification.PostAnalyticsProcessorSourceUri) - } - - if appSpecification.RecordPreprocessorSourceUri != nil { - spec["record_preprocessor_source_uri"] = aws.StringValue(appSpecification.RecordPreprocessorSourceUri) - } - - return []map[string]interface{}{spec} + return []map[string]interface{}{m} } -func flattenDataQualityBaselineConfig(baselineConfig *sagemaker.DataQualityBaselineConfig) 
[]map[string]interface{} { - if baselineConfig == nil { +func flattenDataQualityBaselineConfig(config *sagemaker.DataQualityBaselineConfig) []map[string]interface{} { + if config == nil { return []map[string]interface{}{} } - fConfig := map[string]interface{}{} + m := map[string]interface{}{} - if baselineConfig.BaseliningJobName != nil { - fConfig["baselining_job_name"] = aws.StringValue(baselineConfig.BaseliningJobName) + if config.ConstraintsResource != nil { + m["constraints_resource"] = flattenConstraintsResource(config.ConstraintsResource) } - if baselineConfig.ConstraintsResource != nil { - fConfig["constraints_resource"] = flattenConstraintsResource(baselineConfig.ConstraintsResource) + if config.StatisticsResource != nil { + m["statistics_resource"] = flattenStatisticsResource(config.StatisticsResource) } - if baselineConfig.StatisticsResource != nil { - fConfig["statistics_resource"] = flattenStatisticsResource(baselineConfig.StatisticsResource) - } - - return []map[string]interface{}{fConfig} + return []map[string]interface{}{m} } -func flattenConstraintsResource(constraintsResource *sagemaker.MonitoringConstraintsResource) []map[string]interface{} { - if constraintsResource == nil { +func flattenConstraintsResource(config *sagemaker.MonitoringConstraintsResource) []map[string]interface{} { + if config == nil { return []map[string]interface{}{} } - fResource := map[string]interface{}{} + m := map[string]interface{}{} - if constraintsResource.S3Uri != nil { - fResource["s3_uri"] = aws.StringValue(constraintsResource.S3Uri) + if config.S3Uri != nil { + m["s3_uri"] = aws.StringValue(config.S3Uri) } - return []map[string]interface{}{fResource} + return []map[string]interface{}{m} } -func flattenStatisticsResource(statisticsResource *sagemaker.MonitoringStatisticsResource) []map[string]interface{} { - if statisticsResource == nil { +func flattenStatisticsResource(config *sagemaker.MonitoringStatisticsResource) []map[string]interface{} { + if config == nil { 
return []map[string]interface{}{} } - fResource := map[string]interface{}{} + m := map[string]interface{}{} - if statisticsResource.S3Uri != nil { - fResource["s3_uri"] = aws.StringValue(statisticsResource.S3Uri) + if config.S3Uri != nil { + m["s3_uri"] = aws.StringValue(config.S3Uri) } - return []map[string]interface{}{fResource} + return []map[string]interface{}{m} } -func flattenDataQualityJobInput(jobInput *sagemaker.DataQualityJobInput) []map[string]interface{} { - if jobInput == nil { +func flattenDataQualityJobInput(config *sagemaker.DataQualityJobInput) []map[string]interface{} { + if config == nil { return []map[string]interface{}{} } - spec := map[string]interface{}{} + m := map[string]interface{}{} - if jobInput.EndpointInput != nil { - spec["endpoint_input"] = flattenEndpointInput(jobInput.EndpointInput) + if config.EndpointInput != nil { + m["endpoint_input"] = flattenEndpointInput(config.EndpointInput) } - if jobInput.BatchTransformInput != nil { - spec["batch_transform_input"] = flattenBatchTransformInput(jobInput.BatchTransformInput) + if config.BatchTransformInput != nil { + m["batch_transform_input"] = flattenBatchTransformInput(config.BatchTransformInput) } - return []map[string]interface{}{spec} + return []map[string]interface{}{m} } -func flattenBatchTransformInput(transformInput *sagemaker.BatchTransformInput_) []map[string]interface{} { - if transformInput == nil { +func flattenBatchTransformInput(config *sagemaker.BatchTransformInput_) []map[string]interface{} { + if config == nil { return []map[string]interface{}{} } - fInput := map[string]interface{}{} + m := map[string]interface{}{} - if transformInput.LocalPath != nil { - fInput["local_path"] = aws.StringValue(transformInput.LocalPath) + if config.LocalPath != nil { + m["local_path"] = aws.StringValue(config.LocalPath) } - if transformInput.DataCapturedDestinationS3Uri != nil { - fInput["data_captured_destination_s3_uri"] = aws.StringValue(transformInput.DataCapturedDestinationS3Uri) + 
if config.DataCapturedDestinationS3Uri != nil { + m["data_captured_destination_s3_uri"] = aws.StringValue(config.DataCapturedDestinationS3Uri) } - if transformInput.DatasetFormat != nil { - fInput["dataset_format"] = flattenDatasetFormat(transformInput.DatasetFormat) + if config.DatasetFormat != nil { + m["dataset_format"] = flattenDatasetFormat(config.DatasetFormat) } - if transformInput.S3DataDistributionType != nil { - fInput["s3_data_distribution_type"] = aws.StringValue(transformInput.S3DataDistributionType) + if config.S3DataDistributionType != nil { + m["s3_data_distribution_type"] = aws.StringValue(config.S3DataDistributionType) } - if transformInput.S3InputMode != nil { - fInput["s3_input_mode"] = aws.StringValue(transformInput.S3InputMode) + if config.S3InputMode != nil { + m["s3_input_mode"] = aws.StringValue(config.S3InputMode) } - return []map[string]interface{}{fInput} + return []map[string]interface{}{m} } -func flattenDatasetFormat(datasetFormat *sagemaker.MonitoringDatasetFormat) []map[string]interface{} { - if datasetFormat == nil { +func flattenDatasetFormat(config *sagemaker.MonitoringDatasetFormat) []map[string]interface{} { + if config == nil { return []map[string]interface{}{} } - fFormat := map[string]interface{}{} + m := map[string]interface{}{} - if datasetFormat.Csv != nil { - fFormat["csv"] = flattenCsv(datasetFormat.Csv) + if config.Csv != nil { + m["csv"] = flattenCsv(config.Csv) } - if datasetFormat.Json != nil { - fFormat["json"] = flattenJson(datasetFormat.Json) + if config.Json != nil { + m["json"] = flattenJson(config.Json) } - return []map[string]interface{}{fFormat} + return []map[string]interface{}{m} } -func flattenCsv(csv *sagemaker.MonitoringCsvDatasetFormat) []map[string]interface{} { - if csv == nil { +func flattenCsv(config *sagemaker.MonitoringCsvDatasetFormat) []map[string]interface{} { + if config == nil { return []map[string]interface{}{} } - fCsv := map[string]interface{}{} + m := map[string]interface{}{} - if 
csv.Header != nil { - fCsv["header"] = aws.BoolValue(csv.Header) + if config.Header != nil { + m["header"] = aws.BoolValue(config.Header) } - return []map[string]interface{}{fCsv} + return []map[string]interface{}{m} } -func flattenJson(json *sagemaker.MonitoringJsonDatasetFormat) []map[string]interface{} { - if json == nil { +func flattenJson(config *sagemaker.MonitoringJsonDatasetFormat) []map[string]interface{} { + if config == nil { return []map[string]interface{}{} } - fJson := map[string]interface{}{} + m := map[string]interface{}{} - if json.Line != nil { - fJson["line"] = aws.BoolValue(json.Line) + if config.Line != nil { + m["line"] = aws.BoolValue(config.Line) } - return []map[string]interface{}{fJson} + return []map[string]interface{}{m} } -func flattenEndpointInput(endpointInput *sagemaker.EndpointInput) []map[string]interface{} { - if endpointInput == nil { +func flattenEndpointInput(config *sagemaker.EndpointInput) []map[string]interface{} { + if config == nil { return []map[string]interface{}{} } - spec := map[string]interface{}{} + m := map[string]interface{}{} - if endpointInput.EndpointName != nil { - spec["endpoint_name"] = aws.StringValue(endpointInput.EndpointName) + if config.EndpointName != nil { + m["endpoint_name"] = aws.StringValue(config.EndpointName) } - if endpointInput.LocalPath != nil { - spec["local_path"] = aws.StringValue(endpointInput.LocalPath) + if config.LocalPath != nil { + m["local_path"] = aws.StringValue(config.LocalPath) } - if endpointInput.S3DataDistributionType != nil { - spec["s3_data_distribution_type"] = aws.StringValue(endpointInput.S3DataDistributionType) + if config.S3DataDistributionType != nil { + m["s3_data_distribution_type"] = aws.StringValue(config.S3DataDistributionType) } - if endpointInput.S3InputMode != nil { - spec["s3_input_mode"] = aws.StringValue(endpointInput.S3InputMode) + if config.S3InputMode != nil { + m["s3_input_mode"] = aws.StringValue(config.S3InputMode) } - return 
[]map[string]interface{}{spec} + return []map[string]interface{}{m} } -func flattenDataQualityJobOutputConfig(outputConfig *sagemaker.MonitoringOutputConfig) []map[string]interface{} { - if outputConfig == nil { +func flattenDataQualityJobOutputConfig(config *sagemaker.MonitoringOutputConfig) []map[string]interface{} { + if config == nil { return []map[string]interface{}{} } - spec := map[string]interface{}{} + m := map[string]interface{}{} - if outputConfig.KmsKeyId != nil { - spec["kms_key_id"] = aws.StringValue(outputConfig.KmsKeyId) + if config.KmsKeyId != nil { + m["kms_key_id"] = aws.StringValue(config.KmsKeyId) } - if outputConfig.MonitoringOutputs != nil { - spec["monitoring_outputs"] = flattenMonitoringOutputs(outputConfig.MonitoringOutputs) + if config.MonitoringOutputs != nil { + m["monitoring_outputs"] = flattenMonitoringOutputs(config.MonitoringOutputs) } - return []map[string]interface{}{spec} + return []map[string]interface{}{m} } func flattenMonitoringOutputs(list []*sagemaker.MonitoringOutput) []map[string]interface{} { - containers := make([]map[string]interface{}, 0, len(list)) + outputs := make([]map[string]interface{}, 0, len(list)) for _, lRaw := range list { - monitoringOutput := make(map[string]interface{}) - monitoringOutput["s3_output"] = flattenS3Output(lRaw.S3Output) - containers = append(containers, monitoringOutput) + m := make(map[string]interface{}) + m["s3_output"] = flattenS3Output(lRaw.S3Output) + outputs = append(outputs, m) } - return containers + return outputs } -func flattenS3Output(s3Output *sagemaker.MonitoringS3Output) []map[string]interface{} { - if s3Output == nil { +func flattenS3Output(config *sagemaker.MonitoringS3Output) []map[string]interface{} { + if config == nil { return []map[string]interface{}{} } - spec := map[string]interface{}{} + m := map[string]interface{}{} - if s3Output.LocalPath != nil { - spec["local_path"] = aws.StringValue(s3Output.LocalPath) + if config.LocalPath != nil { + m["local_path"] = 
aws.StringValue(config.LocalPath) } - if s3Output.S3UploadMode != nil { - spec["s3_upload_mode"] = aws.StringValue(s3Output.S3UploadMode) + if config.S3UploadMode != nil { + m["s3_upload_mode"] = aws.StringValue(config.S3UploadMode) } - if s3Output.S3Uri != nil { - spec["s3_uri"] = aws.StringValue(s3Output.S3Uri) + if config.S3Uri != nil { + m["s3_uri"] = aws.StringValue(config.S3Uri) } - return []map[string]interface{}{spec} + return []map[string]interface{}{m} } -func flattenJobResources(jobResources *sagemaker.MonitoringResources) []map[string]interface{} { - if jobResources == nil { +func flattenJobResources(config *sagemaker.MonitoringResources) []map[string]interface{} { + if config == nil { return []map[string]interface{}{} } - spec := map[string]interface{}{} + m := map[string]interface{}{} - if jobResources.ClusterConfig != nil { - spec["cluster_config"] = flattenClusterConfig(jobResources.ClusterConfig) + if config.ClusterConfig != nil { + m["cluster_config"] = flattenClusterConfig(config.ClusterConfig) } - return []map[string]interface{}{spec} + return []map[string]interface{}{m} } -func flattenClusterConfig(clusterConfig *sagemaker.MonitoringClusterConfig) []map[string]interface{} { - if clusterConfig == nil { +func flattenClusterConfig(config *sagemaker.MonitoringClusterConfig) []map[string]interface{} { + if config == nil { return []map[string]interface{}{} } - spec := map[string]interface{}{} + m := map[string]interface{}{} - if clusterConfig.InstanceCount != nil { - spec["instance_count"] = aws.Int64Value(clusterConfig.InstanceCount) + if config.InstanceCount != nil { + m["instance_count"] = aws.Int64Value(config.InstanceCount) } - if clusterConfig.InstanceType != nil { - spec["instance_type"] = aws.StringValue(clusterConfig.InstanceType) + if config.InstanceType != nil { + m["instance_type"] = aws.StringValue(config.InstanceType) } - if clusterConfig.VolumeKmsKeyId != nil { - spec["volume_kms_key_id"] = aws.StringValue(clusterConfig.VolumeKmsKeyId) 
+ if config.VolumeKmsKeyId != nil { + m["volume_kms_key_id"] = aws.StringValue(config.VolumeKmsKeyId) } - if clusterConfig.VolumeSizeInGB != nil { - spec["volume_size_in_gb"] = aws.Int64Value(clusterConfig.VolumeSizeInGB) + if config.VolumeSizeInGB != nil { + m["volume_size_in_gb"] = aws.Int64Value(config.VolumeSizeInGB) } - return []map[string]interface{}{spec} + return []map[string]interface{}{m} } -func flattenNetworkConfig(networkConfig *sagemaker.MonitoringNetworkConfig) []map[string]interface{} { - if networkConfig == nil { +func flattenNetworkConfig(config *sagemaker.MonitoringNetworkConfig) []map[string]interface{} { + if config == nil { return []map[string]interface{}{} } - spec := map[string]interface{}{} + m := map[string]interface{}{} - if networkConfig.EnableInterContainerTrafficEncryption != nil { - spec["enable_inter_container_traffic_encryption"] = aws.BoolValue(networkConfig.EnableInterContainerTrafficEncryption) + if config.EnableInterContainerTrafficEncryption != nil { + m["enable_inter_container_traffic_encryption"] = aws.BoolValue(config.EnableInterContainerTrafficEncryption) } - if networkConfig.EnableNetworkIsolation != nil { - spec["enable_network_isolation"] = aws.BoolValue(networkConfig.EnableNetworkIsolation) + if config.EnableNetworkIsolation != nil { + m["enable_network_isolation"] = aws.BoolValue(config.EnableNetworkIsolation) } - if networkConfig.VpcConfig != nil { - spec["vpc_config"] = flattenVpcConfig(networkConfig.VpcConfig) + if config.VpcConfig != nil { + m["vpc_config"] = flattenVpcConfig(config.VpcConfig) } - return []map[string]interface{}{spec} + return []map[string]interface{}{m} } -func flattenVpcConfig(vpcConfig *sagemaker.VpcConfig) []map[string]interface{} { - if vpcConfig == nil { +func flattenVpcConfig(config *sagemaker.VpcConfig) []map[string]interface{} { + if config == nil { return []map[string]interface{}{} } - spec := map[string]interface{}{} + m := map[string]interface{}{} - if vpcConfig.SecurityGroupIds != nil 
{ - spec["security_group_ids"] = flex.FlattenStringSet(vpcConfig.SecurityGroupIds) + if config.SecurityGroupIds != nil { + m["security_group_ids"] = flex.FlattenStringSet(config.SecurityGroupIds) } - if vpcConfig.Subnets != nil { - spec["subnets"] = flex.FlattenStringSet(vpcConfig.Subnets) + if config.Subnets != nil { + m["subnets"] = flex.FlattenStringSet(config.Subnets) } - return []map[string]interface{}{spec} + return []map[string]interface{}{m} } -func flattenStoppingCondition(stoppingCondition *sagemaker.MonitoringStoppingCondition) []map[string]interface{} { - if stoppingCondition == nil { +func flattenStoppingCondition(config *sagemaker.MonitoringStoppingCondition) []map[string]interface{} { + if config == nil { return []map[string]interface{}{} } - spec := map[string]interface{}{} + m := map[string]interface{}{} - if stoppingCondition.MaxRuntimeInSeconds != nil { - spec["max_runtime_in_seconds"] = aws.Int64Value(stoppingCondition.MaxRuntimeInSeconds) + if config.MaxRuntimeInSeconds != nil { + m["max_runtime_in_seconds"] = aws.Int64Value(config.MaxRuntimeInSeconds) } - return []map[string]interface{}{spec} + return []map[string]interface{}{m} } func resourceDataQualityJobDefinitionUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { @@ -986,14 +954,6 @@ func expandDataQualityAppSpecification(configured []interface{}) *sagemaker.Data c.ImageUri = aws.String(v) } - if v, ok := m["container_arguments"].(*schema.Set); ok && v.Len() > 0 { - c.ContainerArguments = flex.ExpandStringSet(v) - } - - if v, ok := m["container_entrypoint"].(*schema.Set); ok && v.Len() > 0 { - c.ContainerEntrypoint = flex.ExpandStringSet(v) - } - if v, ok := m["environment"].(map[string]interface{}); ok && len(v) > 0 { c.Environment = flex.ExpandStringMap(v) } @@ -1018,10 +978,6 @@ func expandDataQualityBaselineConfig(configured []interface{}) *sagemaker.DataQu c := &sagemaker.DataQualityBaselineConfig{} - if v, ok := m["baselining_job_name"].(string); 
ok && v != "" { - c.BaseliningJobName = aws.String(v) - } - if v, ok := m["constraints_resource"].([]interface{}); ok && len(v) > 0 { c.ConstraintsResource = expandConstraintsResource(v) } diff --git a/internal/service/sagemaker/monitoring_schedule.go b/internal/service/sagemaker/monitoring_schedule.go index 4dc615f019b4..0e826cb2793d 100644 --- a/internal/service/sagemaker/monitoring_schedule.go +++ b/internal/service/sagemaker/monitoring_schedule.go @@ -203,40 +203,40 @@ func resourceMonitoringScheduleRead(ctx context.Context, d *schema.ResourceData, return diags } -func flattenMonitoringScheduleConfig(monitoringScheduleConfig *sagemaker.MonitoringScheduleConfig) []map[string]interface{} { - if monitoringScheduleConfig == nil { +func flattenMonitoringScheduleConfig(config *sagemaker.MonitoringScheduleConfig) []map[string]interface{} { + if config == nil { return []map[string]interface{}{} } - spec := map[string]interface{}{} + m := map[string]interface{}{} - if monitoringScheduleConfig.MonitoringJobDefinitionName != nil { - spec["monitoring_job_definition_name"] = aws.StringValue(monitoringScheduleConfig.MonitoringJobDefinitionName) + if config.MonitoringJobDefinitionName != nil { + m["monitoring_job_definition_name"] = aws.StringValue(config.MonitoringJobDefinitionName) } - if monitoringScheduleConfig.MonitoringType != nil { - spec["monitoring_type"] = aws.StringValue(monitoringScheduleConfig.MonitoringType) + if config.MonitoringType != nil { + m["monitoring_type"] = aws.StringValue(config.MonitoringType) } - if monitoringScheduleConfig.ScheduleConfig != nil { - spec["schedule_config"] = flattenScheduleConfig(monitoringScheduleConfig.ScheduleConfig) + if config.ScheduleConfig != nil { + m["schedule_config"] = flattenScheduleConfig(config.ScheduleConfig) } - return []map[string]interface{}{spec} + return []map[string]interface{}{m} } -func flattenScheduleConfig(scheduleConfig *sagemaker.ScheduleConfig) []map[string]interface{} { - if scheduleConfig == nil { 
+func flattenScheduleConfig(config *sagemaker.ScheduleConfig) []map[string]interface{} { + if config == nil { return []map[string]interface{}{} } - spec := map[string]interface{}{} + m := map[string]interface{}{} - if scheduleConfig.ScheduleExpression != nil { - spec["schedule_expression"] = aws.StringValue(scheduleConfig.ScheduleExpression) + if config.ScheduleExpression != nil { + m["schedule_expression"] = aws.StringValue(config.ScheduleExpression) } - return []map[string]interface{}{spec} + return []map[string]interface{}{m} } func resourceMonitoringScheduleUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { From 819a4fbc6cf363300a13da1c59bbbec5e7f7e5d7 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Tue, 28 Mar 2023 15:36:48 -0700 Subject: [PATCH 36/49] remove company test requirements --- .../data_quality_job_definition_test.go | 36 ------------------- .../sagemaker/monitoring_schedule_test.go | 18 ---------- 2 files changed, 54 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index e1f8f778e07f..dc48860e2038 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -631,23 +631,6 @@ func testAccCheckDataQualityJobDefinitionExists(ctx context.Context, n string) r func testAccDataQualityJobDefinitionConfig_batchTransformBase(rName string) string { return fmt.Sprintf(` - -provider "aws" { - region = "us-west-2" - - default_tags { - tags = { - "adsk:moniker" = "AMPSDEMO-C-UW2" - } - } -} - -data "aws_caller_identity" "current" {} - -data "aws_iam_policy" "boundary" { - arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/ADSK-Boundary" -} - data "aws_iam_policy_document" "access" { statement { effect = "Allow" @@ -686,7 +669,6 @@ resource "aws_iam_role" "test" { name = %[1]q path = "/" assume_role_policy = 
data.aws_iam_policy_document.assume_role.json - permissions_boundary = data.aws_iam_policy.boundary.arn } resource "aws_iam_role_policy" "test" { @@ -712,23 +694,6 @@ data "aws_sagemaker_prebuilt_ecr_image" "monitor" { func testAccDataQualityJobDefinitionConfig_endpointBase(rName string) string { return fmt.Sprintf(` - -provider "aws" { - region = "us-west-2" - - default_tags { - tags = { - "adsk:moniker" = "AMPSDEMO-C-UW2" - } - } -} - -data "aws_caller_identity" "current" {} - -data "aws_iam_policy" "boundary" { - arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/ADSK-Boundary" -} - data "aws_iam_policy_document" "access" { statement { effect = "Allow" @@ -767,7 +732,6 @@ resource "aws_iam_role" "test" { name = %[1]q path = "/" assume_role_policy = data.aws_iam_policy_document.assume_role.json - permissions_boundary = data.aws_iam_policy.boundary.arn } resource "aws_iam_role_policy" "test" { diff --git a/internal/service/sagemaker/monitoring_schedule_test.go b/internal/service/sagemaker/monitoring_schedule_test.go index bd2257685a1e..8cfecd139078 100644 --- a/internal/service/sagemaker/monitoring_schedule_test.go +++ b/internal/service/sagemaker/monitoring_schedule_test.go @@ -208,23 +208,6 @@ func testAccCheckMonitoringScheduleExists(ctx context.Context, n string) resourc func testAccMonitoringScheduleConfig_base(rName string) string { return fmt.Sprintf(` - -provider "aws" { - region = "us-west-2" - - default_tags { - tags = { - "adsk:moniker" = "AMPSDEMO-C-UW2" - } - } -} - -data "aws_caller_identity" "current" {} - -data "aws_iam_policy" "boundary" { - arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/ADSK-Boundary" -} - data "aws_iam_policy_document" "access" { statement { effect = "Allow" @@ -263,7 +246,6 @@ resource "aws_iam_role" "test" { name = %[1]q path = "/" assume_role_policy = data.aws_iam_policy_document.assume_role.json - permissions_boundary = data.aws_iam_policy.boundary.arn } resource 
"aws_iam_role_policy" "test" { From a547dc6fe6daf725d9e9e6b2d62ec263f186131a Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Tue, 28 Mar 2023 16:23:44 -0700 Subject: [PATCH 37/49] fix a "monitor" image tag issue --- .../service/sagemaker/data_quality_job_definition_test.go | 4 ++-- internal/service/sagemaker/monitoring_schedule_test.go | 2 +- internal/service/sagemaker/prebuilt_ecr_image_data_source.go | 4 ---- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index dc48860e2038..dd03d3c875b6 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -687,7 +687,7 @@ resource "aws_s3_bucket_acl" "test" { data "aws_sagemaker_prebuilt_ecr_image" "monitor" { repository_name = "sagemaker-model-monitor-analyzer" - image_tag = "" + image_tag = "latest" } `, rName) } @@ -801,7 +801,7 @@ resource "aws_sagemaker_endpoint" "test" { data "aws_sagemaker_prebuilt_ecr_image" "monitor" { repository_name = "sagemaker-model-monitor-analyzer" - image_tag = "" + image_tag = "latest" } `, rName) } diff --git a/internal/service/sagemaker/monitoring_schedule_test.go b/internal/service/sagemaker/monitoring_schedule_test.go index 8cfecd139078..7b21ec2cb2fa 100644 --- a/internal/service/sagemaker/monitoring_schedule_test.go +++ b/internal/service/sagemaker/monitoring_schedule_test.go @@ -264,7 +264,7 @@ resource "aws_s3_bucket_acl" "test" { data "aws_sagemaker_prebuilt_ecr_image" "monitor" { repository_name = "sagemaker-model-monitor-analyzer" - image_tag = "" + image_tag = "latest" } resource "aws_sagemaker_data_quality_job_definition" "test" { diff --git a/internal/service/sagemaker/prebuilt_ecr_image_data_source.go b/internal/service/sagemaker/prebuilt_ecr_image_data_source.go index 2200399746a8..eeba9a2d2a00 100644 --- 
a/internal/service/sagemaker/prebuilt_ecr_image_data_source.go +++ b/internal/service/sagemaker/prebuilt_ecr_image_data_source.go @@ -475,9 +475,5 @@ func dataSourcePrebuiltECRImageRead(ctx context.Context, d *schema.ResourceData, } func PrebuiltECRImageCreatePath(id, region, suffix, repo, imageTag string) string { - if imageTag == "" { - return fmt.Sprintf("%s.dkr.ecr.%s.%s/%s", id, region, suffix, repo) - } - return fmt.Sprintf("%s.dkr.ecr.%s.%s/%s:%s", id, region, suffix, repo, imageTag) } From b2487f98788efbc562c05f54a8f8b48573e85f36 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Tue, 28 Mar 2023 16:37:53 -0700 Subject: [PATCH 38/49] add changelog for PR --- .changelog/30301.txt | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 .changelog/30301.txt diff --git a/.changelog/30301.txt b/.changelog/30301.txt new file mode 100644 index 000000000000..5a52281156d6 --- /dev/null +++ b/.changelog/30301.txt @@ -0,0 +1,11 @@ +```release-note:new-resource +aws_sagemaker_monitoring_schedule +``` + +```release-note:new-resource +aws_sagemaker_data_quality_job_definition +``` + +```release-note:enhancement +data-source/aws_sagemaker_prebuilt_ecr_image: Added sagemaker-model-monitor-analyzer images +``` From 891884a0df7f89592232add19da97802129d7697 Mon Sep 17 00:00:00 2001 From: Bill Booth Date: Thu, 6 Apr 2023 21:29:37 -0700 Subject: [PATCH 39/49] remove monitoring schedule from branch --- .changelog/30301.txt | 4 - internal/service/sagemaker/find.go | 25 -- .../service/sagemaker/monitoring_schedule.go | 300 -------------- .../sagemaker/monitoring_schedule_test.go | 375 ------------------ .../service/sagemaker/service_package_gen.go | 4 - internal/service/sagemaker/status.go | 16 - internal/service/sagemaker/wait.go | 94 ++--- ...agemaker_monitoring_schedule.html.markdown | 58 --- 8 files changed, 25 insertions(+), 851 deletions(-) delete mode 100644 internal/service/sagemaker/monitoring_schedule.go delete mode 100644 
internal/service/sagemaker/monitoring_schedule_test.go delete mode 100644 website/docs/r/sagemaker_monitoring_schedule.html.markdown diff --git a/.changelog/30301.txt b/.changelog/30301.txt index 5a52281156d6..325a10aa5570 100644 --- a/.changelog/30301.txt +++ b/.changelog/30301.txt @@ -1,7 +1,3 @@ -```release-note:new-resource -aws_sagemaker_monitoring_schedule -``` - ```release-note:new-resource aws_sagemaker_data_quality_job_definition ``` diff --git a/internal/service/sagemaker/find.go b/internal/service/sagemaker/find.go index 662e1bc58042..ab003ec98f0d 100644 --- a/internal/service/sagemaker/find.go +++ b/internal/service/sagemaker/find.go @@ -522,31 +522,6 @@ func FindDataQualityJobDefinitionByName(ctx context.Context, conn *sagemaker.Sag return output, nil } -func FindMonitoringScheduleByName(ctx context.Context, conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeMonitoringScheduleOutput, error) { - input := &sagemaker.DescribeMonitoringScheduleInput{ - MonitoringScheduleName: aws.String(name), - } - - output, err := conn.DescribeMonitoringScheduleWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { - return nil, &resource.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output, nil -} - func FindFlowDefinitionByName(ctx context.Context, conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeFlowDefinitionOutput, error) { input := &sagemaker.DescribeFlowDefinitionInput{ FlowDefinitionName: aws.String(name), diff --git a/internal/service/sagemaker/monitoring_schedule.go b/internal/service/sagemaker/monitoring_schedule.go deleted file mode 100644 index 0e826cb2793d..000000000000 --- a/internal/service/sagemaker/monitoring_schedule.go +++ /dev/null @@ -1,300 +0,0 @@ -package sagemaker - -import ( - "context" - "log" - "regexp" - - 
"github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/sagemaker" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" - tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/verify" -) - -// @SDKResource("aws_sagemaker_monitoring_schedule") -func ResourceMonitoringSchedule() *schema.Resource { - return &schema.Resource{ - CreateWithoutTimeout: resourceMonitoringScheduleCreate, - ReadWithoutTimeout: resourceMonitoringScheduleRead, - UpdateWithoutTimeout: resourceMonitoringScheduleUpdate, - DeleteWithoutTimeout: resourceMonitoringScheduleDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validName, - }, - "monitoring_schedule_config": { - Type: schema.TypeList, - MaxItems: 1, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "monitoring_job_definition_name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validName, - }, - "monitoring_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(sagemaker.MonitoringType_Values(), false), - }, - "schedule_config": { - Type: schema.TypeList, - MaxItems: 1, - Computed: true, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "schedule_expression": { - Type: schema.TypeString, - 
Required: true, - ValidateFunc: validation.All( - validation.StringMatch(regexp.MustCompile(`^cron`), ""), - validation.StringLenBetween(1, 512), - ), - }, - }, - }, - }, - }, - }, - }, - "tags": tftags.TagsSchema(), - "tags_all": tftags.TagsSchemaComputed(), - }, - CustomizeDiff: verify.SetTagsDiff, - } -} - -func resourceMonitoringScheduleCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SageMakerConn() - defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig - tags := defaultTagsConfig.MergeTags(tftags.New(ctx, d.Get("tags").(map[string]interface{}))) - - var name string - if v, ok := d.GetOk("name"); ok { - name = v.(string) - } else { - name = resource.UniqueId() - } - - createOpts := &sagemaker.CreateMonitoringScheduleInput{ - MonitoringScheduleName: aws.String(name), - MonitoringScheduleConfig: expandMonitoringScheduleConfig(d.Get("monitoring_schedule_config").([]interface{})), - } - - if len(tags) > 0 { - createOpts.Tags = Tags(tags.IgnoreAWS()) - } - - log.Printf("[DEBUG] SageMaker Monitoring Schedule create config: %#v", *createOpts) - _, err := conn.CreateMonitoringScheduleWithContext(ctx, createOpts) - if err != nil { - return sdkdiag.AppendErrorf(diags, "creating SageMaker Monitoring Schedule: %s", err) - } - d.SetId(name) - if _, err := WaitMonitoringScheduleScheduled(ctx, conn, d.Id()); err != nil { - return sdkdiag.AppendErrorf(diags, "creating SageMaker Monitoring Schedule (%s): waiting for completion: %s", d.Id(), err) - } - - return append(diags, resourceMonitoringScheduleRead(ctx, d, meta)...) 
-} - -func expandMonitoringScheduleConfig(configured []interface{}) *sagemaker.MonitoringScheduleConfig { - if len(configured) == 0 { - return nil - } - - m := configured[0].(map[string]interface{}) - - c := &sagemaker.MonitoringScheduleConfig{} - - if v, ok := m["monitoring_job_definition_name"].(string); ok && v != "" { - c.MonitoringJobDefinitionName = aws.String(v) - } - - if v, ok := m["monitoring_type"].(string); ok && v != "" { - c.MonitoringType = aws.String(v) - } - - if v, ok := m["schedule_config"].([]interface{}); ok && len(v) > 0 { - c.ScheduleConfig = expandScheduleConfig(v) - } - - return c -} - -func expandScheduleConfig(configured []interface{}) *sagemaker.ScheduleConfig { - if len(configured) == 0 { - return nil - } - - m := configured[0].(map[string]interface{}) - - c := &sagemaker.ScheduleConfig{} - - if v, ok := m["schedule_expression"].(string); ok && v != "" { - c.ScheduleExpression = aws.String(v) - } - - return c -} - -func resourceMonitoringScheduleRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SageMakerConn() - defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig - - monitoringSchedule, err := FindMonitoringScheduleByName(ctx, conn, d.Id()) - - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { - log.Printf("[WARN] SageMaker Monitoring Schedule (%s) not found, removing from state", d.Id()) - d.SetId("") - return diags - } - - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading SageMaker Monitoring Schedule (%s): %s", d.Id(), err) - } - - d.Set("arn", monitoringSchedule.MonitoringScheduleArn) - d.Set("name", monitoringSchedule.MonitoringScheduleName) - - if err := d.Set("monitoring_schedule_config", flattenMonitoringScheduleConfig(monitoringSchedule.MonitoringScheduleConfig)); err != nil { - return 
sdkdiag.AppendErrorf(diags, "setting monitoring_schedule_config for SageMaker Monitoring Schedule (%s): %s", d.Id(), err) - } - - tags, err := ListTags(ctx, conn, aws.StringValue(monitoringSchedule.MonitoringScheduleArn)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "listing tags for SageMaker Monitoring Schedule (%s): %s", d.Id(), err) - } - - tags = tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig) - - //lintignore:AWSR002 - if err := d.Set("tags", tags.RemoveDefaultConfig(defaultTagsConfig).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) - } - - if err := d.Set("tags_all", tags.Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags_all: %s", err) - } - - return diags -} - -func flattenMonitoringScheduleConfig(config *sagemaker.MonitoringScheduleConfig) []map[string]interface{} { - if config == nil { - return []map[string]interface{}{} - } - - m := map[string]interface{}{} - - if config.MonitoringJobDefinitionName != nil { - m["monitoring_job_definition_name"] = aws.StringValue(config.MonitoringJobDefinitionName) - } - - if config.MonitoringType != nil { - m["monitoring_type"] = aws.StringValue(config.MonitoringType) - } - - if config.ScheduleConfig != nil { - m["schedule_config"] = flattenScheduleConfig(config.ScheduleConfig) - } - - return []map[string]interface{}{m} -} - -func flattenScheduleConfig(config *sagemaker.ScheduleConfig) []map[string]interface{} { - if config == nil { - return []map[string]interface{}{} - } - - m := map[string]interface{}{} - - if config.ScheduleExpression != nil { - m["schedule_expression"] = aws.StringValue(config.ScheduleExpression) - } - - return []map[string]interface{}{m} -} - -func resourceMonitoringScheduleUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SageMakerConn() - - if d.HasChange("tags_all") { - o, n := d.GetChange("tags_all") - - if err := UpdateTags(ctx, 
conn, d.Get("arn").(string), o, n); err != nil { - return sdkdiag.AppendErrorf(diags, "updating SageMaker Monitoring Schedule (%s) tags: %s", d.Id(), err) - } - } - - if d.HasChanges("monitoring_schedule_config") { - modifyOpts := &sagemaker.UpdateMonitoringScheduleInput{ - MonitoringScheduleName: aws.String(d.Id()), - } - - if v, ok := d.GetOk("monitoring_schedule_config"); ok && (len(v.([]interface{})) > 0) { - modifyOpts.MonitoringScheduleConfig = expandMonitoringScheduleConfig(v.([]interface{})) - } - - log.Printf("[INFO] Modifying monitoring_schedule_config attribute for %s: %#v", d.Id(), modifyOpts) - if _, err := conn.UpdateMonitoringScheduleWithContext(ctx, modifyOpts); err != nil { - return sdkdiag.AppendErrorf(diags, "updating SageMaker Monitoring Schedule (%s): %s", d.Id(), err) - } - if _, err := WaitMonitoringScheduleScheduled(ctx, conn, d.Id()); err != nil { - return sdkdiag.AppendErrorf(diags, "creating SageMaker Monitoring Schedule (%s): waiting for completion: %s", d.Id(), err) - } - } - - return append(diags, resourceMonitoringScheduleRead(ctx, d, meta)...) 
-} - -func resourceMonitoringScheduleDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SageMakerConn() - - deleteOpts := &sagemaker.DeleteMonitoringScheduleInput{ - MonitoringScheduleName: aws.String(d.Id()), - } - log.Printf("[INFO] Deleting SageMaker Monitoring Schedule : %s", d.Id()) - - _, err := conn.DeleteMonitoringScheduleWithContext(ctx, deleteOpts) - - if tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { - return diags - } - - if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting SageMaker Monitoring Schedule (%s): %s", d.Id(), err) - } - - if _, err := WaitMonitoringScheduleNotFound(ctx, conn, d.Id()); err != nil { - if !tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { - return sdkdiag.AppendErrorf(diags, "waiting for SageMaker Monitoring Schedule (%s) to stop: %s", d.Id(), err) - } - } - return diags -} diff --git a/internal/service/sagemaker/monitoring_schedule_test.go b/internal/service/sagemaker/monitoring_schedule_test.go deleted file mode 100644 index 7b21ec2cb2fa..000000000000 --- a/internal/service/sagemaker/monitoring_schedule_test.go +++ /dev/null @@ -1,375 +0,0 @@ -package sagemaker_test - -import ( - "context" - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/service/sagemaker" - sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "github.com/hashicorp/terraform-provider-aws/internal/acctest" - "github.com/hashicorp/terraform-provider-aws/internal/conns" - tfsagemaker "github.com/hashicorp/terraform-provider-aws/internal/service/sagemaker" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func TestAccSageMakerMonitoringSchedule_basic(t *testing.T) { - ctx := acctest.Context(t) - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_sagemaker_monitoring_schedule.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckMonitoringScheduleDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccMonitoringScheduleConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckMonitoringScheduleExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "name", rName), - acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("monitoring-schedule/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "monitoring_schedule_config.0.monitoring_job_definition_name", "aws_sagemaker_data_quality_job_definition.test", "name"), - resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.0.monitoring_type", "DataQuality"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccSageMakerMonitoringSchedule_tags(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_sagemaker_monitoring_schedule.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckMonitoringScheduleDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccMonitoringScheduleConfig_tags1(rName, "key1", "value1"), - Check: resource.ComposeTestCheckFunc( - testAccCheckDataQualityJobDefinitionExists(ctx, 
resourceName), - resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), - resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccMonitoringScheduleConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), - Check: resource.ComposeTestCheckFunc( - testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), - resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), - resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), - ), - }, - { - Config: testAccMonitoringScheduleConfig_tags1(rName, "key2", "value2"), - Check: resource.ComposeTestCheckFunc( - testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), - resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), - ), - }, - }, - }) -} - -func TestAccSageMakerMonitoringSchedule_scheduleExpression(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_sagemaker_monitoring_schedule.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckMonitoringScheduleDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccMonitoringScheduleConfig_scheduleExpressionHourly(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.0.schedule_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, 
"monitoring_schedule_config.0.schedule_config.0.schedule_expression", "cron(0 * ? * * *)"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccMonitoringScheduleConfig_scheduleExpressionDaily(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.0.schedule_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.0.schedule_config.0.schedule_expression", "cron(0 0 ? * * *)"), - ), - }, - { - Config: testAccMonitoringScheduleConfig_scheduleExpressionHourly(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.0.schedule_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "monitoring_schedule_config.0.schedule_config.0.schedule_expression", "cron(0 * ? 
* * *)"), - ), - }, - }, - }) -} - -func TestAccSageMakerMonitoringSchedule_disappears(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_sagemaker_monitoring_schedule.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckMonitoringScheduleDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccMonitoringScheduleConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckMonitoringScheduleExists(ctx, resourceName), - acctest.CheckResourceDisappears(ctx, acctest.Provider, tfsagemaker.ResourceMonitoringSchedule(), resourceName), - acctest.CheckResourceDisappears(ctx, acctest.Provider, tfsagemaker.ResourceMonitoringSchedule(), resourceName), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccCheckMonitoringScheduleDestroy(ctx context.Context) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).SageMakerConn() - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_sagemaker_monitoring_schedule" { - continue - } - - _, err := tfsagemaker.FindMonitoringScheduleByName(ctx, conn, rs.Primary.ID) - - if tfresource.NotFound(err) { - continue - } - - if err != nil { - return err - } - - return fmt.Errorf("SageMaker Monitoring Schedule (%s) still exists", rs.Primary.ID) - } - return nil - } -} - -func testAccCheckMonitoringScheduleExists(ctx context.Context, n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("no SageMaker Monitoring Schedule ID is set") - } - - conn := 
acctest.Provider.Meta().(*conns.AWSClient).SageMakerConn() - _, err := tfsagemaker.FindMonitoringScheduleByName(ctx, conn, rs.Primary.ID) - - return err - } -} - -func testAccMonitoringScheduleConfig_base(rName string) string { - return fmt.Sprintf(` -data "aws_iam_policy_document" "access" { - statement { - effect = "Allow" - - actions = [ - "cloudwatch:PutMetricData", - "logs:CreateLogStream", - "logs:PutLogEvents", - "logs:CreateLogGroup", - "logs:DescribeLogStreams", - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:BatchGetImage", - "s3:GetObject", - ] - - resources = ["*"] - } -} - -data "aws_partition" "current" {} - -data "aws_iam_policy_document" "assume_role" { - statement { - actions = ["sts:AssumeRole"] - - principals { - type = "Service" - identifiers = ["sagemaker.${data.aws_partition.current.dns_suffix}"] - } - } -} - -resource "aws_iam_role" "test" { - name = %[1]q - path = "/" - assume_role_policy = data.aws_iam_policy_document.assume_role.json -} - -resource "aws_iam_role_policy" "test" { - role = aws_iam_role.test.name - policy = data.aws_iam_policy_document.access.json -} - -resource "aws_s3_bucket" "test" { - bucket = %[1]q -} - -resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.test.id - acl = "private" -} - -data "aws_sagemaker_prebuilt_ecr_image" "monitor" { - repository_name = "sagemaker-model-monitor-analyzer" - image_tag = "latest" -} - -resource "aws_sagemaker_data_quality_job_definition" "test" { - name = %[1]q - data_quality_app_specification { - image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path - } - data_quality_job_input { - batch_transform_input { - data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" - dataset_format { - csv {} - } - } - } - data_quality_job_output_config { - monitoring_outputs { - s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" - } 
- } - } - job_resources { - cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" - volume_size_in_gb = 20 - } - } - role_arn = aws_iam_role.test.arn -} -`, rName) -} - -func testAccMonitoringScheduleConfig_basic(rName string) string { - return testAccMonitoringScheduleConfig_base(rName) + fmt.Sprintf(` -resource "aws_sagemaker_monitoring_schedule" "test" { - name = %[1]q - monitoring_schedule_config { - monitoring_job_definition_name = aws_sagemaker_data_quality_job_definition.test.name - monitoring_type = "DataQuality" - } -} -`, rName) -} - -func testAccMonitoringScheduleConfig_tags1(rName string, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose(testAccMonitoringScheduleConfig_base(rName), fmt.Sprintf(` -resource "aws_sagemaker_monitoring_schedule" "test" { - name = %[1]q - monitoring_schedule_config { - monitoring_job_definition_name = aws_sagemaker_data_quality_job_definition.test.name - monitoring_type = "DataQuality" - } - - tags = { - %[2]q = %[3]q - } -} -`, rName, tagKey1, tagValue1)) -} - -func testAccMonitoringScheduleConfig_tags2(rName string, tagKey1, tagValue1 string, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose(testAccMonitoringScheduleConfig_base(rName), fmt.Sprintf(` -resource "aws_sagemaker_monitoring_schedule" "test" { - name = %[1]q - monitoring_schedule_config { - monitoring_job_definition_name = aws_sagemaker_data_quality_job_definition.test.name - monitoring_type = "DataQuality" - } - - tags = { - %[2]q = %[3]q - %[4]q = %[5]q - } -} -`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) -} - -func testAccMonitoringScheduleConfig_scheduleExpressionHourly(rName string) string { - return acctest.ConfigCompose(testAccMonitoringScheduleConfig_base(rName), fmt.Sprintf(` -resource "aws_sagemaker_monitoring_schedule" "test" { - name = %[1]q - monitoring_schedule_config { - monitoring_job_definition_name = aws_sagemaker_data_quality_job_definition.test.name - monitoring_type = "DataQuality" - 
schedule_config { - schedule_expression = "cron(0 * ? * * *)" - } - } -} -`, rName)) -} - -func testAccMonitoringScheduleConfig_scheduleExpressionDaily(rName string) string { - return acctest.ConfigCompose(testAccMonitoringScheduleConfig_base(rName), fmt.Sprintf(` -resource "aws_sagemaker_monitoring_schedule" "test" { - name = %[1]q - monitoring_schedule_config { - monitoring_job_definition_name = aws_sagemaker_data_quality_job_definition.test.name - monitoring_type = "DataQuality" - schedule_config { - schedule_expression = "cron(0 0 ? * * *)" - } - } -} -`, rName)) -} diff --git a/internal/service/sagemaker/service_package_gen.go b/internal/service/sagemaker/service_package_gen.go index bd7c128e3581..60507e5f3b9f 100644 --- a/internal/service/sagemaker/service_package_gen.go +++ b/internal/service/sagemaker/service_package_gen.go @@ -98,10 +98,6 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka Factory: ResourceModelPackageGroupPolicy, TypeName: "aws_sagemaker_model_package_group_policy", }, - { - Factory: ResourceMonitoringSchedule, - TypeName: "aws_sagemaker_monitoring_schedule", - }, { Factory: ResourceNotebookInstance, TypeName: "aws_sagemaker_notebook_instance", diff --git a/internal/service/sagemaker/status.go b/internal/service/sagemaker/status.go index 820590c0dcbf..12847414536d 100644 --- a/internal/service/sagemaker/status.go +++ b/internal/service/sagemaker/status.go @@ -250,19 +250,3 @@ func StatusSpace(ctx context.Context, conn *sagemaker.SageMaker, domainId, name return output, aws.StringValue(output.Status), nil } } - -func StatusMonitoringSchedule(ctx context.Context, conn *sagemaker.SageMaker, name string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindMonitoringScheduleByName(ctx, conn, name) - - if tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, 
aws.StringValue(output.MonitoringScheduleStatus), nil - } -} diff --git a/internal/service/sagemaker/wait.go b/internal/service/sagemaker/wait.go index 115e89465dbe..12c155ad0dcc 100644 --- a/internal/service/sagemaker/wait.go +++ b/internal/service/sagemaker/wait.go @@ -12,33 +12,31 @@ import ( ) const ( - NotebookInstanceInServiceTimeout = 60 * time.Minute - NotebookInstanceStoppedTimeout = 10 * time.Minute - NotebookInstanceDeletedTimeout = 10 * time.Minute - ModelPackageGroupCompletedTimeout = 10 * time.Minute - ModelPackageGroupDeletedTimeout = 10 * time.Minute - ImageCreatedTimeout = 10 * time.Minute - ImageDeletedTimeout = 10 * time.Minute - ImageVersionCreatedTimeout = 10 * time.Minute - ImageVersionDeletedTimeout = 10 * time.Minute - DomainInServiceTimeout = 10 * time.Minute - DomainDeletedTimeout = 10 * time.Minute - FeatureGroupCreatedTimeout = 10 * time.Minute - FeatureGroupDeletedTimeout = 10 * time.Minute - UserProfileInServiceTimeout = 10 * time.Minute - UserProfileDeletedTimeout = 10 * time.Minute - AppInServiceTimeout = 10 * time.Minute - AppDeletedTimeout = 10 * time.Minute - FlowDefinitionActiveTimeout = 2 * time.Minute - FlowDefinitionDeletedTimeout = 2 * time.Minute - ProjectCreatedTimeout = 15 * time.Minute - ProjectDeletedTimeout = 15 * time.Minute - WorkforceActiveTimeout = 10 * time.Minute - WorkforceDeletedTimeout = 10 * time.Minute - SpaceDeletedTimeout = 10 * time.Minute - SpaceInServiceTimeout = 10 * time.Minute - MonitoringScheduleScheduledTimeout = 2 * time.Minute - MonitoringScheduleStoppedTimeout = 2 * time.Minute + NotebookInstanceInServiceTimeout = 60 * time.Minute + NotebookInstanceStoppedTimeout = 10 * time.Minute + NotebookInstanceDeletedTimeout = 10 * time.Minute + ModelPackageGroupCompletedTimeout = 10 * time.Minute + ModelPackageGroupDeletedTimeout = 10 * time.Minute + ImageCreatedTimeout = 10 * time.Minute + ImageDeletedTimeout = 10 * time.Minute + ImageVersionCreatedTimeout = 10 * time.Minute + ImageVersionDeletedTimeout = 
10 * time.Minute + DomainInServiceTimeout = 10 * time.Minute + DomainDeletedTimeout = 10 * time.Minute + FeatureGroupCreatedTimeout = 10 * time.Minute + FeatureGroupDeletedTimeout = 10 * time.Minute + UserProfileInServiceTimeout = 10 * time.Minute + UserProfileDeletedTimeout = 10 * time.Minute + AppInServiceTimeout = 10 * time.Minute + AppDeletedTimeout = 10 * time.Minute + FlowDefinitionActiveTimeout = 2 * time.Minute + FlowDefinitionDeletedTimeout = 2 * time.Minute + ProjectCreatedTimeout = 15 * time.Minute + ProjectDeletedTimeout = 15 * time.Minute + WorkforceActiveTimeout = 10 * time.Minute + WorkforceDeletedTimeout = 10 * time.Minute + SpaceDeletedTimeout = 10 * time.Minute + SpaceInServiceTimeout = 10 * time.Minute ) // WaitNotebookInstanceInService waits for a NotebookInstance to return InService @@ -646,45 +644,3 @@ func WaitSpaceDeleted(ctx context.Context, conn *sagemaker.SageMaker, domainId, return nil, err } - -func WaitMonitoringScheduleScheduled(ctx context.Context, conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeMonitoringScheduleOutput, error) { - stateConf := &resource.StateChangeConf{ - Pending: []string{sagemaker.ScheduleStatusPending}, - Target: []string{sagemaker.ScheduleStatusScheduled}, - Refresh: StatusMonitoringSchedule(ctx, conn, name), - Timeout: MonitoringScheduleScheduledTimeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*sagemaker.DescribeMonitoringScheduleOutput); ok { - if status, reason := aws.StringValue(output.MonitoringScheduleStatus), aws.StringValue(output.FailureReason); status == sagemaker.ScheduleStatusFailed && reason != "" { - tfresource.SetLastError(err, errors.New(reason)) - } - - return output, err - } - - return nil, err -} - -func WaitMonitoringScheduleNotFound(ctx context.Context, conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeMonitoringScheduleOutput, error) { - stateConf := &resource.StateChangeConf{ - Pending: 
[]string{sagemaker.ScheduleStatusScheduled, sagemaker.ScheduleStatusPending, sagemaker.ScheduleStatusStopped}, - Target: []string{}, - Refresh: StatusMonitoringSchedule(ctx, conn, name), - Timeout: MonitoringScheduleStoppedTimeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*sagemaker.DescribeMonitoringScheduleOutput); ok { - if status, reason := aws.StringValue(output.MonitoringScheduleStatus), aws.StringValue(output.FailureReason); status == sagemaker.ScheduleStatusFailed && reason != "" { - tfresource.SetLastError(err, errors.New(reason)) - } - - return output, err - } - - return nil, err -} diff --git a/website/docs/r/sagemaker_monitoring_schedule.html.markdown b/website/docs/r/sagemaker_monitoring_schedule.html.markdown deleted file mode 100644 index 00311ca42dc0..000000000000 --- a/website/docs/r/sagemaker_monitoring_schedule.html.markdown +++ /dev/null @@ -1,58 +0,0 @@ ---- -subcategory: "SageMaker" -layout: "aws" -page_title: "AWS: aws_sagemaker_monitoring_schedule" -description: |- - Provides a SageMaker Monitoring Schedule resource. ---- - -# Resource: aws_sagemaker_data_quality_job_definition - -Provides a SageMaker data quality job definition resource. - -## Example Usage - -Basic usage: - -```terraform -resource "aws_sagemaker_monitoring_schedule" "test" { - name = "my-monitoring-schedule" - monitoring_schedule_config { - monitoring_job_definition_name = aws_sagemaker_data_quality_job_definition.test.name - monitoring_type = "DataQuality" - } -} -``` - -## Argument Reference - -The following arguments are supported: - -* `monitoring_schedule_config` - (Required) The configuration object that specifies the monitoring schedule and defines the monitoring job. Fields are documented below. -* `name` - (Optional) The name of the monitoring schedule. The name must be unique within an AWS Region within an AWS account. If omitted, Terraform will assign a random, unique name. 
-* `tags` - (Optional) A mapping of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. - -### monitoring_schedule_config - -* `monitoring_job_definition_name` - (Required) The name of the monitoring job definition to schedule. -* `monitoring_type` - (Required) The type of the monitoring job definition to schedule. Valid values are `DataQuality`, `ModelQuality`, `ModelBias` or `ModelExplainability` -* `schedule_config` - (Optional) Configures the monitoring schedule. Fields are documented below. - -#### schedule_config -* `schedule_expression` - (Required) A cron expression that describes details about the monitoring schedule. For example, and hourly schedule would be `cron(0 * ? * * *)`. - -## Attributes Reference - -In addition to all arguments above, the following attributes are exported: - -* `arn` - The Amazon Resource Name (ARN) assigned by AWS to this monitoring schedule. -* `name` - The name of the monitoring schedule. -* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). - -## Import - -Monitoring schedules can be imported using the `name`, e.g., - -``` -$ terraform import aws_sagemaker_monitoring_schedule.test_monitoring_schedule monitoring-schedule-foo -``` From 3551c2f124e4eb15f759844ad9cef65f008edc27 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 11 Apr 2023 16:39:00 -0400 Subject: [PATCH 40/49] r/aws_sagemaker_data_quality_job_definition: Fix acceptance test terrafmt errors. 
--- .../data_quality_job_definition_test.go | 211 +++++++++--------- 1 file changed, 106 insertions(+), 105 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index dd03d3c875b6..5dd0d7605d0b 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -680,11 +680,6 @@ resource "aws_s3_bucket" "test" { bucket = %[1]q } -resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.test.id - acl = "private" -} - data "aws_sagemaker_prebuilt_ecr_image" "monitor" { repository_name = "sagemaker-model-monitor-analyzer" image_tag = "latest" @@ -743,11 +738,6 @@ resource "aws_s3_bucket" "test" { bucket = %[1]q } -resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.test.id - acl = "private" -} - resource "aws_s3_object" "test" { bucket = aws_s3_bucket.test.id key = "model.tar.gz" @@ -784,7 +774,9 @@ resource "aws_sagemaker_endpoint_configuration" "test" { data_capture_config { initial_sampling_percentage = 100 + destination_s3_uri = "s3://${aws_s3_bucket.test.bucket_regional_domain_name}/capture" + capture_options { capture_mode = "Input" } @@ -807,9 +799,9 @@ data "aws_sagemaker_prebuilt_ecr_image" "monitor" { } func testAccDataQualityJobDefinitionConfig_endpointBasic(rName string) string { - return testAccDataQualityJobDefinitionConfig_endpointBase(rName) + fmt.Sprintf(` + return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_endpointBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { - name = %[1]q + name = %[1]q data_quality_app_specification { image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path } @@ -821,24 +813,24 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = 
"https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } job_resources { cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" + instance_count = 1 + instance_type = "ml.t3.medium" volume_size_in_gb = 20 } } role_arn = aws_iam_role.test.arn } -`, rName) +`, rName)) } func testAccDataQualityJobDefinitionConfig_appSpecificationOptional(rName string) string { - return testAccDataQualityJobDefinitionConfig_batchTransformBase(rName) + fmt.Sprintf(` + return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q data_quality_app_specification { @@ -846,7 +838,7 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { environment = { foo = "bar" } - record_preprocessor_source_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/pre.sh" + record_preprocessor_source_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/pre.sh" post_analytics_processor_source_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/post.sh" } data_quality_job_input { @@ -860,26 +852,26 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } job_resources { cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" + instance_count = 1 + instance_type = "ml.t3.medium" volume_size_in_gb = 20 } } role_arn = aws_iam_role.test.arn } -`, rName) +`, rName)) } func testAccDataQualityJobDefinitionConfig_baselineConfig(rName string) string { - return testAccDataQualityJobDefinitionConfig_batchTransformBase(rName) + fmt.Sprintf(` + return 
acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { - name = %[1]q + name = %[1]q data_quality_app_specification { image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path } @@ -902,26 +894,26 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } job_resources { cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" + instance_count = 1 + instance_type = "ml.t3.medium" volume_size_in_gb = 20 } } role_arn = aws_iam_role.test.arn } -`, rName) +`, rName)) } func testAccDataQualityJobDefinitionConfig_batchTransformBasic(rName string) string { return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { - name = %[1]q + name = %[1]q data_quality_app_specification { image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path } @@ -936,14 +928,14 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } job_resources { cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" + instance_count = 1 + instance_type = "ml.t3.medium" volume_size_in_gb = 20 } } @@ -955,7 +947,7 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { func testAccDataQualityJobDefinitionConfig_batchTransformCsvHeader(rName string) string { return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` 
resource "aws_sagemaker_data_quality_job_definition" "test" { - name = %[1]q + name = %[1]q data_quality_app_specification { image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path } @@ -964,22 +956,22 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" dataset_format { csv { - header = true - } + header = true + } } } } data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } job_resources { cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" + instance_count = 1 + instance_type = "ml.t3.medium" volume_size_in_gb = 20 } } @@ -991,7 +983,7 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { func testAccDataQualityJobDefinitionConfig_batchTransformJson(rName string) string { return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { - name = %[1]q + name = %[1]q data_quality_app_specification { image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path } @@ -1006,14 +998,14 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } job_resources { cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" + instance_count = 1 + instance_type = "ml.t3.medium" volume_size_in_gb = 20 } } @@ -1025,7 +1017,7 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { func testAccDataQualityJobDefinitionConfig_batchTransformJsonLine(rName string) string { return 
acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { - name = %[1]q + name = %[1]q data_quality_app_specification { image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path } @@ -1034,15 +1026,15 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" dataset_format { json { - line = true - } + line = true + } } } } data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } @@ -1061,7 +1053,7 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { func testAccDataQualityJobDefinitionConfig_batchTransformOptional(rName string) string { return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { - name = %[1]q + name = %[1]q data_quality_app_specification { image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path } @@ -1071,22 +1063,22 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { dataset_format { csv {} } - local_path = "/opt/ml/processing/local_path" + local_path = "/opt/ml/processing/local_path" s3_data_distribution_type = "ShardedByS3Key" - s3_input_mode = "Pipe" + s3_input_mode = "Pipe" } } data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } job_resources { cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" + instance_count = 1 + instance_type = "ml.t3.medium" volume_size_in_gb = 20 } } @@ -1096,37 +1088,37 @@ resource 
"aws_sagemaker_data_quality_job_definition" "test" { } func testAccDataQualityJobDefinitionConfig_endpointOptional(rName string) string { - return testAccDataQualityJobDefinitionConfig_endpointBase(rName) + fmt.Sprintf(` + return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_endpointBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { - name = %[1]q + name = %[1]q data_quality_app_specification { image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path } data_quality_job_input { endpoint_input { - endpoint_name = aws_sagemaker_endpoint.test.name - local_path = "/opt/ml/processing/local_path" + endpoint_name = aws_sagemaker_endpoint.test.name + local_path = "/opt/ml/processing/local_path" s3_data_distribution_type = "ShardedByS3Key" - s3_input_mode = "Pipe" + s3_input_mode = "Pipe" } } data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } job_resources { cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" + instance_count = 1 + instance_type = "ml.t3.medium" volume_size_in_gb = 20 } } role_arn = aws_iam_role.test.arn } -`, rName) +`, rName)) } func testAccDataQualityJobDefinitionConfig_outputConfigKmsKeyId(rName string) string { @@ -1137,7 +1129,7 @@ resource "aws_kms_key" "test" { } resource "aws_sagemaker_data_quality_job_definition" "test" { - name = %[1]q + name = %[1]q data_quality_app_specification { image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path } @@ -1153,14 +1145,14 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { kms_key_id = aws_kms_key.test.arn monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } job_resources { 
cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" + instance_count = 1 + instance_type = "ml.t3.medium" volume_size_in_gb = 20 } } @@ -1172,7 +1164,7 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { func testAccDataQualityJobDefinitionConfig_outputConfigOptional(rName string) string { return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { - name = %[1]q + name = %[1]q data_quality_app_specification { image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path } @@ -1187,16 +1179,16 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" - s3_upload_mode = "Continuous" - local_path = "/opt/ml/processing/local_path" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_upload_mode = "Continuous" + local_path = "/opt/ml/processing/local_path" } } } job_resources { cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" + instance_count = 1 + instance_type = "ml.t3.medium" volume_size_in_gb = 20 } } @@ -1213,7 +1205,7 @@ resource "aws_kms_key" "test" { } resource "aws_sagemaker_data_quality_job_definition" "test" { - name = %[1]q + name = %[1]q data_quality_app_specification { image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path } @@ -1228,14 +1220,14 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } job_resources { cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" + instance_count = 1 + instance_type = "ml.t3.medium" volume_size_in_gb = 
20 volume_kms_key_id = aws_kms_key.test.arn } @@ -1248,7 +1240,7 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { func testAccDataQualityJobDefinitionConfig_stoppingCondition(rName string) string { return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { - name = %[1]q + name = %[1]q data_quality_app_specification { image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path } @@ -1263,14 +1255,14 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } job_resources { cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" + instance_count = 1 + instance_type = "ml.t3.medium" volume_size_in_gb = 20 } } @@ -1285,7 +1277,7 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { func testAccDataQualityJobDefinitionConfig_tags1(rName string, tagKey1, tagValue1 string) string { return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { - name = %[1]q + name = %[1]q data_quality_app_specification { image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path } @@ -1300,14 +1292,14 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } job_resources { cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" + instance_count = 1 + instance_type = "ml.t3.medium" volume_size_in_gb = 20 } } @@ -1323,7 +1315,7 @@ 
resource "aws_sagemaker_data_quality_job_definition" "test" { func testAccDataQualityJobDefinitionConfig_tags2(rName string, tagKey1, tagValue1 string, tagKey2, tagValue2 string) string { return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { - name = %[1]q + name = %[1]q data_quality_app_specification { image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path } @@ -1338,14 +1330,14 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } job_resources { cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" + instance_count = 1 + instance_type = "ml.t3.medium" volume_size_in_gb = 20 } } @@ -1364,15 +1356,18 @@ func testAccDataQualityJobDefinitionConfig_networkConfig(rName string) string { acctest.ConfigVPCWithSubnets(rName, 1), testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` - resource "aws_security_group" "test" { count = 1 name = "%[1]s-${count.index}" + + tags = { + Name = %[1]q + } } resource "aws_sagemaker_data_quality_job_definition" "test" { - name = %[1]q + name = %[1]q data_quality_app_specification { image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path } @@ -1387,20 +1382,20 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } job_resources { cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" + instance_count = 1 + instance_type = "ml.t3.medium" volume_size_in_gb = 20 } } 
network_config { vpc_config { - subnets = aws_subnet.test[*].id + subnets = aws_subnet.test[*].id security_group_ids = aws_security_group.test[*].id } } @@ -1414,15 +1409,18 @@ func testAccDataQualityJobDefinitionConfig_networkConfigTrafficEncryption(rName acctest.ConfigVPCWithSubnets(rName, 1), testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` - resource "aws_security_group" "test" { count = 1 name = "%[1]s-${count.index}" + + tags = { + Name = %[1]q + } } resource "aws_sagemaker_data_quality_job_definition" "test" { - name = %[1]q + name = %[1]q data_quality_app_specification { image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path } @@ -1437,21 +1435,21 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } job_resources { cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" + instance_count = 1 + instance_type = "ml.t3.medium" volume_size_in_gb = 20 } } network_config { enable_inter_container_traffic_encryption = true vpc_config { - subnets = aws_subnet.test[*].id + subnets = aws_subnet.test[*].id security_group_ids = aws_security_group.test[*].id } } @@ -1465,15 +1463,18 @@ func testAccDataQualityJobDefinitionConfig_networkConfigEnableNetworkIsolation(r acctest.ConfigVPCWithSubnets(rName, 1), testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` - resource "aws_security_group" "test" { count = 1 name = "%[1]s-${count.index}" + + tags = { + Name = %[1]q + } } resource "aws_sagemaker_data_quality_job_definition" "test" { - name = %[1]q + name = %[1]q data_quality_app_specification { image_uri = data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path } @@ -1488,21 +1489,21 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { 
data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/output" } } } job_resources { cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" + instance_count = 1 + instance_type = "ml.t3.medium" volume_size_in_gb = 20 } } network_config { enable_network_isolation = true vpc_config { - subnets = aws_subnet.test[*].id + subnets = aws_subnet.test[*].id security_group_ids = aws_security_group.test[*].id } } From 57e4f6501a210fc790c4978519570a6d12cd4af6 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 11 Apr 2023 16:51:25 -0400 Subject: [PATCH 41/49] Fix semgrep 'ci.caps2-in-func-name'. --- .../sagemaker/data_quality_job_definition.go | 86 ++++++++++--------- .../data_quality_job_definition_test.go | 18 ++-- 2 files changed, 53 insertions(+), 51 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition.go b/internal/service/sagemaker/data_quality_job_definition.go index b18ec1b58131..90eb665b21c4 100644 --- a/internal/service/sagemaker/data_quality_job_definition.go +++ b/internal/service/sagemaker/data_quality_job_definition.go @@ -26,9 +26,11 @@ func ResourceDataQualityJobDefinition() *schema.Resource { ReadWithoutTimeout: resourceDataQualityJobDefinitionRead, UpdateWithoutTimeout: resourceDataQualityJobDefinitionUpdate, DeleteWithoutTimeout: resourceDataQualityJobDefinitionDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, + Schema: map[string]*schema.Schema{ "arn": { Type: schema.TypeString, @@ -467,8 +469,8 @@ func resourceDataQualityJobDefinitionCreate(ctx context.Context, d *schema.Resou JobDefinitionName: aws.String(name), DataQualityAppSpecification: expandDataQualityAppSpecification(d.Get("data_quality_app_specification").([]interface{})), DataQualityJobInput: 
expandDataQualityJobInput(d.Get("data_quality_job_input").([]interface{})), - DataQualityJobOutputConfig: expandDataQualityJobOutputConfig(d.Get("data_quality_job_output_config").([]interface{})), - JobResources: expandJobResources(d.Get("job_resources").([]interface{})), + DataQualityJobOutputConfig: expandMonitoringOutputConfig(d.Get("data_quality_job_output_config").([]interface{})), + JobResources: expandMonitoringResources(d.Get("job_resources").([]interface{})), RoleArn: aws.String(roleArn), } @@ -477,11 +479,11 @@ func resourceDataQualityJobDefinitionCreate(ctx context.Context, d *schema.Resou } if v, ok := d.GetOk("network_config"); ok && len(v.([]interface{})) > 0 { - createOpts.NetworkConfig = expandNetworkConfig(v.([]interface{})) + createOpts.NetworkConfig = expandMonitoringNetworkConfig(v.([]interface{})) } if v, ok := d.GetOk("stopping_condition"); ok && len(v.([]interface{})) > 0 { - createOpts.StoppingCondition = expandStoppingCondition(v.([]interface{})) + createOpts.StoppingCondition = expandMonitoringStoppingCondition(v.([]interface{})) } if len(tags) > 0 { @@ -532,19 +534,19 @@ func resourceDataQualityJobDefinitionRead(ctx context.Context, d *schema.Resourc return sdkdiag.AppendErrorf(diags, "setting data_quality_job_input for SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) } - if err := d.Set("data_quality_job_output_config", flattenDataQualityJobOutputConfig(jobDefinition.DataQualityJobOutputConfig)); err != nil { + if err := d.Set("data_quality_job_output_config", flattenMonitoringOutputConfig(jobDefinition.DataQualityJobOutputConfig)); err != nil { return sdkdiag.AppendErrorf(diags, "setting data_quality_job_output_config for SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) } - if err := d.Set("job_resources", flattenJobResources(jobDefinition.JobResources)); err != nil { + if err := d.Set("job_resources", flattenMonitoringResources(jobDefinition.JobResources)); err != nil { return sdkdiag.AppendErrorf(diags, 
"setting job_resources for SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) } - if err := d.Set("network_config", flattenNetworkConfig(jobDefinition.NetworkConfig)); err != nil { + if err := d.Set("network_config", flattenMonitoringNetworkConfig(jobDefinition.NetworkConfig)); err != nil { return sdkdiag.AppendErrorf(diags, "setting network_config for SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) } - if err := d.Set("stopping_condition", flattenStoppingCondition(jobDefinition.StoppingCondition)); err != nil { + if err := d.Set("stopping_condition", flattenMonitoringStoppingCondition(jobDefinition.StoppingCondition)); err != nil { return sdkdiag.AppendErrorf(diags, "setting stopping_condition for SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) } @@ -605,7 +607,7 @@ func flattenDataQualityBaselineConfig(config *sagemaker.DataQualityBaselineConfi } if config.StatisticsResource != nil { - m["statistics_resource"] = flattenStatisticsResource(config.StatisticsResource) + m["statistics_resource"] = flattenMonitoringStatisticsResource(config.StatisticsResource) } return []map[string]interface{}{m} @@ -625,7 +627,7 @@ func flattenConstraintsResource(config *sagemaker.MonitoringConstraintsResource) return []map[string]interface{}{m} } -func flattenStatisticsResource(config *sagemaker.MonitoringStatisticsResource) []map[string]interface{} { +func flattenMonitoringStatisticsResource(config *sagemaker.MonitoringStatisticsResource) []map[string]interface{} { if config == nil { return []map[string]interface{}{} } @@ -673,7 +675,7 @@ func flattenBatchTransformInput(config *sagemaker.BatchTransformInput_) []map[st } if config.DatasetFormat != nil { - m["dataset_format"] = flattenDatasetFormat(config.DatasetFormat) + m["dataset_format"] = flattenMonitoringDatasetFormat(config.DatasetFormat) } if config.S3DataDistributionType != nil { @@ -687,7 +689,7 @@ func flattenBatchTransformInput(config *sagemaker.BatchTransformInput_) []map[st return 
[]map[string]interface{}{m} } -func flattenDatasetFormat(config *sagemaker.MonitoringDatasetFormat) []map[string]interface{} { +func flattenMonitoringDatasetFormat(config *sagemaker.MonitoringDatasetFormat) []map[string]interface{} { if config == nil { return []map[string]interface{}{} } @@ -695,17 +697,17 @@ func flattenDatasetFormat(config *sagemaker.MonitoringDatasetFormat) []map[strin m := map[string]interface{}{} if config.Csv != nil { - m["csv"] = flattenCsv(config.Csv) + m["csv"] = flattenMonitoringCSVDatasetFormat(config.Csv) } if config.Json != nil { - m["json"] = flattenJson(config.Json) + m["json"] = flattenMonitoringJSONDatasetFormat(config.Json) } return []map[string]interface{}{m} } -func flattenCsv(config *sagemaker.MonitoringCsvDatasetFormat) []map[string]interface{} { +func flattenMonitoringCSVDatasetFormat(config *sagemaker.MonitoringCsvDatasetFormat) []map[string]interface{} { if config == nil { return []map[string]interface{}{} } @@ -719,7 +721,7 @@ func flattenCsv(config *sagemaker.MonitoringCsvDatasetFormat) []map[string]inter return []map[string]interface{}{m} } -func flattenJson(config *sagemaker.MonitoringJsonDatasetFormat) []map[string]interface{} { +func flattenMonitoringJSONDatasetFormat(config *sagemaker.MonitoringJsonDatasetFormat) []map[string]interface{} { if config == nil { return []map[string]interface{}{} } @@ -759,7 +761,7 @@ func flattenEndpointInput(config *sagemaker.EndpointInput) []map[string]interfac return []map[string]interface{}{m} } -func flattenDataQualityJobOutputConfig(config *sagemaker.MonitoringOutputConfig) []map[string]interface{} { +func flattenMonitoringOutputConfig(config *sagemaker.MonitoringOutputConfig) []map[string]interface{} { if config == nil { return []map[string]interface{}{} } @@ -782,14 +784,14 @@ func flattenMonitoringOutputs(list []*sagemaker.MonitoringOutput) []map[string]i for _, lRaw := range list { m := make(map[string]interface{}) - m["s3_output"] = flattenS3Output(lRaw.S3Output) + 
m["s3_output"] = flattenMonitoringS3Output(lRaw.S3Output) outputs = append(outputs, m) } return outputs } -func flattenS3Output(config *sagemaker.MonitoringS3Output) []map[string]interface{} { +func flattenMonitoringS3Output(config *sagemaker.MonitoringS3Output) []map[string]interface{} { if config == nil { return []map[string]interface{}{} } @@ -811,7 +813,7 @@ func flattenS3Output(config *sagemaker.MonitoringS3Output) []map[string]interfac return []map[string]interface{}{m} } -func flattenJobResources(config *sagemaker.MonitoringResources) []map[string]interface{} { +func flattenMonitoringResources(config *sagemaker.MonitoringResources) []map[string]interface{} { if config == nil { return []map[string]interface{}{} } @@ -819,13 +821,13 @@ func flattenJobResources(config *sagemaker.MonitoringResources) []map[string]int m := map[string]interface{}{} if config.ClusterConfig != nil { - m["cluster_config"] = flattenClusterConfig(config.ClusterConfig) + m["cluster_config"] = flattenMonitoringClusterConfig(config.ClusterConfig) } return []map[string]interface{}{m} } -func flattenClusterConfig(config *sagemaker.MonitoringClusterConfig) []map[string]interface{} { +func flattenMonitoringClusterConfig(config *sagemaker.MonitoringClusterConfig) []map[string]interface{} { if config == nil { return []map[string]interface{}{} } @@ -851,7 +853,7 @@ func flattenClusterConfig(config *sagemaker.MonitoringClusterConfig) []map[strin return []map[string]interface{}{m} } -func flattenNetworkConfig(config *sagemaker.MonitoringNetworkConfig) []map[string]interface{} { +func flattenMonitoringNetworkConfig(config *sagemaker.MonitoringNetworkConfig) []map[string]interface{} { if config == nil { return []map[string]interface{}{} } @@ -891,7 +893,7 @@ func flattenVpcConfig(config *sagemaker.VpcConfig) []map[string]interface{} { return []map[string]interface{}{m} } -func flattenStoppingCondition(config *sagemaker.MonitoringStoppingCondition) []map[string]interface{} { +func 
flattenMonitoringStoppingCondition(config *sagemaker.MonitoringStoppingCondition) []map[string]interface{} { if config == nil { return []map[string]interface{}{} } @@ -979,17 +981,17 @@ func expandDataQualityBaselineConfig(configured []interface{}) *sagemaker.DataQu c := &sagemaker.DataQualityBaselineConfig{} if v, ok := m["constraints_resource"].([]interface{}); ok && len(v) > 0 { - c.ConstraintsResource = expandConstraintsResource(v) + c.ConstraintsResource = expandMonitoringConstraintsResource(v) } if v, ok := m["statistics_resource"].([]interface{}); ok && len(v) > 0 { - c.StatisticsResource = expandStatisticsResource(v) + c.StatisticsResource = expandMonitoringStatisticsResource(v) } return c } -func expandConstraintsResource(configured []interface{}) *sagemaker.MonitoringConstraintsResource { +func expandMonitoringConstraintsResource(configured []interface{}) *sagemaker.MonitoringConstraintsResource { if len(configured) == 0 { return nil } @@ -1005,7 +1007,7 @@ func expandConstraintsResource(configured []interface{}) *sagemaker.MonitoringCo return c } -func expandStatisticsResource(configured []interface{}) *sagemaker.MonitoringStatisticsResource { +func expandMonitoringStatisticsResource(configured []interface{}) *sagemaker.MonitoringStatisticsResource { if len(configured) == 0 { return nil } @@ -1083,7 +1085,7 @@ func expandBatchTransformInput(configured []interface{}) *sagemaker.BatchTransfo } if v, ok := m["dataset_format"].([]interface{}); ok && len(v) > 0 { - c.DatasetFormat = expandDatasetFormat(v) + c.DatasetFormat = expandMonitoringDatasetFormat(v) } if v, ok := m["local_path"].(string); ok && v != "" { @@ -1101,7 +1103,7 @@ func expandBatchTransformInput(configured []interface{}) *sagemaker.BatchTransfo return c } -func expandDatasetFormat(configured []interface{}) *sagemaker.MonitoringDatasetFormat { +func expandMonitoringDatasetFormat(configured []interface{}) *sagemaker.MonitoringDatasetFormat { if len(configured) == 0 { return nil } @@ -1111,17 
+1113,17 @@ func expandDatasetFormat(configured []interface{}) *sagemaker.MonitoringDatasetF c := &sagemaker.MonitoringDatasetFormat{} if v, ok := m["csv"].([]interface{}); ok && len(v) > 0 { - c.Csv = expandCsv(v) + c.Csv = expandMonitoringCSVDatasetFormat(v) } if v, ok := m["json"].([]interface{}); ok && len(v) > 0 { - c.Json = expandJson(v) + c.Json = expandMonitoringJSONDatasetFormat(v) } return c } -func expandJson(configured []interface{}) *sagemaker.MonitoringJsonDatasetFormat { +func expandMonitoringJSONDatasetFormat(configured []interface{}) *sagemaker.MonitoringJsonDatasetFormat { if len(configured) == 0 { return nil } @@ -1140,7 +1142,7 @@ func expandJson(configured []interface{}) *sagemaker.MonitoringJsonDatasetFormat return c } -func expandCsv(configured []interface{}) *sagemaker.MonitoringCsvDatasetFormat { +func expandMonitoringCSVDatasetFormat(configured []interface{}) *sagemaker.MonitoringCsvDatasetFormat { if len(configured) == 0 { return nil } @@ -1159,7 +1161,7 @@ func expandCsv(configured []interface{}) *sagemaker.MonitoringCsvDatasetFormat { return c } -func expandDataQualityJobOutputConfig(configured []interface{}) *sagemaker.MonitoringOutputConfig { +func expandMonitoringOutputConfig(configured []interface{}) *sagemaker.MonitoringOutputConfig { if len(configured) == 0 { return nil } @@ -1186,7 +1188,7 @@ func expandMonitoringOutputs(configured []interface{}) []*sagemaker.MonitoringOu data := lRaw.(map[string]interface{}) l := &sagemaker.MonitoringOutput{ - S3Output: expandS3Output(data["s3_output"].([]interface{})), + S3Output: expandMonitoringS3Output(data["s3_output"].([]interface{})), } containers = append(containers, l) } @@ -1194,7 +1196,7 @@ func expandMonitoringOutputs(configured []interface{}) []*sagemaker.MonitoringOu return containers } -func expandS3Output(configured []interface{}) *sagemaker.MonitoringS3Output { +func expandMonitoringS3Output(configured []interface{}) *sagemaker.MonitoringS3Output { if len(configured) == 0 { 
return nil } @@ -1218,7 +1220,7 @@ func expandS3Output(configured []interface{}) *sagemaker.MonitoringS3Output { return c } -func expandJobResources(configured []interface{}) *sagemaker.MonitoringResources { +func expandMonitoringResources(configured []interface{}) *sagemaker.MonitoringResources { if len(configured) == 0 { return nil } @@ -1228,13 +1230,13 @@ func expandJobResources(configured []interface{}) *sagemaker.MonitoringResources c := &sagemaker.MonitoringResources{} if v, ok := m["cluster_config"].([]interface{}); ok && len(v) > 0 { - c.ClusterConfig = expandClusterConfig(v) + c.ClusterConfig = expandMonitoringClusterConfig(v) } return c } -func expandClusterConfig(configured []interface{}) *sagemaker.MonitoringClusterConfig { +func expandMonitoringClusterConfig(configured []interface{}) *sagemaker.MonitoringClusterConfig { if len(configured) == 0 { return nil } @@ -1262,7 +1264,7 @@ func expandClusterConfig(configured []interface{}) *sagemaker.MonitoringClusterC return c } -func expandNetworkConfig(configured []interface{}) *sagemaker.MonitoringNetworkConfig { +func expandMonitoringNetworkConfig(configured []interface{}) *sagemaker.MonitoringNetworkConfig { if len(configured) == 0 { return nil } @@ -1306,7 +1308,7 @@ func expandVpcConfig(configured []interface{}) *sagemaker.VpcConfig { return c } -func expandStoppingCondition(configured []interface{}) *sagemaker.MonitoringStoppingCondition { +func expandMonitoringStoppingCondition(configured []interface{}) *sagemaker.MonitoringStoppingCondition { if len(configured) == 0 { return nil } diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index 5dd0d7605d0b..60be2c87a8c7 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -162,7 +162,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransform(t *testing.T) { }) } -func 
TestAccSageMakerDataQualityJobDefinition_batchTransformCsvHeader(t *testing.T) { +func TestAccSageMakerDataQualityJobDefinition_batchTransformCSVHeader(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_sagemaker_data_quality_job_definition.test" @@ -174,7 +174,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransformCsvHeader(t *testing CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataQualityJobDefinitionConfig_batchTransformCsvHeader(rName), + Config: testAccDataQualityJobDefinitionConfig_batchTransformCSVHeader(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), @@ -192,7 +192,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransformCsvHeader(t *testing }) } -func TestAccSageMakerDataQualityJobDefinition_batchTransformJson(t *testing.T) { +func TestAccSageMakerDataQualityJobDefinition_batchTransformJSON(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_sagemaker_data_quality_job_definition.test" @@ -204,7 +204,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransformJson(t *testing.T) { CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataQualityJobDefinitionConfig_batchTransformJson(rName), + Config: testAccDataQualityJobDefinitionConfig_batchTransformJSON(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.0.batch_transform_input.#", "1"), @@ -221,7 +221,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransformJson(t *testing.T) { }) } -func 
TestAccSageMakerDataQualityJobDefinition_batchTransformJsonLine(t *testing.T) { +func TestAccSageMakerDataQualityJobDefinition_batchTransformJSONLine(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_sagemaker_data_quality_job_definition.test" @@ -233,7 +233,7 @@ func TestAccSageMakerDataQualityJobDefinition_batchTransformJsonLine(t *testing. CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataQualityJobDefinitionConfig_batchTransformJsonLine(rName), + Config: testAccDataQualityJobDefinitionConfig_batchTransformJSONLine(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "data_quality_job_input.#", "1"), @@ -944,7 +944,7 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName)) } -func testAccDataQualityJobDefinitionConfig_batchTransformCsvHeader(rName string) string { +func testAccDataQualityJobDefinitionConfig_batchTransformCSVHeader(rName string) string { return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q @@ -980,7 +980,7 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName)) } -func testAccDataQualityJobDefinitionConfig_batchTransformJson(rName string) string { +func testAccDataQualityJobDefinitionConfig_batchTransformJSON(rName string) string { return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q @@ -1014,7 +1014,7 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName)) } -func testAccDataQualityJobDefinitionConfig_batchTransformJsonLine(rName string) string { +func 
testAccDataQualityJobDefinitionConfig_batchTransformJSONLine(rName string) string { return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { name = %[1]q From 9ab42a13b496106e8faeaf7d64cc3dfb51289031 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 11 Apr 2023 16:55:32 -0400 Subject: [PATCH 42/49] Fix markdownlint 'MD032/blanks-around-lists Lists should be surrounded by blank lines'. --- ...er_data_quality_job_definition.html.markdown | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/website/docs/r/sagemaker_data_quality_job_definition.html.markdown b/website/docs/r/sagemaker_data_quality_job_definition.html.markdown index bd59de3b98be..ffc9ba040277 100644 --- a/website/docs/r/sagemaker_data_quality_job_definition.html.markdown +++ b/website/docs/r/sagemaker_data_quality_job_definition.html.markdown @@ -67,20 +67,25 @@ The following arguments are supported: * `record_preprocessor_source_uri` - (Optional) An Amazon S3 URI to a script that is called per row prior to running analysis. It can base64 decode the payload and convert it into a flatted json so that the built-in container can use the converted data. Applicable only for the built-in (first party) containers. ### data_quality_baseline_config + * `constraints_resource` - (Optional) The constraints resource for a monitoring job. Fields are documented below. * `statistics_resource` - (Optional) The statistics resource for a monitoring job. Fields are documented below. #### constraints_resource + * `s3_uri` - (Optional) The Amazon S3 URI for the constraints resource. #### statistics_resource + * `s3_uri` - (Optional) The Amazon S3 URI for the statistics resource. ### data_quality_job_input + * `batch_transform_input` - (Optional) Input object for the batch transform job. Fields are documented below. * `endpoint_input` - (Optional) Input object for the endpoint. 
Fields are documented below. #### batch_transform_input + * `data_captured_destination_s3_uri` - (Required) The Amazon S3 location being used to capture the data. * `dataset_format` - (Required) The dataset format for your batch transform job. Fields are documented below. * `local_path` - (Optional) Path to the filesystem where the batch transform data is available to the container. Defaults to `/opt/ml/processing/input`. @@ -88,52 +93,64 @@ The following arguments are supported: * `s3_input_mode` - (Optional) Whether the `Pipe` or `File` is used as the input mode for transferring data for the monitoring job. `Pipe` mode is recommended for large datasets. `File` mode is useful for small files that fit in memory. Defaults to `File`. Valid values are `Pipe` or `File` ##### dataset_format + * `csv` - (Optional) The CSV dataset used in the monitoring job. Fields are documented below. * `json` - (Optional) The JSON dataset used in the monitoring job. Fields are documented below. ###### csv + * `header` - (Optional) Indicates if the CSV data has a header. ###### json + * `line` - (Optional) Indicates if the file should be read as a json object per line. #### endpoint_input + * `endpoint_name` - (Required) An endpoint in customer's account which has `data_capture_config` enabled. * `local_path` - (Optional) Path to the filesystem where the endpoint data is available to the container. Defaults to `/opt/ml/processing/input`. * `s3_data_distribution_type` - (Optional) Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Defaults to `FullyReplicated`. Valid values are `FullyReplicated` or `ShardedByS3Key` * `s3_input_mode` - (Optional) Whether the `Pipe` or `File` is used as the input mode for transferring data for the monitoring job. `Pipe` mode is recommended for large datasets. `File` mode is useful for small files that fit in memory. Defaults to `File`. 
Valid values are `Pipe` or `File` ### data_quality_job_output_config + * `kms_key_id` - (Optional) The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. * `monitoring_outputs` - (Required) Monitoring outputs for monitoring jobs. This is where the output of the periodic monitoring jobs is uploaded. Fields are documented below. #### monitoring_outputs + * `s3_output` - (Required) The Amazon S3 storage location where the results of a monitoring job are saved. Fields are documented below. ##### s3_output + * `local_path` - (Optional) The local path to the Amazon S3 storage location where Amazon SageMaker saves the results of a monitoring job. LocalPath is an absolute path for the output data. Defaults to `/opt/ml/processing/output`. * `s3_upload_mode` - (Optional) Whether to upload the results of the monitoring job continuously or after the job completes. Valid values are `Continuous` or `EndOfJob` * `s3_uri` - (Required) A URI that identifies the Amazon S3 storage location where Amazon SageMaker saves the results of a monitoring job. ### job_resources + * `cluster_config` - (Required) The configuration for the cluster resources used to run the processing job. Fields are documented below. #### cluster_config + * `instance_count` - (Required) The number of ML compute instances to use in the model monitoring job. For distributed processing jobs, specify a value greater than 1. * `instance_type` - (Required) The ML compute instance type for the processing job. * `volume_kms_key_id` - (Optional) The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job. * `volume_size_in_gb` - (Required) The size of the ML storage volume, in gigabytes, that you want to provision. You must specify sufficient ML storage for your scenario. 
### network_config + * `enable_inter_container_traffic_encryption` - (Optional) Whether to encrypt all communications between the instances used for the monitoring jobs. Choose `true` to encrypt communications. Encryption provides greater security for distributed jobs, but the processing might take longer. * `enable_network_isolation` - (Optional) Whether to allow inbound and outbound network calls to and from the containers used for the monitoring job. * `vpc_config` - (Optional) Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. Fields are documented below. #### vpc_config + * `security_group_ids` - (Required) The VPC security group IDs, in the form sg-xxxxxxxx. Specify the security groups for the VPC that is specified in the `subnets` field. * `subnets` - (Required) The ID of the subnets in the VPC to which you want to connect your training job or model. ### stopping_condition + * `max_runtime_in_seconds` - (Required) The maximum runtime allowed in seconds. ## Attributes Reference From ae63249d82feb5f3f2e52008510e1dc6fdaa3f22 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 11 Apr 2023 16:56:37 -0400 Subject: [PATCH 43/49] r/aws_sagemaker_data_quality_job_definition: Fix documentation terrafmt errors. 
--- .../r/sagemaker_data_quality_job_definition.html.markdown | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/docs/r/sagemaker_data_quality_job_definition.html.markdown b/website/docs/r/sagemaker_data_quality_job_definition.html.markdown index ffc9ba040277..2c57223ab016 100644 --- a/website/docs/r/sagemaker_data_quality_job_definition.html.markdown +++ b/website/docs/r/sagemaker_data_quality_job_definition.html.markdown @@ -29,14 +29,14 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_quality_job_output_config { monitoring_outputs { s3_output { - s3_uri = "https://${aws_s3_bucket.my_bucket.bucket_regional_domain_name}/output" + s3_uri = "https://${aws_s3_bucket.my_bucket.bucket_regional_domain_name}/output" } } } job_resources { cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" + instance_count = 1 + instance_type = "ml.t3.medium" volume_size_in_gb = 20 } } From c803f108eecc5f32bdc5ce41d220b91e5d6a8aed Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 11 Apr 2023 16:57:18 -0400 Subject: [PATCH 44/49] Fix semgrep 'ci.caps2-in-func-name'. 
--- internal/service/sagemaker/data_quality_job_definition.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition.go b/internal/service/sagemaker/data_quality_job_definition.go index 90eb665b21c4..def8bcd157d2 100644 --- a/internal/service/sagemaker/data_quality_job_definition.go +++ b/internal/service/sagemaker/data_quality_job_definition.go @@ -869,13 +869,13 @@ func flattenMonitoringNetworkConfig(config *sagemaker.MonitoringNetworkConfig) [ } if config.VpcConfig != nil { - m["vpc_config"] = flattenVpcConfig(config.VpcConfig) + m["vpc_config"] = flattenVPCConfig(config.VpcConfig) } return []map[string]interface{}{m} } -func flattenVpcConfig(config *sagemaker.VpcConfig) []map[string]interface{} { +func flattenVPCConfig(config *sagemaker.VpcConfig) []map[string]interface{} { if config == nil { return []map[string]interface{}{} } @@ -1282,13 +1282,13 @@ func expandMonitoringNetworkConfig(configured []interface{}) *sagemaker.Monitori } if v, ok := m["vpc_config"].([]interface{}); ok && len(v) > 0 { - c.VpcConfig = expandVpcConfig(v) + c.VpcConfig = expandVPCConfig(v) } return c } -func expandVpcConfig(configured []interface{}) *sagemaker.VpcConfig { +func expandVPCConfig(configured []interface{}) *sagemaker.VpcConfig { if len(configured) == 0 { return nil } From d0d1ceea0540de51f7a8cd04aaa1df7a4b170d36 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 11 Apr 2023 17:02:19 -0400 Subject: [PATCH 45/49] Fix compilation errors. 
--- .../sagemaker/data_quality_job_definition.go | 31 +++++++++++++++++-- internal/service/sagemaker/find.go | 25 --------------- 2 files changed, 29 insertions(+), 27 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition.go b/internal/service/sagemaker/data_quality_job_definition.go index def8bcd157d2..65d2554ffce9 100644 --- a/internal/service/sagemaker/data_quality_job_definition.go +++ b/internal/service/sagemaker/data_quality_job_definition.go @@ -9,13 +9,15 @@ import ( "github.com/aws/aws-sdk-go/service/sagemaker" "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) @@ -457,7 +459,7 @@ func resourceDataQualityJobDefinitionCreate(ctx context.Context, d *schema.Resou if v, ok := d.GetOk("name"); ok { name = v.(string) } else { - name = resource.UniqueId() + name = id.UniqueId() } var roleArn string @@ -569,6 +571,31 @@ func resourceDataQualityJobDefinitionRead(ctx context.Context, d *schema.Resourc return diags } +func FindDataQualityJobDefinitionByName(ctx context.Context, conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeDataQualityJobDefinitionOutput, error) { + input := &sagemaker.DescribeDataQualityJobDefinitionInput{ + JobDefinitionName: aws.String(name), + } + + output, 
err := conn.DescribeDataQualityJobDefinitionWithContext(ctx, input) + + if tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + func flattenDataQualityAppSpecification(config *sagemaker.DataQualityAppSpecification) []map[string]interface{} { if config == nil { return []map[string]interface{}{} diff --git a/internal/service/sagemaker/find.go b/internal/service/sagemaker/find.go index fc5d1333e1ab..908c06c0c466 100644 --- a/internal/service/sagemaker/find.go +++ b/internal/service/sagemaker/find.go @@ -497,31 +497,6 @@ func FindEndpointConfigByName(ctx context.Context, conn *sagemaker.SageMaker, na return output, nil } -func FindDataQualityJobDefinitionByName(ctx context.Context, conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeDataQualityJobDefinitionOutput, error) { - input := &sagemaker.DescribeDataQualityJobDefinitionInput{ - JobDefinitionName: aws.String(name), - } - - output, err := conn.DescribeDataQualityJobDefinitionWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { - return nil, &resource.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output, nil -} - func FindFlowDefinitionByName(ctx context.Context, conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeFlowDefinitionOutput, error) { input := &sagemaker.DescribeFlowDefinitionInput{ FlowDefinitionName: aws.String(name), From 370acd5869690797eba97baa64d986ff713ba28f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 11 Apr 2023 17:03:23 -0400 Subject: [PATCH 46/49] Correctly order CRUD handlers. 
--- .../sagemaker/data_quality_job_definition.go | 70 +++++++++---------- 1 file changed, 34 insertions(+), 36 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition.go b/internal/service/sagemaker/data_quality_job_definition.go index 65d2554ffce9..e1482b0102bb 100644 --- a/internal/service/sagemaker/data_quality_job_definition.go +++ b/internal/service/sagemaker/data_quality_job_definition.go @@ -571,6 +571,40 @@ func resourceDataQualityJobDefinitionRead(ctx context.Context, d *schema.Resourc return diags } +func resourceDataQualityJobDefinitionUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).SageMakerConn() + + if d.HasChange("tags_all") { + o, n := d.GetChange("tags_all") + + if err := UpdateTags(ctx, conn, d.Get("arn").(string), o, n); err != nil { + return sdkdiag.AppendErrorf(diags, "updating SageMaker Data Quality Job Definition (%s) tags: %s", d.Id(), err) + } + } + return append(diags, resourceDataQualityJobDefinitionRead(ctx, d, meta)...) 
+} + +func resourceDataQualityJobDefinitionDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).SageMakerConn() + + log.Printf("[INFO] Deleting SageMaker Data Quality Job Definition: %s", d.Id()) + _, err := conn.DeleteDataQualityJobDefinitionWithContext(ctx, &sagemaker.DeleteDataQualityJobDefinitionInput{ + JobDefinitionName: aws.String(d.Id()), + }) + + if tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) + } + + return diags +} + func FindDataQualityJobDefinitionByName(ctx context.Context, conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeDataQualityJobDefinitionOutput, error) { input := &sagemaker.DescribeDataQualityJobDefinitionInput{ JobDefinitionName: aws.String(name), @@ -934,42 +968,6 @@ func flattenMonitoringStoppingCondition(config *sagemaker.MonitoringStoppingCond return []map[string]interface{}{m} } -func resourceDataQualityJobDefinitionUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SageMakerConn() - - if d.HasChange("tags_all") { - o, n := d.GetChange("tags_all") - - if err := UpdateTags(ctx, conn, d.Get("arn").(string), o, n); err != nil { - return sdkdiag.AppendErrorf(diags, "updating SageMaker Data Quality Job Definition (%s) tags: %s", d.Id(), err) - } - } - return append(diags, resourceDataQualityJobDefinitionRead(ctx, d, meta)...) 
-} - -func resourceDataQualityJobDefinitionDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SageMakerConn() - - deleteOpts := &sagemaker.DeleteDataQualityJobDefinitionInput{ - JobDefinitionName: aws.String(d.Id()), - } - log.Printf("[INFO] Deleting SageMaker Data Quality Job Definition : %s", d.Id()) - - _, err := conn.DeleteDataQualityJobDefinitionWithContext(ctx, deleteOpts) - - if tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { - return diags - } - - if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) - } - - return diags -} - func expandDataQualityAppSpecification(configured []interface{}) *sagemaker.DataQualityAppSpecification { if len(configured) == 0 { return nil From 96294e31f8364c129763ffb6035d0db54ab6c225 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 11 Apr 2023 17:06:39 -0400 Subject: [PATCH 47/49] r/aws_sagemaker_data_quality_job_definition: Transparent tagging. 
--- .../sagemaker/data_quality_job_definition.go | 49 +++++-------------- .../service/sagemaker/service_package_gen.go | 4 ++ 2 files changed, 16 insertions(+), 37 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition.go b/internal/service/sagemaker/data_quality_job_definition.go index e1482b0102bb..99bee78a7098 100644 --- a/internal/service/sagemaker/data_quality_job_definition.go +++ b/internal/service/sagemaker/data_quality_job_definition.go @@ -19,9 +19,11 @@ import ( tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" + "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_sagemaker_data_quality_job_definition") +// @SDKResource("aws_sagemaker_data_quality_job_definition", name="Data Quality Job Definition") +// @Tags(identifierAttribute="arn") func ResourceDataQualityJobDefinition() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceDataQualityJobDefinitionCreate, @@ -442,9 +444,10 @@ func ResourceDataQualityJobDefinition() *schema.Resource { }, }, }, - "tags": tftags.TagsSchema(), - "tags_all": tftags.TagsSchemaComputed(), + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), }, + CustomizeDiff: verify.SetTagsDiff, } } @@ -452,8 +455,6 @@ func ResourceDataQualityJobDefinition() *schema.Resource { func resourceDataQualityJobDefinitionCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).SageMakerConn() - defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig - tags := defaultTagsConfig.MergeTags(tftags.New(ctx, d.Get("tags").(map[string]interface{}))) var name string if v, ok := d.GetOk("name"); ok { @@ -468,12 +469,13 @@ func resourceDataQualityJobDefinitionCreate(ctx context.Context, d *schema.Resou } createOpts 
:= &sagemaker.CreateDataQualityJobDefinitionInput{ - JobDefinitionName: aws.String(name), DataQualityAppSpecification: expandDataQualityAppSpecification(d.Get("data_quality_app_specification").([]interface{})), DataQualityJobInput: expandDataQualityJobInput(d.Get("data_quality_job_input").([]interface{})), DataQualityJobOutputConfig: expandMonitoringOutputConfig(d.Get("data_quality_job_output_config").([]interface{})), + JobDefinitionName: aws.String(name), JobResources: expandMonitoringResources(d.Get("job_resources").([]interface{})), RoleArn: aws.String(roleArn), + Tags: GetTagsIn(ctx), } if v, ok := d.GetOk("data_quality_baseline_config"); ok && len(v.([]interface{})) > 0 { @@ -488,15 +490,12 @@ func resourceDataQualityJobDefinitionCreate(ctx context.Context, d *schema.Resou createOpts.StoppingCondition = expandMonitoringStoppingCondition(v.([]interface{})) } - if len(tags) > 0 { - createOpts.Tags = Tags(tags.IgnoreAWS()) - } - - log.Printf("[DEBUG] SageMaker Data Quality Job Definition create config: %#v", *createOpts) _, err := conn.CreateDataQualityJobDefinitionWithContext(ctx, createOpts) + if err != nil { - return sdkdiag.AppendErrorf(diags, "creating SageMaker Data Quality Job Definition: %s", err) + return sdkdiag.AppendErrorf(diags, "creating SageMaker Data Quality Job Definition (%s): %s", name, err) } + d.SetId(name) return append(diags, resourceDataQualityJobDefinitionRead(ctx, d, meta)...) 
@@ -505,8 +504,6 @@ func resourceDataQualityJobDefinitionCreate(ctx context.Context, d *schema.Resou func resourceDataQualityJobDefinitionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).SageMakerConn() - defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig jobDefinition, err := FindDataQualityJobDefinitionByName(ctx, conn, d.Id()) @@ -552,36 +549,14 @@ func resourceDataQualityJobDefinitionRead(ctx context.Context, d *schema.Resourc return sdkdiag.AppendErrorf(diags, "setting stopping_condition for SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) } - tags, err := ListTags(ctx, conn, aws.StringValue(jobDefinition.JobDefinitionArn)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "listing tags for SageMaker Data Quality Job Definition (%s): %s", d.Id(), err) - } - - tags = tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig) - - //lintignore:AWSR002 - if err := d.Set("tags", tags.RemoveDefaultConfig(defaultTagsConfig).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) - } - - if err := d.Set("tags_all", tags.Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags_all: %s", err) - } - return diags } func resourceDataQualityJobDefinitionUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SageMakerConn() - if d.HasChange("tags_all") { - o, n := d.GetChange("tags_all") + // Tags only. - if err := UpdateTags(ctx, conn, d.Get("arn").(string), o, n); err != nil { - return sdkdiag.AppendErrorf(diags, "updating SageMaker Data Quality Job Definition (%s) tags: %s", d.Id(), err) - } - } return append(diags, resourceDataQualityJobDefinitionRead(ctx, d, meta)...) 
} diff --git a/internal/service/sagemaker/service_package_gen.go b/internal/service/sagemaker/service_package_gen.go index 0bbd17a794bb..7d4da88edb94 100644 --- a/internal/service/sagemaker/service_package_gen.go +++ b/internal/service/sagemaker/service_package_gen.go @@ -57,6 +57,10 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka { Factory: ResourceDataQualityJobDefinition, TypeName: "aws_sagemaker_data_quality_job_definition", + Name: "Data Quality Job Definition", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: "arn", + }, }, { Factory: ResourceDevice, From 239cb5f38ab7ebd3b4a6d9c5bf50e86ab60cc6a5 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 11 Apr 2023 17:26:50 -0400 Subject: [PATCH 48/49] r/aws_sagemaker_data_quality_job_definition: Fix acceptance test terrafmt errors. --- .../sagemaker/data_quality_job_definition_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index 60be2c87a8c7..12004bf3b61c 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -776,7 +776,7 @@ resource "aws_sagemaker_endpoint_configuration" "test" { initial_sampling_percentage = 100 destination_s3_uri = "s3://${aws_s3_bucket.test.bucket_regional_domain_name}/capture" - + capture_options { capture_mode = "Input" } @@ -832,7 +832,7 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { func testAccDataQualityJobDefinitionConfig_appSpecificationOptional(rName string) string { return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_sagemaker_data_quality_job_definition" "test" { - name = %[1]q + name = %[1]q data_quality_app_specification { image_uri = 
data.aws_sagemaker_prebuilt_ecr_image.monitor.registry_path environment = { @@ -956,7 +956,7 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" dataset_format { csv { - header = true + header = true } } } @@ -1026,7 +1026,7 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { data_captured_destination_s3_uri = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/captured" dataset_format { json { - line = true + line = true } } } @@ -1040,8 +1040,8 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { } job_resources { cluster_config { - instance_count = 1 - instance_type = "ml.t3.medium" + instance_count = 1 + instance_type = "ml.t3.medium" volume_size_in_gb = 20 } } From 110476861875b0813ca7755fb6fa2b682e6f05b7 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 11 Apr 2023 17:29:07 -0400 Subject: [PATCH 49/49] Fix semgrep 'ci.caps4-in-func-name'. 
--- .../sagemaker/data_quality_job_definition_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/service/sagemaker/data_quality_job_definition_test.go b/internal/service/sagemaker/data_quality_job_definition_test.go index 12004bf3b61c..7f2959376f89 100644 --- a/internal/service/sagemaker/data_quality_job_definition_test.go +++ b/internal/service/sagemaker/data_quality_job_definition_test.go @@ -315,7 +315,7 @@ func TestAccSageMakerDataQualityJobDefinition_endpointOptional(t *testing.T) { }) } -func TestAccSageMakerDataQualityJobDefinition_ouputConfigKmsKeyId(t *testing.T) { +func TestAccSageMakerDataQualityJobDefinition_outputConfigKMSKeyID(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_sagemaker_data_quality_job_definition.test" @@ -327,7 +327,7 @@ func TestAccSageMakerDataQualityJobDefinition_ouputConfigKmsKeyId(t *testing.T) CheckDestroy: testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataQualityJobDefinitionConfig_outputConfigKmsKeyId(rName), + Config: testAccDataQualityJobDefinitionConfig_outputConfigKMSKeyID(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "data_quality_job_output_config.#", "1"), @@ -374,7 +374,7 @@ func TestAccSageMakerDataQualityJobDefinition_outputConfigOptional(t *testing.T) }) } -func TestAccSageMakerDataQualityJobDefinition_jobResourcesVolumeKmsKeyId(t *testing.T) { +func TestAccSageMakerDataQualityJobDefinition_jobResourcesVolumeKMSKeyID(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_sagemaker_data_quality_job_definition.test" @@ -386,7 +386,7 @@ func TestAccSageMakerDataQualityJobDefinition_jobResourcesVolumeKmsKeyId(t *test CheckDestroy: 
testAccCheckDataQualityJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataQualityJobDefinitionConfig_jobResourcesVolumeKey(rName), + Config: testAccDataQualityJobDefinitionConfig_jobResourcesVolumeKMSKeyID(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataQualityJobDefinitionExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "job_resources.#", "1"), @@ -1121,7 +1121,7 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName)) } -func testAccDataQualityJobDefinitionConfig_outputConfigKmsKeyId(rName string) string { +func testAccDataQualityJobDefinitionConfig_outputConfigKMSKeyID(rName string) string { return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_kms_key" "test" { description = %[1]q @@ -1197,7 +1197,7 @@ resource "aws_sagemaker_data_quality_job_definition" "test" { `, rName)) } -func testAccDataQualityJobDefinitionConfig_jobResourcesVolumeKey(rName string) string { +func testAccDataQualityJobDefinitionConfig_jobResourcesVolumeKMSKeyID(rName string) string { return acctest.ConfigCompose(testAccDataQualityJobDefinitionConfig_batchTransformBase(rName), fmt.Sprintf(` resource "aws_kms_key" "test" { description = %[1]q