diff --git a/.changelog/30214.txt b/.changelog/30214.txt
new file mode 100644
index 000000000000..7628c89a6552
--- /dev/null
+++ b/.changelog/30214.txt
@@ -0,0 +1,11 @@
+```release-note:bug
+datasource/aws_ecs_task_execution: Fix type assertion panic on `overrides.0.inference_accelerator_overrides` attribute
+```
+
+```release-note:bug
+datasource/aws_ecs_task_execution: Fix type assertion panic on `overrides.0.container_overrides.*.environment` attribute
+```
+
+```release-note:bug
+datasource/aws_ecs_task_execution: Fix type assertion panic on `overrides.0.container_overrides.*.resource_requirements` attribute
+```
diff --git a/internal/service/ecs/task_execution_data_source.go b/internal/service/ecs/task_execution_data_source.go
index 0057cce6b3da..75e82b76d6cb 100644
--- a/internal/service/ecs/task_execution_data_source.go
+++ b/internal/service/ecs/task_execution_data_source.go
@@ -361,6 +361,7 @@ func expandTaskOverride(tfList []interface{}) *ecs.TaskOverride {
 	if len(tfList) == 0 {
 		return nil
 	}
+
 	apiObject := &ecs.TaskOverride{}
 	tfMap := tfList[0].(map[string]interface{})
 
@@ -377,7 +378,7 @@ func expandTaskOverride(tfList []interface{}) *ecs.TaskOverride {
 		apiObject.TaskRoleArn = aws.String(v.(string))
 	}
 	if v, ok := tfMap["inference_accelerator_overrides"]; ok {
-		apiObject.InferenceAcceleratorOverrides = expandInferenceAcceleratorOverrides(v.([]interface{}))
+		apiObject.InferenceAcceleratorOverrides = expandInferenceAcceleratorOverrides(v.(*schema.Set))
 	}
 	if v, ok := tfMap["container_overrides"]; ok {
 		apiObject.ContainerOverrides = expandContainerOverride(v.([]interface{}))
@@ -386,13 +387,13 @@ func expandTaskOverride(tfList []interface{}) *ecs.TaskOverride {
 	return apiObject
 }
 
-func expandInferenceAcceleratorOverrides(tfList []interface{}) []*ecs.InferenceAcceleratorOverride {
-	if len(tfList) == 0 {
+func expandInferenceAcceleratorOverrides(tfSet *schema.Set) []*ecs.InferenceAcceleratorOverride {
+	if tfSet.Len() == 0 {
 		return nil
 	}
 	apiObject := make([]*ecs.InferenceAcceleratorOverride, 0)
 
-	for _, item := range tfList {
+	for _, item := range tfSet.List() {
 		tfMap := item.(map[string]interface{})
 		iao := &ecs.InferenceAcceleratorOverride{
 			DeviceName: aws.String(tfMap["device_name"].(string)),
@@ -420,19 +421,19 @@ func expandContainerOverride(tfList []interface{}) []*ecs.ContainerOverride {
 			co.Command = flex.ExpandStringList(commandStrings)
 		}
 		if v, ok := tfMap["cpu"]; ok {
-			co.Cpu = aws.Int64(v.(int64))
+			co.Cpu = aws.Int64(int64(v.(int)))
 		}
 		if v, ok := tfMap["environment"]; ok {
-			co.Environment = expandTaskEnvironment(v.([]interface{}))
+			co.Environment = expandTaskEnvironment(v.(*schema.Set))
 		}
 		if v, ok := tfMap["memory"]; ok {
-			co.Memory = aws.Int64(v.(int64))
+			co.Memory = aws.Int64(int64(v.(int)))
 		}
 		if v, ok := tfMap["memory_reservation"]; ok {
-			co.Memory = aws.Int64(v.(int64))
+			co.MemoryReservation = aws.Int64(int64(v.(int)))
 		}
 		if v, ok := tfMap["resource_requirements"]; ok {
-			co.ResourceRequirements = expandResourceRequirements(v.([]interface{}))
+			co.ResourceRequirements = expandResourceRequirements(v.(*schema.Set))
 		}
 		apiObject = append(apiObject, co)
 	}
@@ -440,16 +441,16 @@ func expandContainerOverride(tfList []interface{}) []*ecs.ContainerOverride {
 	return apiObject
 }
 
-func expandTaskEnvironment(tfList []interface{}) []*ecs.KeyValuePair {
-	if len(tfList) == 0 {
+func expandTaskEnvironment(tfSet *schema.Set) []*ecs.KeyValuePair {
+	if tfSet.Len() == 0 {
 		return nil
 	}
 	apiObject := make([]*ecs.KeyValuePair, 0)
 
-	for _, item := range tfList {
+	for _, item := range tfSet.List() {
 		tfMap := item.(map[string]interface{})
 		te := &ecs.KeyValuePair{
-			Name:  aws.String(tfMap["name"].(string)),
+			Name:  aws.String(tfMap["key"].(string)),
 			Value: aws.String(tfMap["value"].(string)),
 		}
 		apiObject = append(apiObject, te)
@@ -458,13 +459,13 @@ func expandTaskEnvironment(tfList []interface{}) []*ecs.KeyValuePair {
 	return apiObject
 }
 
-func expandResourceRequirements(tfList []interface{}) []*ecs.ResourceRequirement {
-	if len(tfList) == 0 {
+func expandResourceRequirements(tfSet *schema.Set) []*ecs.ResourceRequirement {
+	if tfSet.Len() == 0 {
 		return nil
 	}
 	apiObject := make([]*ecs.ResourceRequirement, 0)
 
-	for _, item := range tfList {
+	for _, item := range tfSet.List() {
 		tfMap := item.(map[string]interface{})
 		rr := &ecs.ResourceRequirement{
 			Type: aws.String(tfMap["type"].(string)),
diff --git a/internal/service/ecs/task_execution_data_source_test.go b/internal/service/ecs/task_execution_data_source_test.go
index 7b8a4da08f90..a08a9ef67217 100644
--- a/internal/service/ecs/task_execution_data_source_test.go
+++ b/internal/service/ecs/task_execution_data_source_test.go
@@ -44,6 +44,45 @@ func TestAccECSTaskExecutionDataSource_basic(t *testing.T) {
 	})
 }
 
+func TestAccECSTaskExecutionDataSource_overrides(t *testing.T) {
+	ctx := acctest.Context(t)
+	if testing.Short() {
+		t.Skip("skipping long-running test in short mode")
+	}
+
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+	dataSourceName := "data.aws_ecs_task_execution.test"
+	clusterName := "aws_ecs_cluster.test"
+	taskDefinitionName := "aws_ecs_task_definition.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck: func() {
+			acctest.PreCheck(ctx, t)
+			acctest.PreCheckPartitionHasService(t, ecs.EndpointsID)
+		},
+		ErrorCheck:               acctest.ErrorCheck(t, ecs.EndpointsID),
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccTaskExecutionDataSourceConfig_overrides(rName, "key1", "value1"),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttrPair(dataSourceName, "cluster", clusterName, "id"),
+					resource.TestCheckResourceAttrPair(dataSourceName, "task_definition", taskDefinitionName, "arn"),
+					resource.TestCheckResourceAttr(dataSourceName, "desired_count", "1"),
+					resource.TestCheckResourceAttr(dataSourceName, "launch_type", "FARGATE"),
+					resource.TestCheckResourceAttr(dataSourceName, "network_configuration.#", "1"),
+					resource.TestCheckResourceAttr(dataSourceName, "task_arns.#", "1"),
+					resource.TestCheckResourceAttr(dataSourceName, "overrides.#", "1"),
+					resource.TestCheckResourceAttr(dataSourceName, "overrides.0.container_overrides.#", "1"),
+					resource.TestCheckResourceAttr(dataSourceName, "overrides.0.container_overrides.0.environment.#", "1"),
+					resource.TestCheckResourceAttr(dataSourceName, "overrides.0.container_overrides.0.environment.0.key", "key1"),
+					resource.TestCheckResourceAttr(dataSourceName, "overrides.0.container_overrides.0.environment.0.value", "value1"),
+				),
+			},
+		},
+	})
+}
+
 func TestAccECSTaskExecutionDataSource_tags(t *testing.T) {
 	ctx := acctest.Context(t)
 	if testing.Short() {
@@ -181,3 +220,38 @@ data "aws_ecs_task_execution" "test" {
 }
 `, tagKey1, tagValue1))
 }
+
+func testAccTaskExecutionDataSourceConfig_overrides(rName, envKey1, envValue1 string) string {
+	return acctest.ConfigCompose(
+		acctest.ConfigVPCWithSubnets(rName, 2),
+		testAccTaskExecutionDataSourceConfig_base(rName),
+		fmt.Sprintf(`
+data "aws_ecs_task_execution" "test" {
+  depends_on = [aws_ecs_cluster_capacity_providers.test]
+
+  cluster         = aws_ecs_cluster.test.id
+  task_definition = aws_ecs_task_definition.test.arn
+  desired_count   = 1
+  launch_type     = "FARGATE"
+
+  network_configuration {
+    subnets          = aws_subnet.test[*].id
+    security_groups  = [aws_security_group.test.id]
+    assign_public_ip = false
+  }
+
+  overrides {
+    container_overrides {
+      name = "sleep"
+
+      environment {
+        key   = %[1]q
+        value = %[2]q
+      }
+    }
+    cpu    = "256"
+    memory = "512"
+  }
+}
+`, envKey1, envValue1))
+}