Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Adds placement_group_config block to aws_emr_cluster resource #30121

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .changelog/30121.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
```release-note:enhancement
resource/aws_emr_cluster: Add `placement_group_config` argument
```
72 changes: 72 additions & 0 deletions internal/service/emr/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -460,6 +460,29 @@ func ResourceCluster() *schema.Resource {
ForceNew: true,
Required: true,
},
"placement_group_config": {
Type: schema.TypeList,
ForceNew: true,
Optional: true,
ConfigMode: schema.SchemaConfigModeAttr,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"instance_role": {
Type: schema.TypeString,
ForceNew: true,
Required: true,
ValidateFunc: validation.StringInSlice(emr.InstanceRoleType_Values(), false),
},
"placement_strategy": {
Type: schema.TypeString,
ForceNew: true,
Optional: true,
Computed: true,
ValidateFunc: validation.StringInSlice(emr.PlacementGroupStrategy_Values(), false),
},
},
},
},
"release_label": {
Type: schema.TypeString,
ForceNew: true,
Expand Down Expand Up @@ -962,6 +985,11 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int
params.AutoTerminationPolicy = expandAutoTerminationPolicy(v.([]interface{}))
}

if v, ok := d.GetOk("placement_group_config"); ok {
placementGroupConfigs := v.([]interface{})
params.PlacementGroupConfigs = expandPlacementGroupConfigs(placementGroupConfigs)
}

var resp *emr.RunJobFlowOutput
err := retry.RetryContext(ctx, propagationTimeout, func() *retry.RetryError {
var err error
Expand Down Expand Up @@ -1176,6 +1204,10 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter
return sdkdiag.AppendErrorf(diags, "setting auto_termination_policy: %s", err)
}

if err := d.Set("placement_group_config", flattenPlacementGroupConfigs(cluster.PlacementGroups)); err != nil {
return sdkdiag.AppendErrorf(diags, "setting placement_group_config: %s", err)
}

return diags
}

Expand Down Expand Up @@ -2263,3 +2295,43 @@ func flattenAutoTerminationPolicy(atp *emr.AutoTerminationPolicy) []map[string]i

return result
}

// expandPlacementGroupConfigs converts the Terraform configuration list for
// the `placement_group_config` argument into the AWS SDK representation
// passed to RunJobFlow.
func expandPlacementGroupConfigs(placementGroupConfigs []interface{}) []*emr.PlacementGroupConfig {
	// Pre-size to the known number of configurations to avoid append growth.
	placementGroupConfigsOut := make([]*emr.PlacementGroupConfig, 0, len(placementGroupConfigs))

	for _, raw := range placementGroupConfigs {
		placementGroupAttributes := raw.(map[string]interface{})

		placementGroupConfig := &emr.PlacementGroupConfig{
			InstanceRole: aws.String(placementGroupAttributes["instance_role"].(string)),
		}

		// placement_strategy is Optional+Computed in the schema; only send it
		// when the practitioner supplied a non-empty value so the API default
		// applies otherwise. The two-value type assertion also guards against
		// a nil map value, which a bare v.(string) would panic on.
		if v, ok := placementGroupAttributes["placement_strategy"].(string); ok && v != "" {
			placementGroupConfig.PlacementStrategy = aws.String(v)
		}

		placementGroupConfigsOut = append(placementGroupConfigsOut, placementGroupConfig)
	}

	return placementGroupConfigsOut
}

// flattenPlacementGroupConfigs converts the AWS SDK placement group
// configurations returned by DescribeCluster into the list form stored in
// Terraform state for the `placement_group_config` attribute. A nil input
// yields an empty (non-nil) list, matching the previous behavior.
func flattenPlacementGroupConfigs(placementGroupSpecifications []*emr.PlacementGroupConfig) []interface{} {
	// Ranging over a nil slice is a no-op, so no explicit nil guard is needed;
	// pre-size to the number of specifications.
	placementGroupConfigs := make([]interface{}, 0, len(placementGroupSpecifications))

	for _, pgc := range placementGroupSpecifications {
		placementGroupConfig := map[string]interface{}{
			"instance_role": aws.StringValue(pgc.InstanceRole),
		}

		// PlacementStrategy may be absent; omit the key rather than storing
		// an empty string.
		if pgc.PlacementStrategy != nil {
			placementGroupConfig["placement_strategy"] = aws.StringValue(pgc.PlacementStrategy)
		}

		placementGroupConfigs = append(placementGroupConfigs, placementGroupConfig)
	}

	return placementGroupConfigs
}
236 changes: 234 additions & 2 deletions internal/service/emr/cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1114,6 +1114,50 @@ func TestAccEMRCluster_Bootstrap_ordering(t *testing.T) {
})
}

// TestAccEMRCluster_PlacementGroupConfigs verifies the placement_group_config
// argument of aws_emr_cluster: step 1 creates a cluster with an explicit
// MASTER/SPREAD placement group, step 2 applies a config that omits
// placement_strategy (expecting the API-computed SPREAD value to persist in
// state), and step 3 checks import round-trips cleanly.
func TestAccEMRCluster_PlacementGroupConfigs(t *testing.T) {
ctx := acctest.Context(t)
var cluster emr.Cluster

resourceName := "aws_emr_cluster.test"
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { acctest.PreCheck(ctx, t) },
ErrorCheck: acctest.ErrorCheck(t, emr.EndpointsID),
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
CheckDestroy: testAccCheckClusterDestroy(ctx),
Steps: []resource.TestStep{
{
// Explicit strategy: both attributes should be set verbatim.
Config: testAccClusterConfig_PlacementGroup(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckClusterExists(ctx, resourceName, &cluster),
resource.TestCheckResourceAttr(resourceName, "placement_group_config.#", "1"),
resource.TestCheckResourceAttr(resourceName, "placement_group_config.0.instance_role", "MASTER"),
resource.TestCheckResourceAttr(resourceName, "placement_group_config.0.placement_strategy", "SPREAD"),
),
},
{
// Strategy omitted: the Computed attribute should still read back
// as SPREAD from the API without forcing a diff.
Config: testAccClusterConfig_PlacementGroupWithOptionalUnset(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckClusterExists(ctx, resourceName, &cluster),
resource.TestCheckResourceAttr(resourceName, "placement_group_config.#", "1"),
resource.TestCheckResourceAttr(resourceName, "placement_group_config.0.instance_role", "MASTER"),
resource.TestCheckResourceAttr(resourceName, "placement_group_config.0.placement_strategy", "SPREAD"),
),
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{
"cluster_state", // Ignore RUNNING versus WAITING changes
"configurations",
"keep_job_flow_alive_when_no_steps",
},
},
},
})
}

func TestAccEMRCluster_terminationProtected(t *testing.T) {
ctx := acctest.Context(t)
var cluster emr.Cluster
Expand Down Expand Up @@ -1795,7 +1839,8 @@ resource "aws_security_group" "test" {
}

tags = {
Name = %[1]q
Name = %[1]q
for-use-with-amazon-emr-managed-policies = true
}

# EMR will modify ingress rules
Expand All @@ -1811,7 +1856,8 @@ resource "aws_subnet" "test" {
vpc_id = aws_vpc.test.id

tags = {
Name = %[1]q
Name = %[1]q
for-use-with-amazon-emr-managed-policies = true
}
}

Expand Down Expand Up @@ -1937,6 +1983,36 @@ resource "aws_iam_role_policy_attachment" "emr_service" {
`, rName)
}

// testAccClusterConfig_baseIAMServiceRolev2 returns Terraform configuration
// for an EMR service role named "<rName>_default_role" that trusts the
// elasticmapreduce service principal and attaches the
// AmazonEMRServicePolicy_v2 managed policy. The v2 policy is required for
// clusters tagged with for-use-with-amazon-emr-managed-policies.
// NOTE(review): the referenced data.aws_partition.current must be declared by
// a composed configuration — confirm against callers.
func testAccClusterConfig_baseIAMServiceRolev2(rName string) string {
return fmt.Sprintf(`
resource "aws_iam_role" "emr_service" {
name = "%[1]s_default_role"

assume_role_policy = <<EOT
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "elasticmapreduce.${data.aws_partition.current.dns_suffix}"
},
"Action": "sts:AssumeRole"
}
]
}
EOT
}

resource "aws_iam_role_policy_attachment" "emr_service" {
role = aws_iam_role.emr_service.id
policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/service-role/AmazonEMRServicePolicy_v2"
}

`, rName)
}

func testAccClusterConfig_baseIAMAutoScalingRole(rName string) string {
return fmt.Sprintf(`
resource "aws_iam_role" "emr_autoscaling_role" {
Expand Down Expand Up @@ -3993,3 +4069,159 @@ resource "aws_emr_cluster" "test" {
}
`, rName))
}

// testAccClusterConfig_IAMServiceRoleWithPlacementGroup composes the v2 EMR
// service role configuration with an additional customer-managed IAM policy
// that grants the ec2:CreatePlacementGroup/CreateTags/DeleteTags permissions
// EMR needs to manage placement groups, plus iam:PassRole for the instance
// profile role.
// NOTE(review): references aws_iam_role.emr_instance_profile and
// data.aws_partition.current, which must be supplied by composed
// configurations — confirm against callers.
func testAccClusterConfig_IAMServiceRoleWithPlacementGroup(rName string) string {
return acctest.ConfigCompose(
testAccClusterConfig_baseIAMServiceRolev2(rName),
fmt.Sprintf(`
resource "aws_iam_role_policy_attachment" "emr_placementgroup" {
role = aws_iam_role.emr_service.id
policy_arn = aws_iam_policy.emr_placementgroup.arn
}

resource "aws_iam_policy" "emr_placementgroup" {
name = "%[1]s_placementgroup_profile"

policy = <<EOT
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Resource": "arn:${data.aws_partition.current.partition}:ec2:*:*:placement-group/pg-*",
"Action": [
"ec2:CreatePlacementGroup",
"ec2:CreateTags",
"ec2:DeleteTags"
]
},
{
"Sid": "PassRoleForEC2",
"Effect": "Allow",
"Action": "iam:PassRole",
"Resource": "${aws_iam_role.emr_instance_profile.arn}",
"Condition": {
"StringLike": {
"iam:PassedToService": "ec2.${data.aws_partition.current.dns_suffix}*"
}
}
}]
}
EOT
}
`, rName))
}

// testAccClusterConfig_PlacementGroup returns a complete aws_emr_cluster
// configuration (VPC, IAM service/instance/autoscaling roles, placement group
// policy) with a placement_group_config block that sets both instance_role
// ("MASTER") and an explicit placement_strategy ("SPREAD").
func testAccClusterConfig_PlacementGroup(rName string) string {
return acctest.ConfigCompose(
testAccClusterConfig_baseVPC(rName, true),
testAccClusterConfig_IAMServiceRoleWithPlacementGroup(rName),
testAccClusterConfig_baseIAMInstanceProfile(rName),
testAccClusterConfig_baseIAMAutoScalingRole(rName),
fmt.Sprintf(`
data "aws_partition" "current" {}

resource "aws_emr_cluster" "test" {
name = %[1]q
release_label = "emr-5.23.0"
applications = ["Spark"]
ec2_attributes {
subnet_id = aws_subnet.test.id
emr_managed_master_security_group = aws_security_group.test.id
emr_managed_slave_security_group = aws_security_group.test.id
instance_profile = aws_iam_instance_profile.emr_instance_profile.arn
}
master_instance_group {
instance_type = "c4.large"
instance_count = 3
}
core_instance_group {
instance_count = 1
instance_type = "c4.large"
}
tags = {
role = "rolename"
dns_zone = "env_zone"
env = "env"
name = "name-env"
for-use-with-amazon-emr-managed-policies = true
}
keep_job_flow_alive_when_no_steps = true
termination_protection = false

scale_down_behavior = "TERMINATE_AT_TASK_COMPLETION"
configurations = "test-fixtures/emr_configurations.json"

placement_group_config {
instance_role = "MASTER"
placement_strategy = "SPREAD"
}
depends_on = [
aws_route_table_association.test,
aws_iam_role_policy_attachment.emr_instance_profile,
aws_iam_role_policy_attachment.emr_autoscaling_role,
aws_iam_role_policy_attachment.emr_service,
aws_iam_role_policy_attachment.emr_placementgroup
]
service_role = aws_iam_role.emr_service.arn
autoscaling_role = aws_iam_role.emr_autoscaling_role.arn
ebs_root_volume_size = 21
}
`, rName))
}

// testAccClusterConfig_PlacementGroupWithOptionalUnset mirrors
// testAccClusterConfig_PlacementGroup but omits the optional
// placement_strategy attribute, leaving only instance_role = "MASTER", so the
// acceptance test can verify the Computed strategy value read back from the
// API does not produce a diff.
func testAccClusterConfig_PlacementGroupWithOptionalUnset(rName string) string {
return acctest.ConfigCompose(
testAccClusterConfig_baseVPC(rName, true),
testAccClusterConfig_IAMServiceRoleWithPlacementGroup(rName),
testAccClusterConfig_baseIAMInstanceProfile(rName),
testAccClusterConfig_baseIAMAutoScalingRole(rName),
fmt.Sprintf(`
data "aws_partition" "current" {}

resource "aws_emr_cluster" "test" {
name = %[1]q
release_label = "emr-5.23.0"
applications = ["Spark"]
ec2_attributes {
subnet_id = aws_subnet.test.id
emr_managed_master_security_group = aws_security_group.test.id
emr_managed_slave_security_group = aws_security_group.test.id
instance_profile = aws_iam_instance_profile.emr_instance_profile.arn
}
master_instance_group {
instance_type = "c4.large"
instance_count = 3
}
core_instance_group {
instance_count = 1
instance_type = "c4.large"
}
tags = {
role = "rolename"
dns_zone = "env_zone"
env = "env"
name = "name-env"
for-use-with-amazon-emr-managed-policies = true
}
keep_job_flow_alive_when_no_steps = true
termination_protection = false

scale_down_behavior = "TERMINATE_AT_TASK_COMPLETION"
configurations = "test-fixtures/emr_configurations.json"

placement_group_config {
instance_role = "MASTER"
}
depends_on = [
aws_route_table_association.test,
aws_iam_role_policy_attachment.emr_instance_profile,
aws_iam_role_policy_attachment.emr_autoscaling_role,
aws_iam_role_policy_attachment.emr_service,
aws_iam_role_policy_attachment.emr_placementgroup
]
service_role = aws_iam_role.emr_service.arn
autoscaling_role = aws_iam_role.emr_autoscaling_role.arn
ebs_root_volume_size = 21
}
`, rName))
}
6 changes: 6 additions & 0 deletions website/docs/r/emr_cluster.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -656,6 +656,7 @@ EOF
* `log_uri` - (Optional) S3 bucket to write the log files of the job flow. If a value is not provided, logs are not created.
* `master_instance_fleet` - (Optional) Configuration block to use an [Instance Fleet](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-instance-fleet.html) for the master node type. Cannot be specified if any `master_instance_group` configuration blocks are set. Detailed below.
* `master_instance_group` - (Optional) Configuration block to use an [Instance Group](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-instance-group-configuration.html#emr-plan-instance-groups) for the [master node type](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-master-core-task-nodes.html#emr-plan-master).
* `placement_group_config` - (Optional) List of placement group configurations for instance roles of an Amazon EMR cluster. Detailed below.
* `scale_down_behavior` - (Optional) Way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an `instance group` is resized.
* `security_configuration` - (Optional) Security configuration name to attach to the EMR cluster. Only valid for EMR clusters with `release_label` 4.8.0 or greater.
* `step` - (Optional) List of steps to run when creating the cluster. See below. It is highly recommended to utilize the [lifecycle configuration block](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html) with `ignore_changes` if other steps are being managed outside of Terraform. This argument is processed in [attribute-as-blocks mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html).
Expand Down Expand Up @@ -807,6 +808,11 @@ This argument is processed in [attribute-as-blocks mode](https://www.terraform.i
* `main_class` - (Optional) Name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file.
* `properties` - (Optional) Key-Value map of Java properties that are set when the step runs. You can use these properties to pass key value pairs to your main function.

### placement_group_config

* `instance_role` - (Required) Role of the instance in the cluster. Valid Values: `MASTER`, `CORE`, `TASK`.
* `placement_strategy` - (Optional) EC2 Placement Group strategy associated with instance role. Valid Values: `SPREAD`, `PARTITION`, `CLUSTER`, `NONE`.

## Attributes Reference

In addition to all arguments above, the following attributes are exported:
Expand Down