From a9626a8a8dedd522fab7c18941c9ff154ec42300 Mon Sep 17 00:00:00 2001
From: Aleksandr Didenko <aa.didenko@yandex.com>
Date: Mon, 31 Jul 2017 19:28:39 +0300
Subject: [PATCH] Add support node config for GKE node pool (#184)

* Add support node config for GKE node pool

* Review fixes:
- Set max items in node config schema
- Fill missing node config fields
- Put test helpers above the test vars

* Update checks in node pool tests

* Fix node pool check match
---
 google/node_config.go                         | 105 +++++++++
 google/resource_container_cluster.go          |  99 +--------
 google/resource_container_node_pool.go        |  65 ++++++
 google/resource_container_node_pool_test.go   | 203 ++++++++++++++++--
 .../docs/r/container_node_pool.html.markdown  |  36 +++-
 5 files changed, 396 insertions(+), 112 deletions(-)
 create mode 100644 google/node_config.go

diff --git a/google/node_config.go b/google/node_config.go
new file mode 100644
index 00000000000..975849c18c1
--- /dev/null
+++ b/google/node_config.go
@@ -0,0 +1,105 @@
+package google
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+var schemaNodeConfig = &schema.Schema{
+	Type:     schema.TypeList,
+	Optional: true,
+	Computed: true,
+	ForceNew: true,
+	MaxItems: 1,
+	Elem: &schema.Resource{
+		Schema: map[string]*schema.Schema{
+			"machine_type": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+				ForceNew: true,
+			},
+
+			"disk_size_gb": {
+				Type:     schema.TypeInt,
+				Optional: true,
+				Computed: true,
+				ForceNew: true,
+				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
+					value := v.(int)
+
+					if value < 10 {
+						errors = append(errors, fmt.Errorf(
+							"%q cannot be less than 10", k))
+					}
+					return
+				},
+			},
+
+			"local_ssd_count": {
+				Type:     schema.TypeInt,
+				Optional: true,
+				Computed: true,
+				ForceNew: true,
+				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
+					value := v.(int)
+
+					if value < 0 {
+						errors = append(errors, fmt.Errorf(
+							"%q cannot be negative", k))
+					}
+					return
+				},
+			},
+
+			"oauth_scopes": {
+				Type:     schema.TypeList,
+				Optional: true,
+				Computed: true,
+				ForceNew: true,
+				Elem: &schema.Schema{
+					Type: schema.TypeString,
+					StateFunc: func(v interface{}) string {
+						return canonicalizeServiceScope(v.(string))
+					},
+				},
+			},
+
+			"service_account": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+				ForceNew: true,
+			},
+
+			"metadata": {
+				Type:     schema.TypeMap,
+				Optional: true,
+				ForceNew: true,
+				Elem:     schema.TypeString,
+			},
+
+			"image_type": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+				ForceNew: true,
+			},
+
+			"labels": {
+				Type:     schema.TypeMap,
+				Optional: true,
+				ForceNew: true,
+				Elem:     schema.TypeString,
+			},
+
+			"tags": {
+				Type:     schema.TypeList,
+				Optional: true,
+				ForceNew: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+		},
+	},
+}
diff --git a/google/resource_container_cluster.go b/google/resource_container_cluster.go
index b3096da40a6..59ff63b6d0e 100644
--- a/google/resource_container_cluster.go
+++ b/google/resource_container_cluster.go
@@ -214,102 +214,8 @@ func resourceContainerCluster() *schema.Resource {
 					},
 				},
 			},
-			"node_config": {
-				Type:     schema.TypeList,
-				Optional: true,
-				Computed: true,
-				ForceNew: true,
-				Elem: &schema.Resource{
-					Schema: map[string]*schema.Schema{
-						"machine_type": {
-							Type:     schema.TypeString,
-							Optional: true,
-							Computed: true,
-							ForceNew: true,
-						},
 
-						"disk_size_gb": {
-							Type:     schema.TypeInt,
-							Optional: true,
-							Computed: true,
-							ForceNew: true,
-							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
-								value := v.(int)
-
-								if value < 10 {
-									errors = append(errors, fmt.Errorf(
-										"%q cannot be less than 10", k))
-								}
-								return
-							},
-						},
-
-						"local_ssd_count": {
-							Type:     schema.TypeInt,
-							Optional: true,
-							Computed: true,
-							ForceNew: true,
-							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
-								value := v.(int)
-
-								if value < 0 {
-									errors = append(errors, fmt.Errorf(
-										"%q cannot be negative", k))
-								}
-								return
-							},
-						},
-
-						"oauth_scopes": {
-							Type:     schema.TypeList,
-							Optional: true,
-							Computed: true,
-							ForceNew: true,
-							Elem: &schema.Schema{
-								Type: schema.TypeString,
-								StateFunc: func(v interface{}) string {
-									return canonicalizeServiceScope(v.(string))
-								},
-							},
-						},
-
-						"service_account": {
-							Type:     schema.TypeString,
-							Optional: true,
-							Computed: true,
-							ForceNew: true,
-						},
-
-						"metadata": {
-							Type:     schema.TypeMap,
-							Optional: true,
-							ForceNew: true,
-							Elem:     schema.TypeString,
-						},
-
-						"image_type": {
-							Type:     schema.TypeString,
-							Optional: true,
-							Computed: true,
-							ForceNew: true,
-						},
-
-						"labels": {
-							Type:     schema.TypeMap,
-							Optional: true,
-							ForceNew: true,
-							Elem:     schema.TypeString,
-						},
-
-						"tags": {
-							Type:     schema.TypeList,
-							Optional: true,
-							ForceNew: true,
-							Elem:     &schema.Schema{Type: schema.TypeString},
-						},
-					},
-				},
-			},
+			"node_config": schemaNodeConfig,
 
 			"node_version": {
 				Type:     schema.TypeString,
@@ -449,9 +355,6 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
 	}
 	if v, ok := d.GetOk("node_config"); ok {
 		nodeConfigs := v.([]interface{})
-		if len(nodeConfigs) > 1 {
-			return fmt.Errorf("Cannot specify more than one node_config.")
-		}
 		nodeConfig := nodeConfigs[0].(map[string]interface{})
 
 		cluster.NodeConfig = &container.NodeConfig{}
diff --git a/google/resource_container_node_pool.go b/google/resource_container_node_pool.go
index a3a2ed30b14..7fd301612f2 100644
--- a/google/resource_container_node_pool.go
+++ b/google/resource_container_node_pool.go
@@ -57,6 +57,8 @@ func resourceContainerNodePool() *schema.Resource {
 				ForceNew: true,
 			},
 
+			"node_config": schemaNodeConfig,
+
 			"autoscaling": &schema.Schema{
 				Type:     schema.TypeList,
 				Optional: true,
@@ -121,6 +123,68 @@ func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) e
 		InitialNodeCount: int64(nodeCount),
 	}
 
+	if v, ok := d.GetOk("node_config"); ok {
+		nodeConfigs := v.([]interface{})
+		nodeConfig := nodeConfigs[0].(map[string]interface{})
+
+		nodePool.Config = &container.NodeConfig{}
+
+		if v, ok = nodeConfig["machine_type"]; ok {
+			nodePool.Config.MachineType = v.(string)
+		}
+
+		if v, ok = nodeConfig["disk_size_gb"]; ok {
+			nodePool.Config.DiskSizeGb = int64(v.(int))
+		}
+
+		if v, ok = nodeConfig["local_ssd_count"]; ok {
+			nodePool.Config.LocalSsdCount = int64(v.(int))
+		}
+
+		if v, ok := nodeConfig["oauth_scopes"]; ok {
+			scopesList := v.([]interface{})
+			scopes := []string{}
+			for _, v := range scopesList {
+				scopes = append(scopes, canonicalizeServiceScope(v.(string)))
+			}
+
+			nodePool.Config.OauthScopes = scopes
+		}
+
+		if v, ok = nodeConfig["service_account"]; ok {
+			nodePool.Config.ServiceAccount = v.(string)
+		}
+
+		if v, ok = nodeConfig["metadata"]; ok {
+			m := make(map[string]string)
+			for k, val := range v.(map[string]interface{}) {
+				m[k] = val.(string)
+			}
+			nodePool.Config.Metadata = m
+		}
+
+		if v, ok = nodeConfig["image_type"]; ok {
+			nodePool.Config.ImageType = v.(string)
+		}
+
+		if v, ok = nodeConfig["labels"]; ok {
+			m := make(map[string]string)
+			for k, val := range v.(map[string]interface{}) {
+				m[k] = val.(string)
+			}
+			nodePool.Config.Labels = m
+		}
+
+		if v, ok := nodeConfig["tags"]; ok {
+			tagsList := v.([]interface{})
+			tags := []string{}
+			for _, v := range tagsList {
+				tags = append(tags, v.(string))
+			}
+			nodePool.Config.Tags = tags
+		}
+	}
+
 	if v, ok := d.GetOk("autoscaling"); ok {
 		autoscaling := v.([]interface{})[0].(map[string]interface{})
 		nodePool.Autoscaling = &container.NodePoolAutoscaling{
@@ -174,6 +238,7 @@ func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) err
 
 	d.Set("name", nodePool.Name)
 	d.Set("initial_node_count", nodePool.InitialNodeCount)
+	d.Set("node_config", flattenClusterNodeConfig(nodePool.Config))
 
 	autoscaling := []map[string]interface{}{}
 	if nodePool.Autoscaling != nil && nodePool.Autoscaling.Enabled {
diff --git a/google/resource_container_node_pool_test.go b/google/resource_container_node_pool_test.go
index c2520e87727..546f2448438 100644
--- a/google/resource_container_node_pool_test.go
+++ b/google/resource_container_node_pool_test.go
@@ -2,7 +2,10 @@ package google
 
 import (
 	"fmt"
+	"reflect"
+	"sort"
 	"strconv"
+	"strings"
 	"testing"
 
 	"github.com/hashicorp/terraform/helper/acctest"
@@ -29,6 +32,38 @@ func TestAccContainerNodePool_basic(t *testing.T) {
 	})
 }
 
+func TestAccContainerNodePool_withNodeConfig(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckContainerNodePoolDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccContainerNodePool_withNodeConfig,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckContainerNodePoolMatches("google_container_node_pool.np_with_node_config"),
+				),
+			},
+		},
+	})
+}
+
+func TestAccContainerNodePool_withNodeConfigScopeAlias(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckContainerNodePoolDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccContainerNodePool_withNodeConfigScopeAlias,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckContainerNodePoolMatches("google_container_node_pool.np_with_node_config_scope_alias"),
+				),
+			},
+		},
+	})
+}
+
 func TestAccContainerNodePool_autoscaling(t *testing.T) {
 	cluster := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
 	np := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
@@ -93,38 +128,53 @@ func testAccCheckContainerNodePoolMatches(n string) resource.TestCheckFunc {
 		}
 
 		attributes := rs.Primary.Attributes
-		found, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
+		nodepool, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
 			config.Project, attributes["zone"], attributes["cluster"], attributes["name"]).Do()
 		if err != nil {
 			return err
 		}
 
-		if found.Name != attributes["name"] {
+		if nodepool.Name != attributes["name"] {
 			return fmt.Errorf("NodePool not found")
 		}
 
-		inc, err := strconv.Atoi(attributes["initial_node_count"])
-		if err != nil {
-			return err
+		type nodepoolTestField struct {
+			tfAttr  string
+			gcpAttr interface{}
 		}
-		if found.InitialNodeCount != int64(inc) {
-			return fmt.Errorf("Mismatched initialNodeCount. TF State: %s. GCP State: %d",
-				attributes["initial_node_count"], found.InitialNodeCount)
+
+		nodepoolTests := []nodepoolTestField{
+			{"initial_node_count", strconv.FormatInt(nodepool.InitialNodeCount, 10)},
+			{"node_config.0.machine_type", nodepool.Config.MachineType},
+			{"node_config.0.disk_size_gb", strconv.FormatInt(nodepool.Config.DiskSizeGb, 10)},
+			{"node_config.0.local_ssd_count", strconv.FormatInt(nodepool.Config.LocalSsdCount, 10)},
+			{"node_config.0.oauth_scopes", nodepool.Config.OauthScopes},
+			{"node_config.0.service_account", nodepool.Config.ServiceAccount},
+			{"node_config.0.metadata", nodepool.Config.Metadata},
+			{"node_config.0.image_type", nodepool.Config.ImageType},
+			{"node_config.0.labels", nodepool.Config.Labels},
+			{"node_config.0.tags", nodepool.Config.Tags},
+		}
+
+		for _, attrs := range nodepoolTests {
+			if c := nodepoolCheckMatch(attributes, attrs.tfAttr, attrs.gcpAttr); c != "" {
+				return fmt.Errorf("%s", c)
+			}
 		}
 
 		tfAS := attributes["autoscaling.#"] == "1"
-		if gcpAS := found.Autoscaling != nil && found.Autoscaling.Enabled == true; tfAS != gcpAS {
+		if gcpAS := nodepool.Autoscaling != nil && nodepool.Autoscaling.Enabled == true; tfAS != gcpAS {
 			return fmt.Errorf("Mismatched autoscaling status. TF State: %t. GCP State: %t", tfAS, gcpAS)
 		}
 		if tfAS {
-			if tf := attributes["autoscaling.0.min_node_count"]; strconv.FormatInt(found.Autoscaling.MinNodeCount, 10) != tf {
+			if tf := attributes["autoscaling.0.min_node_count"]; strconv.FormatInt(nodepool.Autoscaling.MinNodeCount, 10) != tf {
 				return fmt.Errorf("Mismatched Autoscaling.MinNodeCount. TF State: %s. GCP State: %d",
-					tf, found.Autoscaling.MinNodeCount)
+					tf, nodepool.Autoscaling.MinNodeCount)
 			}
 
-			if tf := attributes["autoscaling.0.max_node_count"]; strconv.FormatInt(found.Autoscaling.MaxNodeCount, 10) != tf {
+			if tf := attributes["autoscaling.0.max_node_count"]; strconv.FormatInt(nodepool.Autoscaling.MaxNodeCount, 10) != tf {
 				return fmt.Errorf("Mismatched Autoscaling.MaxNodeCount. TF State: %s. GCP State: %d",
-					tf, found.Autoscaling.MaxNodeCount)
+					tf, nodepool.Autoscaling.MaxNodeCount)
 			}
 
 		}
@@ -203,3 +253,130 @@ resource "google_container_node_pool" "np" {
 	}
 }`, cluster, np)
 }
+
+func nodepoolCheckMatch(attributes map[string]string, attr string, gcp interface{}) string {
+	if gcpList, ok := gcp.([]string); ok {
+		return nodepoolCheckListMatch(attributes, attr, gcpList)
+	}
+	if gcpMap, ok := gcp.(map[string]string); ok {
+		return nodepoolCheckMapMatch(attributes, attr, gcpMap)
+	}
+	tf := attributes[attr]
+	if tf != gcp {
+		return nodepoolMatchError(attr, tf, gcp)
+	}
+	return ""
+}
+
+func nodepoolCheckSetMatch(attributes map[string]string, attr string, gcpList []string) string {
+	num, err := strconv.Atoi(attributes[attr+".#"])
+	if err != nil {
+		return fmt.Sprintf("Error in number conversion for attribute %s: %s", attr, err)
+	}
+	if num != len(gcpList) {
+		return fmt.Sprintf("NodePool has mismatched %s size.\nTF Size: %d\nGCP Size: %d", attr, num, len(gcpList))
+	}
+
+	// We don't know the exact keys of the elements, so go through the whole list looking for matching ones
+	tfAttr := []string{}
+	for k, v := range attributes {
+		if strings.HasPrefix(k, attr) && !strings.HasSuffix(k, "#") {
+			tfAttr = append(tfAttr, v)
+		}
+	}
+	sort.Strings(tfAttr)
+	sort.Strings(gcpList)
+	if reflect.DeepEqual(tfAttr, gcpList) {
+		return ""
+	}
+	return nodepoolMatchError(attr, tfAttr, gcpList)
+}
+
+func nodepoolCheckListMatch(attributes map[string]string, attr string, gcpList []string) string {
+	num, err := strconv.Atoi(attributes[attr+".#"])
+	if err != nil {
+		return fmt.Sprintf("Error in number conversion for attribute %s: %s", attr, err)
+	}
+	if num != len(gcpList) {
+		return fmt.Sprintf("NodePool has mismatched %s size.\nTF Size: %d\nGCP Size: %d", attr, num, len(gcpList))
+	}
+
+	for i, gcp := range gcpList {
+		if tf := attributes[fmt.Sprintf("%s.%d", attr, i)]; tf != gcp {
+			return nodepoolMatchError(fmt.Sprintf("%s[%d]", attr, i), tf, gcp)
+		}
+	}
+
+	return ""
+}
+
+func nodepoolCheckMapMatch(attributes map[string]string, attr string, gcpMap map[string]string) string {
+	num, err := strconv.Atoi(attributes[attr+".%"])
+	if err != nil {
+		return fmt.Sprintf("Error in number conversion for attribute %s: %s", attr, err)
+	}
+	if num != len(gcpMap) {
+		return fmt.Sprintf("NodePool has mismatched %s size.\nTF Size: %d\nGCP Size: %d", attr, num, len(gcpMap))
+	}
+
+	for k, gcp := range gcpMap {
+		if tf := attributes[fmt.Sprintf("%s.%s", attr, k)]; tf != gcp {
+			return nodepoolMatchError(fmt.Sprintf("%s[%s]", attr, k), tf, gcp)
+		}
+	}
+
+	return ""
+}
+
+func nodepoolMatchError(attr, tf interface{}, gcp interface{}) string {
+	return fmt.Sprintf("NodePool has mismatched %s.\nTF State: %+v\nGCP State: %+v", attr, tf, gcp)
+}
+
+var testAccContainerNodePool_withNodeConfig = fmt.Sprintf(`
+resource "google_container_cluster" "cluster" {
+	name = "tf-cluster-nodepool-test-%s"
+	zone = "us-central1-a"
+	initial_node_count = 1
+	master_auth {
+		username = "mr.yoda"
+		password = "adoy.rm"
+	}
+}
+resource "google_container_node_pool" "np_with_node_config" {
+	name = "tf-nodepool-test-%s"
+	zone = "us-central1-a"
+	cluster = "${google_container_cluster.cluster.name}"
+	initial_node_count = 1
+	node_config {
+		machine_type = "g1-small"
+		disk_size_gb = 10
+		oauth_scopes = [
+			"https://www.googleapis.com/auth/compute",
+			"https://www.googleapis.com/auth/devstorage.read_only",
+			"https://www.googleapis.com/auth/logging.write",
+			"https://www.googleapis.com/auth/monitoring"
+		]
+	}
+}`, acctest.RandString(10), acctest.RandString(10))
+
+var testAccContainerNodePool_withNodeConfigScopeAlias = fmt.Sprintf(`
+resource "google_container_cluster" "cluster" {
+	name = "tf-cluster-nodepool-test-%s"
+	zone = "us-central1-a"
+	initial_node_count = 1
+	master_auth {
+		username = "mr.yoda"
+		password = "adoy.rm"
+	}
+}
+resource "google_container_node_pool" "np_with_node_config_scope_alias" {
+	name = "tf-nodepool-test-%s"
+	zone = "us-central1-a"
+	cluster = "${google_container_cluster.cluster.name}"
+	initial_node_count = 1
+	node_config {
+		machine_type = "g1-small"
+		disk_size_gb = 10
+		oauth_scopes = ["compute-rw", "storage-ro", "logging-write", "monitoring"]
+	}
+}`, acctest.RandString(10), acctest.RandString(10))
diff --git a/website/docs/r/container_node_pool.html.markdown b/website/docs/r/container_node_pool.html.markdown
index 4264b9e6e4c..4476013ca19 100644
--- a/website/docs/r/container_node_pool.html.markdown
+++ b/website/docs/r/container_node_pool.html.markdown
@@ -68,12 +68,46 @@ resource "google_container_cluster" "primary" {
 * `name_prefix` - (Optional) Creates a unique name for the node pool beginning
     with the specified prefix. Conflicts with `name`.
 
+* `node_config` - (Optional) The machine type and image to use for all nodes in
+    this pool. Structure is documented below.
+
 * `autoscaling` - (Optional) Configuration required by cluster autoscaler to adjust
     the size of the node pool to the current cluster usage. Structure is documented below.
 
+The `node_config` block supports the following arguments:
+
+* `machine_type` - (Optional) The name of a Google Compute Engine machine type.
+    Defaults to `n1-standard-1`.
+
+* `disk_size_gb` - (Optional) Size of the disk attached to each node, specified
+    in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
+
+* `local_ssd_count` - (Optional) The number of local SSD disks that will be
+    attached to each node. Defaults to 0.
+
+* `oauth_scopes` - (Optional) The set of Google API scopes to be made available
+    on all of the node VMs under the "default" service account. These can be
+    either FQDNs, or scope aliases. The following scopes are necessary to ensure
+    the correct functioning of the node pool:
+
+  * `compute-rw` (`https://www.googleapis.com/auth/compute`)
+  * `storage-ro` (`https://www.googleapis.com/auth/devstorage.read_only`)
+  * `logging-write` (`https://www.googleapis.com/auth/logging.write`),
+    if `logging_service` points to Google
+  * `monitoring` (`https://www.googleapis.com/auth/monitoring`),
+    if `monitoring_service` points to Google
+
+* `service_account` - (Optional) The service account to be used by the Node VMs.
+    If not specified, the "default" service account is used.
+
+* `metadata` - (Optional) The metadata key/value pairs assigned to instances in
+    the node pool.
+
+* `image_type` - (Optional) The image type to use for this node.
+
 The `autoscaling` block supports:
 
 * `minNodeCount` - (Required) Minimum number of nodes in the NodePool. Must be >=1 and
     <= `maxNodeCount`.
 
-* `maxNodeCount` - (Required) Maximum number of nodes in the NodePool. Must be >= minNodeCount.
\ No newline at end of file
+* `maxNodeCount` - (Required) Maximum number of nodes in the NodePool. Must be >= minNodeCount.