From 7841d590903b6759978f1e83086e60cf453eb1b5 Mon Sep 17 00:00:00 2001 From: Zoltan Illes Date: Mon, 7 Oct 2024 16:23:10 +0200 Subject: [PATCH 1/6] add orphan_resource_on_delete argument to ibm_container_vpc_worker_pool --- .../resource_ibm_container_vpc_worker_pool.go | 30 +- ...urce_ibm_container_vpc_worker_pool_test.go | 295 ++++++------------ .../r/container_vpc_worker_pool.html.markdown | 1 + 3 files changed, 113 insertions(+), 213 deletions(-) diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go index b09ba8b9aee..accb9a30c6b 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go @@ -214,6 +214,12 @@ func ResourceIBMContainerVpcWorkerPool() *schema.Resource { Description: "Import an existing WorkerPool from the cluster, instead of creating a new", }, + "orphan_resource_on_delete": { + Type: schema.TypeBool, + Optional: true, + Description: "Orphan the Worker Pool resource, instead of deleting it", + }, + "autoscale_enabled": { Type: schema.TypeBool, Computed: true, @@ -715,14 +721,22 @@ func resourceIBMContainerVpcWorkerPoolDelete(d *schema.ResourceData, meta interf if err != nil { return err } - - err = workerPoolsAPI.DeleteWorkerPool(clusterNameorID, workerPoolNameorID, targetEnv) - if err != nil { - return err + var orphan_resource bool = false + if orod, ok := d.GetOk("orphan_resource_on_delete"); ok { + orphan_resource = orod.(bool) } - _, err = WaitForVpcWorkerDelete(clusterNameorID, workerPoolNameorID, meta, d.Timeout(schema.TimeoutDelete), targetEnv) - if err != nil { - return fmt.Errorf("[ERROR] Error waiting for removing workers of worker pool (%s) of cluster (%s): %s", workerPoolNameorID, clusterNameorID, err) + + if orphan_resource { + log.Printf("[WARN] orphaning %s workerpool", workerPoolNameorID) + } else { + err = workerPoolsAPI.DeleteWorkerPool(clusterNameorID, workerPoolNameorID, targetEnv) + if err != nil { + return err + } + _, err = WaitForVpcWorkerDelete(clusterNameorID, workerPoolNameorID, meta, d.Timeout(schema.TimeoutDelete), targetEnv) + if err != nil { + return fmt.Errorf("[ERROR] Error waiting for removing workers of worker pool (%s) of cluster (%s): %s", workerPoolNameorID, clusterNameorID, err) + } } d.SetId("") return nil @@ -788,7 +802,7 @@ func WaitForWorkerPoolAvailable(d *schema.ResourceData, meta interface{}, cluste func vpcWorkerPoolStateRefreshFunc(client v2.Workers, instanceID string, workerPoolNameOrID string, target v2.ClusterTargetHeader) resource.StateRefreshFunc { return func() (interface{}, string, error) { - workerFields, err := client.ListByWorkerPool(instanceID, "", false, target) + workerFields, err := client.ListByWorkerPool(instanceID, workerPoolNameOrID, false, target) if err != nil { return nil, "", fmt.Errorf("[ERROR] Error retrieving workers for cluster: %s", err) } diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool_test.go b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool_test.go index 43f762b0c32..c252948a614 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool_test.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool_test.go @@ -21,7 +21,7 @@ import ( func TestAccIBMContainerVpcClusterWorkerPoolBasic(t *testing.T) { - name := fmt.Sprintf("tf-vpc-worker-%d", acctest.RandIntRange(10, 100)) + name := fmt.Sprintf("tf-vpc-workerpoolbasic-%d", acctest.RandIntRange(10, 100)) 
resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, @@ -36,6 +36,16 @@ func TestAccIBMContainerVpcClusterWorkerPoolBasic(t *testing.T) { "ibm_container_vpc_worker_pool.test_pool", "zones.#", "1"), resource.TestCheckResourceAttr( "ibm_container_vpc_worker_pool.test_pool", "labels.%", "2"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.test_pool", "worker_count", "1"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.default_pool", "flavor", "cx2.2x4"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.default_pool", "zones.#", "1"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.default_pool", "labels.%", "0"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.default_pool", "worker_count", "1"), ), }, { @@ -44,17 +54,32 @@ func TestAccIBMContainerVpcClusterWorkerPoolBasic(t *testing.T) { resource.TestCheckResourceAttr( "ibm_container_vpc_worker_pool.test_pool", "flavor", "cx2.2x4"), resource.TestCheckResourceAttr( - "ibm_container_vpc_worker_pool.test_pool", "zones.#", "2"), + "ibm_container_vpc_worker_pool.test_pool", "zones.#", "1"), resource.TestCheckResourceAttr( "ibm_container_vpc_worker_pool.test_pool", "labels.%", "3"), resource.TestCheckResourceAttr( "ibm_container_vpc_worker_pool.test_pool", "operating_system", "UBUNTU_24_64"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.test_pool", "worker_count", "2"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.default_pool", "flavor", "cx2.2x4"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.default_pool", "zones.#", "1"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.default_pool", "labels.%", "2"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.default_pool", "worker_count", "2"), ), }, { - ResourceName: "ibm_container_vpc_worker_pool.test_pool", - ImportState: true, - ImportStateVerify: true, + ResourceName: "ibm_container_vpc_worker_pool.test_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"orphan_resource_on_delete", "import_on_create"}, + }, + { + Config: testAccCheckIBMVpcContainerWorkerPoolUpdate(name), + Destroy: true, }, }, }) @@ -149,56 +174,60 @@ func testAccCheckIBMVpcContainerWorkerPoolDestroy(s *terraform.State) error { return nil } -func testAccCheckIBMVpcContainerWorkerPoolBasic(name string) string { +func testAccCheckIBMVpcContainerWorkerPoolBasic(cluster_name string) string { + workerpool_name := cluster_name + "-wp" return fmt.Sprintf(` - provider "ibm" { - region="us-south" - } data "ibm_resource_group" "resource_group" { is_default=true } - data "ibm_is_vpc" "vpc" { - name = "cluster-squad-dallas-test" - } - - data "ibm_is_subnet" "subnet1" { - name = "cluster-squad-dallas-test-01" - } - - data "ibm_is_subnet" "subnet2" { - name = "cluster-squad-dallas-test-02" - } resource "ibm_container_vpc_cluster" "cluster" { - name = "%[1]s" - vpc_id = data.ibm_is_vpc.vpc.id + name = "%[3]s" + vpc_id = "%[1]s" flavor = "cx2.2x4" worker_count = 1 resource_group_id = data.ibm_resource_group.resource_group.id wait_till = "MasterNodeReady" zones { - subnet_id = data.ibm_is_subnet.subnet1.id + subnet_id = "%[2]s" name = "us-south-1" } } + + resource "ibm_container_vpc_worker_pool" "default_pool" { + cluster = ibm_container_vpc_cluster.cluster.id + vpc_id = "%[1]s" + flavor = "cx2.2x4" + worker_count = 1 + worker_pool_name = "default" + 
zones { + subnet_id = "%[2]s" + name = "us-south-1" + } + import_on_create = "true" + } resource "ibm_container_vpc_worker_pool" "test_pool" { cluster = ibm_container_vpc_cluster.cluster.id - worker_pool_name = "%[1]s" + worker_pool_name = "%[4]s" flavor = "cx2.2x4" - vpc_id = data.ibm_is_vpc.vpc.id + vpc_id = "%[1]s" worker_count = 1 resource_group_id = data.ibm_resource_group.resource_group.id operating_system = "UBUNTU_20_64" zones { - name = "us-south-2" - subnet_id = data.ibm_is_subnet.subnet2.id + name = "us-south-1" + subnet_id = "%[2]s" } labels = { "test" = "test-pool" "test1" = "test-pool1" } + depends_on = [ + ibm_container_vpc_worker_pool.default_pool + ] } - `, name) + `, acc.IksClusterVpcID, acc.IksClusterSubnetID, cluster_name, workerpool_name) + } func testAccCheckIBMVpcContainerWorkerPoolSecurityGroups(name string) string { @@ -265,64 +294,64 @@ func testAccCheckIBMVpcContainerWorkerPoolSecurityGroups(name string) string { `, name) } -func testAccCheckIBMVpcContainerWorkerPoolUpdate(name string) string { +func testAccCheckIBMVpcContainerWorkerPoolUpdate(cluster_name string) string { + workerpool_name := cluster_name + "-wp" return fmt.Sprintf(` - provider "ibm" { - region="eu-de" - } data "ibm_resource_group" "resource_group" { is_default=true } - resource "ibm_is_vpc" "vpc" { - name = "%[1]s" - } - resource "ibm_is_subnet" "subnet1" { - name = "%[1]s-1" - vpc = ibm_is_vpc.vpc.id - zone = "eu-de-1" - total_ipv4_address_count = 256 - } - resource "ibm_is_subnet" "subnet2" { - name = "%[1]s-2" - vpc = ibm_is_vpc.vpc.id - zone = "eu-de-2" - total_ipv4_address_count = 256 - } + resource "ibm_container_vpc_cluster" "cluster" { - name = "%[1]s" - vpc_id = ibm_is_vpc.vpc.id + name = "%[3]s" + vpc_id = "%[1]s" flavor = "cx2.2x4" worker_count = 1 resource_group_id = data.ibm_resource_group.resource_group.id wait_till = "MasterNodeReady" zones { - subnet_id = ibm_is_subnet.subnet1.id - name = "eu-de-1" + subnet_id = "%[2]s" + name = "us-south-1" + } + } + resource "ibm_container_vpc_worker_pool" "default_pool" { + cluster = ibm_container_vpc_cluster.cluster.id + vpc_id = "%[1]s" + flavor = "cx2.2x4" + worker_count = 2 + worker_pool_name = "default" + zones { + subnet_id = "%[2]s" + name = "us-south-1" + } + import_on_create = "true" + labels = { + "test" = "default-pool" + "test1" = "default-pool1" } } resource "ibm_container_vpc_worker_pool" "test_pool" { cluster = ibm_container_vpc_cluster.cluster.id - worker_pool_name = "%[1]s" + worker_pool_name = "%[4]s" flavor = "cx2.2x4" - vpc_id = ibm_is_vpc.vpc.id - worker_count = 1 + vpc_id = "%[1]s" + worker_count = 2 resource_group_id = data.ibm_resource_group.resource_group.id operating_system = "UBUNTU_24_64" zones { - name = "eu-de-2" - subnet_id = ibm_is_subnet.subnet2.id - } - zones { - subnet_id = ibm_is_subnet.subnet1.id - name = "eu-de-1" + name = "us-south-1" + subnet_id = "%[2]s" } labels = { "test" = "test-pool" "test1" = "test-pool1" "test2" = "test-pool2" } + depends_on = [ + ibm_container_vpc_worker_pool.default_pool + ] + orphan_resource_on_delete = "true" } - `, name) + `, acc.IksClusterVpcID, acc.IksClusterSubnetID, cluster_name, workerpool_name) } func TestAccIBMContainerVpcClusterWorkerPoolEnvvar(t *testing.T) { @@ -548,147 +577,3 @@ func testAccCheckIBMOpcContainerWorkerPoolBasic(name, openshiftFlavour, openShif } `, name, acc.IksClusterVpcID, acc.IksClusterResourceGroupID, acc.IksClusterSubnetID, openshiftFlavour, openShiftworkerCount, operatingSystem) } - -func 
TestAccIBMContainerVpcClusterWorkerPoolImportOnCreateEnvvar(t *testing.T) { - - name := fmt.Sprintf("tf-vpc-worker-%d", acctest.RandIntRange(10, 100)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheck(t) }, - Providers: acc.TestAccProviders, - CheckDestroy: testAccCheckIBMVpcContainerWorkerPoolDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckIBMOpcContainerWorkerPoolImportOnCreate(name), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "ibm_container_vpc_worker_pool.test_pool", "worker_pool_name", "default"), - resource.TestCheckResourceAttr( - "ibm_container_vpc_worker_pool.test_pool", "labels.%", "1"), - resource.TestCheckResourceAttr( - "ibm_container_vpc_worker_pool.test_pool", "worker_count", "1"), - ), - }, - { - Config: testAccCheckIBMOpcContainerWorkerPoolImportOnCreateClusterUpdate(name), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "ibm_container_vpc_worker_pool.test_pool", "worker_pool_name", "default"), - resource.TestCheckResourceAttr( - "ibm_container_vpc_worker_pool.test_pool", "labels.%", "1"), - resource.TestCheckResourceAttr( - "ibm_container_vpc_worker_pool.test_pool", "worker_count", "1"), - ), - }, - { - Config: testAccCheckIBMOpcContainerWorkerPoolImportOnCreateWPUpdate(name), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "ibm_container_vpc_worker_pool.test_pool", "worker_pool_name", "default"), - resource.TestCheckResourceAttr( - "ibm_container_vpc_worker_pool.test_pool", "labels.%", "1"), - resource.TestCheckResourceAttr( - "ibm_container_vpc_worker_pool.test_pool", "worker_count", "3"), - ), - }, - }, - }) -} -func testAccCheckIBMOpcContainerWorkerPoolImportOnCreate(name string) string { - return fmt.Sprintf(` - resource "ibm_container_vpc_cluster" "cluster" { - name = "%[1]s" - vpc_id = "%[2]s" - flavor = "bx2.4x16" - worker_count = 1 - resource_group_id = "%[3]s" - zones { - subnet_id = "%[4]s" - name = "us-south-1" - } - wait_till = "normal" - worker_labels = { - "test" = "test-pool" - } - } - - resource "ibm_container_vpc_worker_pool" "test_pool" { - cluster = ibm_container_vpc_cluster.cluster.id - vpc_id = "%[2]s" - flavor = "bx2.4x16" - worker_count = 1 - worker_pool_name = "default" - zones { - subnet_id = "%[4]s" - name = "us-south-1" - } - import_on_create = "true" - } - `, name, acc.IksClusterVpcID, acc.IksClusterResourceGroupID, acc.IksClusterSubnetID) -} - -func testAccCheckIBMOpcContainerWorkerPoolImportOnCreateClusterUpdate(name string) string { - return fmt.Sprintf(` - resource "ibm_container_vpc_cluster" "cluster" { - name = "%[1]s" - vpc_id = "%[2]s" - flavor = "bx2.4x16" - worker_count = 3 - resource_group_id = "%[3]s" - zones { - subnet_id = "%[4]s" - name = "us-south-1" - } - wait_till = "normal" - worker_labels = { - "test" = "test-pool" - } - } - - resource "ibm_container_vpc_worker_pool" "test_pool" { - cluster = ibm_container_vpc_cluster.cluster.id - vpc_id = "%[2]s" - flavor = "bx2.4x16" - worker_count = 1 - worker_pool_name = "default" - zones { - subnet_id = "%[4]s" - name = "us-south-1" - } - import_on_create = "true" - } - `, name, acc.IksClusterVpcID, acc.IksClusterResourceGroupID, acc.IksClusterSubnetID) -} - -func testAccCheckIBMOpcContainerWorkerPoolImportOnCreateWPUpdate(name string) string { - return fmt.Sprintf(` - resource "ibm_container_vpc_cluster" "cluster" { - name = "%[1]s" - vpc_id = "%[2]s" - flavor = "bx2.4x16" - worker_count = 1 - resource_group_id = "%[3]s" - zones { - subnet_id = 
"%[4]s" - name = "us-south-1" - } - wait_till = "normal" - worker_labels = { - "test" = "test-pool" - } - } - - resource "ibm_container_vpc_worker_pool" "test_pool" { - cluster = ibm_container_vpc_cluster.cluster.id - vpc_id = "%[2]s" - flavor = "bx2.4x16" - worker_count = 3 - worker_pool_name = "default" - zones { - subnet_id = "%[4]s" - name = "us-south-1" - } - import_on_create = "true" - } - `, name, acc.IksClusterVpcID, acc.IksClusterResourceGroupID, acc.IksClusterSubnetID) -} diff --git a/website/docs/r/container_vpc_worker_pool.html.markdown b/website/docs/r/container_vpc_worker_pool.html.markdown index 01423f0e363..d85e80c4a42 100644 --- a/website/docs/r/container_vpc_worker_pool.html.markdown +++ b/website/docs/r/container_vpc_worker_pool.html.markdown @@ -105,6 +105,7 @@ Review the argument references that you can specify for your resource. - `kms_instance_id` - Instance ID for boot volume encryption. - `kms_account_id` - Account ID for boot volume encryption, if other account is providing the kms. - `import_on_create` - (Optional, Bool) Import an existing WorkerPool from the cluster, instead of creating a new. +- `orphan_resource_on_delete` - (Optional, Bool) Orphan the Worker Pool resource, instead of deleting it. The argument allows to remove the worker pool from the state, without deleting the actual cloud resource. - `security_groups` - (Optional, List) Enables users to define specific security groups for their workers. ## Attribute reference From 2f89e3e7365e39c723306ba3d188ba008d675a2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Zolt=C3=A1n=20Ill=C3=A9s?= Date: Tue, 8 Oct 2024 13:36:22 +0200 Subject: [PATCH 2/6] Update website/docs/r/container_vpc_worker_pool.html.markdown Co-authored-by: lewiseevans --- website/docs/r/container_vpc_worker_pool.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/container_vpc_worker_pool.html.markdown b/website/docs/r/container_vpc_worker_pool.html.markdown index d85e80c4a42..27b99534da4 100644 --- a/website/docs/r/container_vpc_worker_pool.html.markdown +++ b/website/docs/r/container_vpc_worker_pool.html.markdown @@ -105,7 +105,7 @@ Review the argument references that you can specify for your resource. - `kms_instance_id` - Instance ID for boot volume encryption. - `kms_account_id` - Account ID for boot volume encryption, if other account is providing the kms. - `import_on_create` - (Optional, Bool) Import an existing WorkerPool from the cluster, instead of creating a new. -- `orphan_resource_on_delete` - (Optional, Bool) Orphan the Worker Pool resource, instead of deleting it. The argument allows to remove the worker pool from the state, without deleting the actual cloud resource. +- `orphan_resource_on_delete` - (Optional, Bool) Orphan the Worker Pool resource, instead of deleting it. The argument allows to remove the worker pool from the state, without deleting the actual cloud resource. The worker pool can be re-imported into the state using the `import_on_create` argument. - `security_groups` - (Optional, List) Enables users to define specific security groups for their workers. 
## Attribute reference From 6fc5a410e44179f6e585ab629e9ea63be92ea881 Mon Sep 17 00:00:00 2001 From: Zoltan Illes Date: Tue, 8 Oct 2024 13:38:46 +0200 Subject: [PATCH 3/6] rename orphan_resource_on_delete to orphan_on_delete --- .../kubernetes/resource_ibm_container_vpc_worker_pool.go | 4 ++-- .../kubernetes/resource_ibm_container_vpc_worker_pool_test.go | 4 ++-- website/docs/r/container_vpc_worker_pool.html.markdown | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go index accb9a30c6b..d1afd794fea 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go @@ -214,7 +214,7 @@ func ResourceIBMContainerVpcWorkerPool() *schema.Resource { Description: "Import an existing WorkerPool from the cluster, instead of creating a new", }, - "orphan_resource_on_delete": { + "orphan_on_delete": { Type: schema.TypeBool, Optional: true, Description: "Orphan the Worker Pool resource, instead of deleting it", @@ -722,7 +722,7 @@ func resourceIBMContainerVpcWorkerPoolDelete(d *schema.ResourceData, meta interf return err } var orphan_resource bool = false - if orod, ok := d.GetOk("orphan_resource_on_delete"); ok { + if orod, ok := d.GetOk("orphan_on_delete"); ok { orphan_resource = orod.(bool) } diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool_test.go b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool_test.go index c252948a614..54418c2509d 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool_test.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool_test.go @@ -75,7 +75,7 @@ func TestAccIBMContainerVpcClusterWorkerPoolBasic(t *testing.T) { ResourceName: "ibm_container_vpc_worker_pool.test_pool", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"orphan_resource_on_delete", "import_on_create"}, + ImportStateVerifyIgnore: []string{"orphan_on_delete", "import_on_create"}, }, { Config: testAccCheckIBMVpcContainerWorkerPoolUpdate(name), @@ -349,7 +349,7 @@ func testAccCheckIBMVpcContainerWorkerPoolUpdate(cluster_name string) string { depends_on = [ ibm_container_vpc_worker_pool.default_pool ] - orphan_resource_on_delete = "true" + orphan_on_delete = "true" } `, acc.IksClusterVpcID, acc.IksClusterSubnetID, cluster_name, workerpool_name) } diff --git a/website/docs/r/container_vpc_worker_pool.html.markdown b/website/docs/r/container_vpc_worker_pool.html.markdown index 27b99534da4..5c2106ff2a6 100644 --- a/website/docs/r/container_vpc_worker_pool.html.markdown +++ b/website/docs/r/container_vpc_worker_pool.html.markdown @@ -105,7 +105,7 @@ Review the argument references that you can specify for your resource. - `kms_instance_id` - Instance ID for boot volume encryption. - `kms_account_id` - Account ID for boot volume encryption, if other account is providing the kms. - `import_on_create` - (Optional, Bool) Import an existing WorkerPool from the cluster, instead of creating a new. -- `orphan_resource_on_delete` - (Optional, Bool) Orphan the Worker Pool resource, instead of deleting it. The argument allows to remove the worker pool from the state, without deleting the actual cloud resource. The worker pool can be re-imported into the state using the `import_on_create` argument. +- `orphan_on_delete` - (Optional, Bool) Orphan the Worker Pool resource, instead of deleting it. 
The argument allows to remove the worker pool from the state, without deleting the actual cloud resource. The worker pool can be re-imported into the state using the `import_on_create` argument. - `security_groups` - (Optional, List) Enables users to define specific security groups for their workers. ## Attribute reference From d9e98bcb1fb666278b3f3e69937f84b9999d3c8c Mon Sep 17 00:00:00 2001 From: Zoltan Illes Date: Tue, 8 Oct 2024 16:46:56 +0200 Subject: [PATCH 4/6] add orphan_on_delete to the classic workerpool resource --- .../resource_ibm_container_vpc_worker_pool.go | 4 +- .../resource_ibm_container_worker_pool.go | 30 ++- ...resource_ibm_container_worker_pool_test.go | 214 +++++------------- .../r/container_worker_pool.html.markdown | 2 + 4 files changed, 89 insertions(+), 161 deletions(-) diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go index d1afd794fea..09a8d6ac666 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go @@ -211,13 +211,13 @@ func ResourceIBMContainerVpcWorkerPool() *schema.Resource { Type: schema.TypeBool, Optional: true, DiffSuppressFunc: flex.ApplyOnce, - Description: "Import an existing WorkerPool from the cluster, instead of creating a new", + Description: "Import an existing workerpool from the cluster instead of creating a new", }, "orphan_on_delete": { Type: schema.TypeBool, Optional: true, - Description: "Orphan the Worker Pool resource, instead of deleting it", + Description: "Orphan the workerpool resource instead of deleting it", }, "autoscale_enabled": { diff --git a/ibm/service/kubernetes/resource_ibm_container_worker_pool.go b/ibm/service/kubernetes/resource_ibm_container_worker_pool.go index 7e2aaf522cc..2066a8aae6c 100644 --- a/ibm/service/kubernetes/resource_ibm_container_worker_pool.go +++ b/ibm/service/kubernetes/resource_ibm_container_worker_pool.go @@ -5,6 +5,7 @@ package kubernetes import ( "fmt" + "log" "strings" "time" @@ -196,7 +197,13 @@ func ResourceIBMContainerWorkerPool() *schema.Resource { Type: schema.TypeBool, Optional: true, DiffSuppressFunc: flex.ApplyOnce, - Description: "Import a workerpool from a cluster", + Description: "Import an existing workerpool from the cluster instead of creating a new", + }, + + "orphan_on_delete": { + Type: schema.TypeBool, + Optional: true, + Description: "Orphan the workerpool resource instead of deleting it", }, "autoscale_enabled": { @@ -475,13 +482,22 @@ func resourceIBMContainerWorkerPoolDelete(d *schema.ResourceData, meta interface return err } - err = workerPoolsAPI.DeleteWorkerPool(clusterNameorID, workerPoolNameorID, targetEnv) - if err != nil { - return err + var orphan_resource bool = false + if orod, ok := d.GetOk("orphan_on_delete"); ok { + orphan_resource = orod.(bool) } - _, err = WaitForWorkerDelete(clusterNameorID, workerPoolNameorID, meta, d.Timeout(schema.TimeoutUpdate), targetEnv) - if err != nil { - return fmt.Errorf("[ERROR] Error waiting for removing workers of worker pool (%s) of cluster (%s): %s", workerPoolNameorID, clusterNameorID, err) + + if orphan_resource { + log.Printf("[WARN] orphaning %s workerpool", workerPoolNameorID) + } else { + err = workerPoolsAPI.DeleteWorkerPool(clusterNameorID, workerPoolNameorID, targetEnv) + if err != nil { + return err + } + _, err = WaitForWorkerDelete(clusterNameorID, workerPoolNameorID, meta, d.Timeout(schema.TimeoutUpdate), targetEnv) + if err != 
nil { + return fmt.Errorf("[ERROR] Error waiting for removing workers of worker pool (%s) of cluster (%s): %s", workerPoolNameorID, clusterNameorID, err) + } } return nil } diff --git a/ibm/service/kubernetes/resource_ibm_container_worker_pool_test.go b/ibm/service/kubernetes/resource_ibm_container_worker_pool_test.go index 330d7c82ada..0dd451e81b3 100644 --- a/ibm/service/kubernetes/resource_ibm_container_worker_pool_test.go +++ b/ibm/service/kubernetes/resource_ibm_container_worker_pool_test.go @@ -44,6 +44,12 @@ func TestAccIBMContainerWorkerPoolBasic(t *testing.T) { "ibm_container_worker_pool.test_pool", "disk_encryption", "true"), resource.TestCheckResourceAttr( "ibm_container_worker_pool.test_pool", "hardware", "shared"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.default_pool", "size_per_zone", "1"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.default_pool", "labels.%", "0"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.default_pool", "hardware", "shared"), ), }, { @@ -61,6 +67,12 @@ func TestAccIBMContainerWorkerPoolBasic(t *testing.T) { "ibm_container_worker_pool.test_pool", "disk_encryption", "true"), resource.TestCheckResourceAttr( "ibm_container_worker_pool.test_pool", "hardware", "shared"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.default_pool", "size_per_zone", "2"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.default_pool", "labels.%", "2"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.default_pool", "hardware", "shared"), ), }, { @@ -181,13 +193,13 @@ func testAccCheckIBMContainerWorkerPoolBasic(clusterName, workerPoolName string) return fmt.Sprintf(` resource "ibm_container_cluster" "testacc_cluster" { - name = "%s" - datacenter = "%s" - machine_type = "%s" + name = "%[1]s" + datacenter = "%[2]s" + machine_type = "%[3]s" hardware = "shared" - public_vlan_id = "%s" - private_vlan_id = "%s" - kube_version = "%s" + public_vlan_id = "%[4]s" + private_vlan_id = "%[5]s" + kube_version = "%[6]s" wait_till = "OneWorkerNodeReady" operating_system = "UBUNTU_20_64" taints { @@ -197,9 +209,22 @@ resource "ibm_container_cluster" "testacc_cluster" { } } +resource "ibm_container_worker_pool" "default_pool" { + worker_pool_name = "default" + machine_type = "%[3]s" + cluster = ibm_container_cluster.testacc_cluster.id + size_per_zone = 1 + import_on_create = "true" + taints { + key = "key1" + value = "value1" + effect = "NoSchedule" + } +} + resource "ibm_container_worker_pool" "test_pool" { - worker_pool_name = "%s" - machine_type = "%s" + worker_pool_name = "%[7]s" + machine_type = "%[8]s" cluster = ibm_container_cluster.testacc_cluster.id size_per_zone = 1 hardware = "shared" @@ -220,19 +245,40 @@ func testAccCheckIBMContainerWorkerPoolUpdate(clusterName, workerPoolName string return fmt.Sprintf(` resource "ibm_container_cluster" "testacc_cluster" { - name = "%s" - datacenter = "%s" - machine_type = "%s" + name = "%[1]s" + datacenter = "%[2]s" + machine_type = "%[3]s" hardware = "shared" - public_vlan_id = "%s" - private_vlan_id = "%s" - kube_version = "%s" + public_vlan_id = "%[4]s" + private_vlan_id = "%[5]s" + kube_version = "%[6]s" wait_till = "OneWorkerNodeReady" } +resource "ibm_container_worker_pool" "default_pool" { + worker_pool_name = "default" + machine_type = "%[3]s" + cluster = ibm_container_cluster.testacc_cluster.id + size_per_zone = 2 + import_on_create = "true" + taints { + key = "key1" + value = "value1" + effect = "NoSchedule" + } + labels = { + "test" = 
"test-pool" + "test1" = "test-pool1" + } + depends_on = [ + ibm_container_worker_pool.test_pool + ] + orphan_on_delete = "true" +} + resource "ibm_container_worker_pool" "test_pool" { - worker_pool_name = "%s" - machine_type = "%s" + worker_pool_name = "%[7]s" + machine_type = "%[8]s" cluster = ibm_container_cluster.testacc_cluster.id size_per_zone = 2 hardware = "shared" @@ -327,139 +373,3 @@ resource "ibm_container_worker_pool" "test_pool" { } }`, workerPoolName, acc.MachineType, clusterName) } - -func TestAccIBMContainerWorkerPoolImportOnCreate(t *testing.T) { - - clusterName := fmt.Sprintf("tf-cluster-worker-%d", acctest.RandIntRange(10, 100)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheck(t) }, - Providers: acc.TestAccProviders, - CheckDestroy: testAccCheckIBMContainerWorkerPoolDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckIBMContainerWorkerPoolImportOnCreate(clusterName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "worker_pool_name", "default"), - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "size_per_zone", "1"), - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "labels.%", "2"), - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "hardware", "shared"), - ), - }, - { - Config: testAccCheckIBMContainerWorkerPoolImportOnCreateClusterUpdate(clusterName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "worker_pool_name", "default"), - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "size_per_zone", "1"), - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "labels.%", "2"), - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "hardware", "shared"), - ), - }, - { - Config: testAccCheckIBMContainerWorkerPoolImportOnCreateWPUpdate(clusterName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "worker_pool_name", "default"), - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "size_per_zone", "3"), - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "labels.%", "2"), - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "hardware", "shared"), - ), - }, - }, - }) -} - -func testAccCheckIBMContainerWorkerPoolImportOnCreate(clusterName string) string { - return fmt.Sprintf(` - -resource "ibm_container_cluster" "testacc_cluster" { - name = "%s" - datacenter = "%s" - machine_type = "%s" - hardware = "shared" - public_vlan_id = "%s" - private_vlan_id = "%s" - kube_version = "%s" - wait_till = "OneWorkerNodeReady" - default_pool_size = 1 - labels = { - "test" = "test-pool" - "test1" = "test-pool1" - } -} - -resource "ibm_container_worker_pool" "test_pool" { - worker_pool_name = "default" - machine_type = "%[3]s" - cluster = ibm_container_cluster.testacc_cluster.id - size_per_zone = 1 - import_on_create = "true" -}`, clusterName, acc.Datacenter, acc.MachineType, acc.PublicVlanID, acc.PrivateVlanID, acc.KubeVersion) -} - -func testAccCheckIBMContainerWorkerPoolImportOnCreateClusterUpdate(clusterName string) string { - return fmt.Sprintf(` - -resource "ibm_container_cluster" "testacc_cluster" { - name = "%s" - datacenter = "%s" - machine_type = "%s" - hardware = "shared" - public_vlan_id = "%s" - private_vlan_id = "%s" - kube_version = 
"%s" - wait_till = "OneWorkerNodeReady" - default_pool_size = 3 - labels = { - "test" = "test-pool" - "test1" = "test-pool1" - } -} - -resource "ibm_container_worker_pool" "test_pool" { - worker_pool_name = "default" - machine_type = "%[3]s" - cluster = ibm_container_cluster.testacc_cluster.id - size_per_zone = 1 - import_on_create = "true" -}`, clusterName, acc.Datacenter, acc.MachineType, acc.PublicVlanID, acc.PrivateVlanID, acc.KubeVersion) -} - -func testAccCheckIBMContainerWorkerPoolImportOnCreateWPUpdate(clusterName string) string { - return fmt.Sprintf(` - -resource "ibm_container_cluster" "testacc_cluster" { - name = "%s" - datacenter = "%s" - machine_type = "%s" - hardware = "shared" - public_vlan_id = "%s" - private_vlan_id = "%s" - kube_version = "%s" - wait_till = "OneWorkerNodeReady" - default_pool_size = 1 - labels = { - "test" = "test-pool" - "test1" = "test-pool1" - } -} - -resource "ibm_container_worker_pool" "test_pool" { - worker_pool_name = "default" - machine_type = "%[3]s" - cluster = ibm_container_cluster.testacc_cluster.id - size_per_zone = 3 - import_on_create = "true" -}`, clusterName, acc.Datacenter, acc.MachineType, acc.PublicVlanID, acc.PrivateVlanID, acc.KubeVersion) -} diff --git a/website/docs/r/container_worker_pool.html.markdown b/website/docs/r/container_worker_pool.html.markdown index b2fc5f333ff..278a23684a0 100644 --- a/website/docs/r/container_worker_pool.html.markdown +++ b/website/docs/r/container_worker_pool.html.markdown @@ -77,6 +77,8 @@ Review the argument references that you can specify for your resource. - `key` - (Required, String) Key for taint. - `value` - (Required, String) Value for taint. - `effect` - (Required, String) Effect for taint. Accepted values are `NoSchedule`, `PreferNoSchedule`, and `NoExecute`. +- `import_on_create` - (Optional, Bool) Import an existing WorkerPool from the cluster, instead of creating a new. +- `orphan_on_delete` - (Optional, Bool) Orphan the Worker Pool resource, instead of deleting it. The argument allows to remove the worker pool from the state, without deleting the actual cloud resource. The worker pool can be re-imported into the state using the `import_on_create` argument. **Deprecated reference** From 8a58c3ee01b5bbc1e11c500362c06c84e44674cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Zolt=C3=A1n=20Ill=C3=A9s?= Date: Fri, 11 Oct 2024 13:47:42 +0200 Subject: [PATCH 5/6] Apply suggestions from code review for orphan_on_delete doc Co-authored-by: lewiseevans --- website/docs/r/container_vpc_worker_pool.html.markdown | 2 +- website/docs/r/container_worker_pool.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/container_vpc_worker_pool.html.markdown b/website/docs/r/container_vpc_worker_pool.html.markdown index 5c2106ff2a6..c8739395ea4 100644 --- a/website/docs/r/container_vpc_worker_pool.html.markdown +++ b/website/docs/r/container_vpc_worker_pool.html.markdown @@ -105,7 +105,7 @@ Review the argument references that you can specify for your resource. - `kms_instance_id` - Instance ID for boot volume encryption. - `kms_account_id` - Account ID for boot volume encryption, if other account is providing the kms. - `import_on_create` - (Optional, Bool) Import an existing WorkerPool from the cluster, instead of creating a new. -- `orphan_on_delete` - (Optional, Bool) Orphan the Worker Pool resource, instead of deleting it. The argument allows to remove the worker pool from the state, without deleting the actual cloud resource. 
The worker pool can be re-imported into the state using the `import_on_create` argument. +- `orphan_on_delete` - (Optional, Bool) Orphan the Worker Pool resource, instead of deleting it. The argument allows the user to remove the worker pool from the state, without deleting the actual cloud resource. The worker pool can be re-imported into the state using the `import_on_create` argument. - `security_groups` - (Optional, List) Enables users to define specific security groups for their workers. ## Attribute reference diff --git a/website/docs/r/container_worker_pool.html.markdown b/website/docs/r/container_worker_pool.html.markdown index 278a23684a0..6a427b4bce7 100644 --- a/website/docs/r/container_worker_pool.html.markdown +++ b/website/docs/r/container_worker_pool.html.markdown @@ -78,7 +78,7 @@ Review the argument references that you can specify for your resource. - `value` - (Required, String) Value for taint. - `effect` - (Required, String) Effect for taint. Accepted values are `NoSchedule`, `PreferNoSchedule`, and `NoExecute`. - `import_on_create` - (Optional, Bool) Import an existing WorkerPool from the cluster, instead of creating a new. -- `orphan_on_delete` - (Optional, Bool) Orphan the Worker Pool resource, instead of deleting it. The argument allows to remove the worker pool from the state, without deleting the actual cloud resource. The worker pool can be re-imported into the state using the `import_on_create` argument. +- `orphan_on_delete` - (Optional, Bool) Orphan the Worker Pool resource, instead of deleting it. The argument allows the user to remove the worker pool from the state, without deleting the actual cloud resource. The worker pool can be re-imported into the state using the `import_on_create` argument. **Deprecated reference** From 6a4da0d97e9c6b902a74da184995f1c2a52ebeb2 Mon Sep 17 00:00:00 2001 From: Zoltan Illes Date: Sun, 13 Oct 2024 17:28:31 +0200 Subject: [PATCH 6/6] rename vars to orphan_on_delete --- .../kubernetes/resource_ibm_container_vpc_worker_pool.go | 6 +++--- .../kubernetes/resource_ibm_container_worker_pool.go | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go index 09a8d6ac666..0b9a09208f7 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go @@ -721,12 +721,12 @@ func resourceIBMContainerVpcWorkerPoolDelete(d *schema.ResourceData, meta interf if err != nil { return err } - var orphan_resource bool = false + var orphan_on_delete bool = false if orod, ok := d.GetOk("orphan_on_delete"); ok { - orphan_resource = orod.(bool) + orphan_on_delete = orod.(bool) } - if orphan_resource { + if orphan_on_delete { log.Printf("[WARN] orphaning %s workerpool", workerPoolNameorID) } else { err = workerPoolsAPI.DeleteWorkerPool(clusterNameorID, workerPoolNameorID, targetEnv) diff --git a/ibm/service/kubernetes/resource_ibm_container_worker_pool.go b/ibm/service/kubernetes/resource_ibm_container_worker_pool.go index 2066a8aae6c..46d33b71206 100644 --- a/ibm/service/kubernetes/resource_ibm_container_worker_pool.go +++ b/ibm/service/kubernetes/resource_ibm_container_worker_pool.go @@ -482,12 +482,12 @@ func resourceIBMContainerWorkerPoolDelete(d *schema.ResourceData, meta interface return err } - var orphan_resource bool = false + var orphan_on_delete bool = false if orod, ok := d.GetOk("orphan_on_delete"); ok { - 
orphan_resource = orod.(bool) + orphan_on_delete = orod.(bool) } - if orphan_resource { + if orphan_on_delete { log.Printf("[WARN] orphaning %s workerpool", workerPoolNameorID) } else { err = workerPoolsAPI.DeleteWorkerPool(clusterNameorID, workerPoolNameorID, targetEnv)
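
---

Reviewer note: below is a minimal configuration sketch of the orphan-and-readopt workflow this series enables, for context when reading the hunks above. The resource addresses, variable names, pool name, flavor, and zone values are illustrative placeholders, not values taken from the patches; only the `orphan_on_delete` and `import_on_create` arguments and their behavior come from the series itself (the delete function logs a warning and skips the `DeleteWorkerPool` call when `orphan_on_delete` is set, and `import_on_create` adopts an existing pool instead of creating one).

```hcl
# Phase 1: manage the pool, but orphan it on delete.
# With orphan_on_delete set, `terraform destroy` only removes the pool
# from Terraform state; the cloud resource keeps running.
resource "ibm_container_vpc_worker_pool" "app_pool" {
  cluster          = var.cluster_id # placeholder
  worker_pool_name = "app-pool"     # placeholder
  vpc_id           = var.vpc_id     # placeholder
  flavor           = "cx2.2x4"
  worker_count     = 1

  zones {
    subnet_id = var.subnet_id       # placeholder
    name      = "us-south-1"
  }

  orphan_on_delete = true
}

# Phase 2: in a later configuration (or a fresh state), re-adopt the
# orphaned pool instead of creating a new one, using the pre-existing
# import_on_create argument.
resource "ibm_container_vpc_worker_pool" "app_pool" {
  cluster          = var.cluster_id
  worker_pool_name = "app-pool"     # must match the orphaned pool's name
  vpc_id           = var.vpc_id
  flavor           = "cx2.2x4"
  worker_count     = 1

  zones {
    subnet_id = var.subnet_id
    name      = "us-south-1"
  }

  import_on_create = true
}
```

After PATCH 4, the same pair of arguments applies to the classic `ibm_container_worker_pool` resource, with `machine_type` and `size_per_zone` in place of `flavor` and `worker_count`.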