diff --git a/internal/services/containers/kubernetes_cluster_node_pool_resource.go b/internal/services/containers/kubernetes_cluster_node_pool_resource.go index 48a7a6304563..ee2a399ee906 100644 --- a/internal/services/containers/kubernetes_cluster_node_pool_resource.go +++ b/internal/services/containers/kubernetes_cluster_node_pool_resource.go @@ -112,7 +112,6 @@ func resourceKubernetesClusterNodePoolSchema() map[string]*pluginsdk.Schema { "vm_size": { Type: pluginsdk.TypeString, Required: true, - ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, @@ -141,14 +140,13 @@ func resourceKubernetesClusterNodePoolSchema() map[string]*pluginsdk.Schema { }, false), }, - "kubelet_config": schemaNodePoolKubeletConfigForceNew(), + "kubelet_config": schemaNodePoolKubeletConfig(), - "linux_os_config": schemaNodePoolLinuxOSConfigForceNew(), + "linux_os_config": schemaNodePoolLinuxOSConfig(), "fips_enabled": { Type: pluginsdk.TypeBool, Optional: true, - ForceNew: true, }, "gpu_instance": { @@ -184,7 +182,6 @@ func resourceKubernetesClusterNodePoolSchema() map[string]*pluginsdk.Schema { Type: pluginsdk.TypeInt, Optional: true, Computed: true, - ForceNew: true, }, "mode": { @@ -242,7 +239,6 @@ func resourceKubernetesClusterNodePoolSchema() map[string]*pluginsdk.Schema { "os_disk_size_gb": { Type: pluginsdk.TypeInt, Optional: true, - ForceNew: true, Computed: true, ValidateFunc: validation.IntAtLeast(1), }, @@ -250,7 +246,6 @@ func resourceKubernetesClusterNodePoolSchema() map[string]*pluginsdk.Schema { "os_disk_type": { Type: pluginsdk.TypeString, Optional: true, - ForceNew: true, Default: agentpools.OSDiskTypeManaged, ValidateFunc: validation.StringInSlice([]string{ string(agentpools.OSDiskTypeEphemeral), @@ -284,7 +279,6 @@ func resourceKubernetesClusterNodePoolSchema() map[string]*pluginsdk.Schema { "pod_subnet_id": { Type: pluginsdk.TypeString, Optional: true, - ForceNew: true, ValidateFunc: commonids.ValidateSubnetID, }, @@ -309,7 +303,6 @@ func resourceKubernetesClusterNodePoolSchema() map[string]*pluginsdk.Schema { "snapshot_id": { Type: pluginsdk.TypeString, Optional: true, - ForceNew: true, ValidateFunc: snapshots.ValidateSnapshotID, }, @@ -331,9 +324,14 @@ func resourceKubernetesClusterNodePoolSchema() map[string]*pluginsdk.Schema { }, false), }, + "temporary_name_for_rotation": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: containerValidate.KubernetesAgentPoolName, + }, + "ultra_ssd_enabled": { Type: pluginsdk.TypeBool, - ForceNew: true, Default: false, Optional: true, }, @@ -341,7 +339,6 @@ func resourceKubernetesClusterNodePoolSchema() map[string]*pluginsdk.Schema { "vnet_subnet_id": { Type: pluginsdk.TypeString, Optional: true, - ForceNew: true, ValidateFunc: commonids.ValidateSubnetID, }, @@ -373,7 +370,7 @@ func resourceKubernetesClusterNodePoolSchema() map[string]*pluginsdk.Schema { }, false), }, - "zones": commonschema.ZonesMultipleOptionalForceNew(), + "zones": commonschema.ZonesMultipleOptional(), "auto_scaling_enabled": { Type: pluginsdk.TypeBool, @@ -383,13 +380,11 @@ func resourceKubernetesClusterNodePoolSchema() map[string]*pluginsdk.Schema { "node_public_ip_enabled": { Type: pluginsdk.TypeBool, Optional: true, - ForceNew: true, }, "host_encryption_enabled": { Type: pluginsdk.TypeBool, Optional: true, - ForceNew: true, }, } @@ -723,10 +718,39 @@ func resourceKubernetesClusterNodePoolUpdate(d *pluginsdk.ResourceData, meta int props.EnableAutoScaling = utils.Bool(enableAutoScaling) } + if d.HasChange("fips_enabled") { + props.EnableFIPS = 
pointer.To(d.Get("fips_enabled").(bool)) + } + + if d.HasChange("host_encryption_enabled") { + props.EnableEncryptionAtHost = pointer.To(d.Get("host_encryption_enabled").(bool)) + } + + if d.HasChange("kubelet_config") { + kubeletConfigRaw := d.Get("kubelet_config").([]interface{}) + props.KubeletConfig = expandAgentPoolKubeletConfig(kubeletConfigRaw) + } + + if d.HasChange("linux_os_config") { + linuxOSConfigRaw := d.Get("linux_os_config").([]interface{}) + if d.Get("os_type").(string) != string(managedclusters.OSTypeLinux) { + return fmt.Errorf("`linux_os_config` can only be configured when `os_type` is set to `linux`") + } + linuxOSConfig, err := expandAgentPoolLinuxOSConfig(linuxOSConfigRaw) + if err != nil { + return err + } + props.LinuxOSConfig = linuxOSConfig + } + if d.HasChange("max_count") || enableAutoScaling { props.MaxCount = utils.Int64(int64(d.Get("max_count").(int))) } + if d.HasChange("max_pods") { + props.MaxPods = pointer.To(int64(d.Get("max_pods").(int))) + } + if d.HasChange("mode") { mode := agentpools.AgentPoolMode(d.Get("mode").(string)) props.Mode = &mode @@ -740,8 +764,12 @@ func resourceKubernetesClusterNodePoolUpdate(d *pluginsdk.ResourceData, meta int props.Count = utils.Int64(int64(d.Get("node_count").(int))) } + if d.HasChange("node_public_ip_enabled") { + props.EnableNodePublicIP = pointer.To(d.Get("node_public_ip_enabled").(bool)) + } + if d.HasChange("node_public_ip_prefix_id") { - props.NodePublicIPPrefixID = utils.String(d.Get("node_public_ip_prefix_id").(string)) + props.NodePublicIPPrefixID = pointer.To(d.Get("node_public_ip_prefix_id").(string)) } if d.HasChange("orchestrator_version") { @@ -768,10 +796,26 @@ func resourceKubernetesClusterNodePoolUpdate(d *pluginsdk.ResourceData, meta int props.Tags = tags.Expand(t) } + if d.HasChange("os_disk_type") { + props.OsDiskType = pointer.To(agentpools.OSDiskType(d.Get("os_disk_type").(string))) + } + + if d.HasChange("os_disk_size_gb") { + props.OsDiskSizeGB = pointer.To(int64(d.Get("os_disk_size_gb").(int))) + } + if d.HasChange("os_sku") { props.OsSKU = pointer.To(agentpools.OSSKU(d.Get("os_sku").(string))) } + if d.HasChange("pod_subnet_id") { + props.PodSubnetID = pointer.To(d.Get("pod_subnet_id").(string)) + } + + if d.HasChange("ultra_ssd_enabled") { + props.EnableUltraSSD = pointer.To(d.Get("ultra_ssd_enabled").(bool)) + } + if d.HasChange("upgrade_settings") { upgradeSettingsRaw := d.Get("upgrade_settings").([]interface{}) props.UpgradeSettings = expandAgentPoolUpgradeSettings(upgradeSettingsRaw) @@ -781,6 +825,27 @@ func resourceKubernetesClusterNodePoolUpdate(d *pluginsdk.ResourceData, meta int mode := agentpools.ScaleDownMode(d.Get("scale_down_mode").(string)) props.ScaleDownMode = &mode } + + if d.HasChange("snapshot_id") { + props.CreationData = &agentpools.CreationData{ + SourceResourceId: pointer.To(d.Get("snapshot_id").(string)), + } + } + + if d.HasChange("vm_size") { + props.VMSize = pointer.To(d.Get("vm_size").(string)) + } + + if d.HasChange("vnet_subnet_id") { + if subnetIDValue, ok := d.GetOk("vnet_subnet_id"); ok { + subnetID, err := commonids.ParseSubnetID(subnetIDValue.(string)) + if err != nil { + return err + } + props.VnetSubnetID = pointer.To(subnetID.ID()) + } + } + if d.HasChange("workload_runtime") { runtime := agentpools.WorkloadRuntime(d.Get("workload_runtime").(string)) props.WorkloadRuntime = &runtime @@ -798,6 +863,11 @@ func resourceKubernetesClusterNodePoolUpdate(d *pluginsdk.ResourceData, meta int props.NetworkProfile = 
expandAgentPoolNetworkProfile(d.Get("node_network_profile").([]interface{})) } + if d.HasChange("zones") { + zones := zones.ExpandUntyped(d.Get("zones").(*schema.Set).List()) + props.AvailabilityZones = &zones + } + // validate the auto-scale fields are both set/unset to prevent a continual diff maxCount := 0 if props.MaxCount != nil { @@ -825,11 +895,91 @@ func resourceKubernetesClusterNodePoolUpdate(d *pluginsdk.ResourceData, meta int props.MinCount = nil } - log.Printf("[DEBUG] Updating existing %s..", *id) - existing.Model.Properties = props - err = client.CreateOrUpdateThenPoll(ctx, *id, *existing.Model) - if err != nil { - return fmt.Errorf("updating Node Pool %s: %+v", *id, err) + // evaluate if the nodepool needs to be cycled + cycleNodePoolProperties := []string{ + "fips_enabled", + "host_encryption_enabled", + "kubelet_config", + "linux_os_config", + "max_pods", + "node_public_ip_enabled", + "os_disk_size_gb", + "os_disk_type", + "pod_subnet_id", + "snapshot_id", + "ultra_ssd_enabled", + "vm_size", + "vnet_subnet_id", + "zones", + } + + // if the node pool name has changed, it means the initial attempt at resizing failed + cycleNodePool := d.HasChanges(cycleNodePoolProperties...) + // os_sku can only be updated if the current and new os_sku are either Ubuntu or AzureLinux + if d.HasChange("os_sku") { + oldOsSkuRaw, newOsSkuRaw := d.GetChange("os_sku") + oldOsSku := oldOsSkuRaw.(string) + newOsSku := newOsSkuRaw.(string) + if oldOsSku != string(managedclusters.OSSKUUbuntu) && oldOsSku != string(managedclusters.OSSKUAzureLinux) { + cycleNodePool = true + } + if newOsSku != string(managedclusters.OSSKUUbuntu) && newOsSku != string(managedclusters.OSSKUAzureLinux) { + cycleNodePool = true + } + } + + if cycleNodePool { + log.Printf("[DEBUG] Cycling Node Pool..") + // to provide a seamless updating experience for the node pool we need to cycle it by provisioning a temporary one, + // tearing down the existing node pool and then bringing up the new one. + + if v := d.Get("temporary_name_for_rotation").(string); v == "" { + return fmt.Errorf("`temporary_name_for_rotation` must be specified when updating any of the following properties %q", cycleNodePoolProperties) + } + + temporaryNodePoolName := d.Get("temporary_name_for_rotation").(string) + tempNodePoolId := agentpools.NewAgentPoolID(id.SubscriptionId, id.ResourceGroupName, id.ManagedClusterName, temporaryNodePoolName) + + tempExisting, err := client.Get(ctx, tempNodePoolId) + if !response.WasNotFound(tempExisting.HttpResponse) && err != nil { + return fmt.Errorf("checking for existing temporary node pool %s: %+v", tempNodePoolId, err) + } + + tempAgentProfile := *existing.Model + tempAgentProfile.Name = &temporaryNodePoolName + + // if the temp node pool already exists due to a previous failure, don't bother spinning it up. 
+ // the temporary nodepool is created with the new values + if tempExisting.Model == nil { + if err := retryNodePoolCreation(ctx, client, tempNodePoolId, tempAgentProfile); err != nil { + return fmt.Errorf("creating temporary %s: %+v", tempNodePoolId, err) + } + } + + // delete the old node pool if it exists + if existing.Model != nil { + if err := client.DeleteThenPoll(ctx, *id); err != nil { + return fmt.Errorf("deleting old %s: %+v", *id, err) + } + } + + // create the new node pool with the new data + if err := retryNodePoolCreation(ctx, client, *id, *existing.Model); err != nil { + log.Printf("[DEBUG] Creation of redefined node pool failed") + return fmt.Errorf("creating default %s: %+v", *id, err) + } + + if err := client.DeleteThenPoll(ctx, tempNodePoolId); err != nil { + return fmt.Errorf("deleting temporary %s: %+v", tempNodePoolId, err) + } + + log.Printf("[DEBUG] Cycled Node Pool..") + } else { + log.Printf("[DEBUG] Updating existing %s..", *id) + err = client.CreateOrUpdateThenPoll(ctx, *id, *existing.Model) + if err != nil { + return fmt.Errorf("updating Node Pool %s: %+v", *id, err) + } } d.Partial(false) diff --git a/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go b/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go index c8a2f658049a..dd0daaaf3537 100644 --- a/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go +++ b/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/hashicorp/go-azure-helpers/lang/pointer" "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2024-05-01/agentpools" @@ -309,6 +310,28 @@ func TestAccKubernetesClusterNodePool_manualScaleUpdate(t *testing.T) { }) } +func TestAccKubernetesClusterNodePool_manualScaleVMSku(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") + r := KubernetesClusterNodePoolResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.manualScaleVMSkuConfig(data, "Standard_F2s_v2"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("temporary_name_for_rotation"), + { + Config: r.manualScaleVMSkuConfig(data, "Standard_F4s_v2"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("temporary_name_for_rotation"), + }) +} + func TestAccKubernetesClusterNodePool_modeSystem(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} @@ -729,13 +752,20 @@ func TestAccKubernetesClusterNodePool_ultraSSD(t *testing.T) { r := KubernetesClusterNodePoolResource{} data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.ultraSSD(data, false), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("temporary_name_for_rotation"), { Config: r.ultraSSD(data, true), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, - data.ImportStep(), + data.ImportStep("temporary_name_for_rotation"), }) } @@ -1102,6 +1132,123 @@ func TestAccKubernetesClusterNodePool_virtualNetworkOwnershipRaceCondition(t *te }) } +func 
TestAccKubernetesClusterNodePool_updateVmSizeAfterFailureWithTempAndOriginal(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") + r := KubernetesClusterNodePoolResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.manualScaleVMSkuConfig(data, "Standard_F2s_v2"), + Check: acceptance.ComposeTestCheckFunc( + check.That("azurerm_kubernetes_cluster_node_pool.test").ExistsInAzure(r), + // create the temporary node pool to simulate the case where both old node pool and temp node pool exist + data.CheckWithClientForResource(func(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) error { + if _, ok := ctx.Deadline(); !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, 1*time.Hour) + defer cancel() + } + + client := clients.Containers.AgentPoolsClient + + originalNodePoolId, err := agentpools.ParseAgentPoolID(state.Attributes["id"]) + if err != nil { + return fmt.Errorf("retrieving %s: %+v", originalNodePoolId, err) + } + + resp, err := client.Get(ctx, *originalNodePoolId) + if err != nil { + return fmt.Errorf("retrieving %s: %+v", *originalNodePoolId, err) + } + if resp.Model == nil { + return fmt.Errorf("retrieving %s: model was nil", *originalNodePoolId) + } + + tempNodePoolName := "temp" + profile := resp.Model + profile.Name = &tempNodePoolName + profile.Properties.VMSize = pointer.To("Standard_F4s_v2") + + tempNodePoolId := agentpools.NewAgentPoolID(originalNodePoolId.SubscriptionId, originalNodePoolId.ResourceGroupName, originalNodePoolId.ManagedClusterName, tempNodePoolName) + if err := client.CreateOrUpdateThenPoll(ctx, tempNodePoolId, *profile); err != nil { + return fmt.Errorf("creating %s: %+v", tempNodePoolId, err) + } + + return nil + }, data.ResourceName), + ), + }, + { + Config: r.manualScaleVMSkuConfig(data, "Standard_F4s_v2"), + Check: acceptance.ComposeTestCheckFunc( + check.That("azurerm_kubernetes_cluster_node_pool.test").ExistsInAzure(r), + ), + }, + data.ImportStep("temporary_name_for_rotation"), + }) +} + +func TestAccKubernetesClusterNodePool_updateVmSizeAfterFailureWithTempWithoutOriginal(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") + r := KubernetesClusterNodePoolResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.manualScaleVMSkuConfig(data, "Standard_F2s_v2"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + // create the temporary node pool and delete the old node pool to simulate the case where resizing fails when trying to bring up the new node pool + data.CheckWithClientForResource(func(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) error { + if _, ok := ctx.Deadline(); !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, 1*time.Hour) + defer cancel() + } + + client := clients.Containers.AgentPoolsClient + + originalNodePoolId, err := agentpools.ParseAgentPoolID(state.Attributes["id"]) + if err != nil { + return fmt.Errorf("retrieving %s: %+v", originalNodePoolId, err) + } + resp, err := client.Get(ctx, *originalNodePoolId) + if err != nil { + return fmt.Errorf("retrieving %s: %+v", *originalNodePoolId, err) + } + if resp.Model == nil { + return fmt.Errorf("retrieving %s: model was nil", *originalNodePoolId) + } + + tempNodePoolName := "temp" + profile := resp.Model + profile.Name = &tempNodePoolName + profile.Properties.VMSize = pointer.To("Standard_F4s_v2") 
+ + tempNodePoolId := agentpools.NewAgentPoolID(originalNodePoolId.SubscriptionId, originalNodePoolId.ResourceGroupName, originalNodePoolId.ManagedClusterName, tempNodePoolName) + if err := client.CreateOrUpdateThenPoll(ctx, tempNodePoolId, *profile); err != nil { + return fmt.Errorf("creating %s: %+v", tempNodePoolId, err) + } + + if err := client.DeleteThenPoll(ctx, *originalNodePoolId); err != nil { + return fmt.Errorf("deleting original %s: %+v", originalNodePoolId, err) + } + + return nil + }, data.ResourceName), + ), + // the plan will show that the default node pool name has been set to "temp" and we're trying to set it back to "default" + ExpectNonEmptyPlan: true, + }, + { + Config: r.manualScaleVMSkuConfig(data, "Standard_F4s_v2"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("temporary_name_for_rotation"), + }) +} + func (r KubernetesClusterNodePoolResource) autoScaleConfig(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { @@ -1559,6 +1706,24 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" { `, r.templateConfig(data), numberOfAgents) } +func (r KubernetesClusterNodePoolResource) manualScaleVMSkuConfig(data acceptance.TestData, sku string) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +%s + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "%s" + node_count = 1 + temporary_name_for_rotation = "temporal" +} +`, r.templateConfig(data), sku) +} + func (r KubernetesClusterNodePoolResource) modeSystemConfig(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { @@ -2300,11 +2465,12 @@ resource "azurerm_kubernetes_cluster" "test" { } } resource "azurerm_kubernetes_cluster_node_pool" "test" { - name = "internal" - kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id - vm_size = "Standard_D2s_v3" - ultra_ssd_enabled = %t - zones = ["1", "2", "3"] + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_D2s_v3" + temporary_name_for_rotation = "temporal" + ultra_ssd_enabled = %t + zones = ["1", "2", "3"] } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, ultraSSDEnabled) } diff --git a/internal/services/containers/kubernetes_cluster_resource.go b/internal/services/containers/kubernetes_cluster_resource.go index cbd23a719b70..ea916b50ebe8 100644 --- a/internal/services/containers/kubernetes_cluster_resource.go +++ b/internal/services/containers/kubernetes_cluster_resource.go @@ -2420,7 +2420,7 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} tempAgentProfile.Name = &temporaryNodePoolName // if the temp node pool already exists due to a previous failure, don't bother spinning it up if tempExisting.Model == nil { - if err := retrySystemNodePoolCreation(ctx, nodePoolsClient, tempNodePoolId, tempAgentProfile); err != nil { + if err := retryNodePoolCreation(ctx, nodePoolsClient, tempNodePoolId, tempAgentProfile); err != nil { return fmt.Errorf("creating temporary %s: %+v", tempNodePoolId, err) } } @@ -2433,7 +2433,7 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} } // create the default node pool with the new vm size - if err := retrySystemNodePoolCreation(ctx, nodePoolsClient, defaultNodePoolId, agentProfile); err != nil { + if err := retryNodePoolCreation(ctx, nodePoolsClient, 
defaultNodePoolId, agentProfile); err != nil { // if creation of the default node pool fails we automatically fall back to the temporary node pool // in func findDefaultNodePool log.Printf("[DEBUG] Creation of resized default node pool failed") @@ -4574,8 +4574,8 @@ func flattenKubernetesClusterMetricsProfile(input *managedclusters.ManagedCluste return pointer.From(input.CostAnalysis.Enabled) } -func retrySystemNodePoolCreation(ctx context.Context, client *agentpools.AgentPoolsClient, id agentpools.AgentPoolId, profile agentpools.AgentPool) error { - // retries the creation of a system node pool 3 times +func retryNodePoolCreation(ctx context.Context, client *agentpools.AgentPoolsClient, id agentpools.AgentPoolId, profile agentpools.AgentPool) error { + // retries the creation of a node pool 3 times var err error for attempt := 0; attempt < 3; attempt++ { if err = client.CreateOrUpdateThenPoll(ctx, id, profile); err == nil { diff --git a/internal/services/containers/kubernetes_nodepool.go b/internal/services/containers/kubernetes_nodepool.go index d8747c83769b..ed720a91f32e 100644 --- a/internal/services/containers/kubernetes_nodepool.go +++ b/internal/services/containers/kubernetes_nodepool.go @@ -353,96 +353,6 @@ func schemaNodePoolKubeletConfig() *pluginsdk.Schema { } } -func schemaNodePoolKubeletConfigForceNew() *pluginsdk.Schema { - return &pluginsdk.Schema{ - Type: pluginsdk.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Elem: &pluginsdk.Resource{ - Schema: map[string]*pluginsdk.Schema{ - "cpu_manager_policy": { - Type: pluginsdk.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{ - "none", - "static", - }, false), - }, - - "cpu_cfs_quota_enabled": { - Type: pluginsdk.TypeBool, - Optional: true, - Default: true, - ForceNew: true, - }, - - "cpu_cfs_quota_period": { - Type: pluginsdk.TypeString, - Optional: true, - ForceNew: true, - }, - - "image_gc_high_threshold": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(0, 100), - }, - - "image_gc_low_threshold": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(0, 100), - }, - - "topology_manager_policy": { - Type: pluginsdk.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{ - "none", - "best-effort", - "restricted", - "single-numa-node", - }, false), - }, - - "allowed_unsafe_sysctls": { - Type: pluginsdk.TypeSet, - Optional: true, - ForceNew: true, - Elem: &pluginsdk.Schema{ - Type: pluginsdk.TypeString, - }, - }, - - "container_log_max_size_mb": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - }, - - // TODO 5.0: change this to `container_log_max_files` - "container_log_max_line": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntAtLeast(2), - }, - - "pod_max_pid": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - }, - }, - }, - } -} - func schemaNodePoolLinuxOSConfig() *pluginsdk.Schema { return &pluginsdk.Schema{ Type: pluginsdk.TypeList, @@ -483,50 +393,6 @@ func schemaNodePoolLinuxOSConfig() *pluginsdk.Schema { } } -func schemaNodePoolLinuxOSConfigForceNew() *pluginsdk.Schema { - return &pluginsdk.Schema{ - Type: pluginsdk.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Elem: &pluginsdk.Resource{ - Schema: map[string]*pluginsdk.Schema{ - "sysctl_config": schemaNodePoolSysctlConfigForceNew(), - - "transparent_huge_page_enabled": 
{ - Type: pluginsdk.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{ - "always", - "madvise", - "never", - }, false), - }, - - "transparent_huge_page_defrag": { - Type: pluginsdk.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{ - "always", - "defer", - "defer+madvise", - "madvise", - "never", - }, false), - }, - - "swap_file_size_mb": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - }, - }, - }, - } -} - func schemaNodePoolSysctlConfig() *pluginsdk.Schema { return &pluginsdk.Schema{ Type: pluginsdk.TypeList, @@ -711,220 +577,6 @@ func schemaNodePoolSysctlConfig() *pluginsdk.Schema { } } -func schemaNodePoolSysctlConfigForceNew() *pluginsdk.Schema { - return &pluginsdk.Schema{ - Type: pluginsdk.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Elem: &pluginsdk.Resource{ - Schema: map[string]*pluginsdk.Schema{ - "fs_aio_max_nr": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(65536, 6553500), - }, - - "fs_file_max": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(8192, 12000500), - }, - - "fs_inotify_max_user_watches": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(781250, 2097152), - }, - - "fs_nr_open": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(8192, 20000500), - }, - - "kernel_threads_max": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(20, 513785), - }, - - "net_core_netdev_max_backlog": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(1000, 3240000), - }, - - "net_core_optmem_max": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(20480, 4194304), - }, - - "net_core_rmem_default": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(212992, 134217728), - }, - - "net_core_rmem_max": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(212992, 134217728), - }, - - "net_core_somaxconn": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(4096, 3240000), - }, - - "net_core_wmem_default": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(212992, 134217728), - }, - - "net_core_wmem_max": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(212992, 134217728), - }, - - "net_ipv4_ip_local_port_range_min": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(1024, 60999), - }, - - "net_ipv4_ip_local_port_range_max": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(32768, 65535), - }, - - "net_ipv4_neigh_default_gc_thresh1": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(128, 80000), - }, - - "net_ipv4_neigh_default_gc_thresh2": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(512, 90000), - }, - - "net_ipv4_neigh_default_gc_thresh3": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: 
validation.IntBetween(1024, 100000), - }, - - "net_ipv4_tcp_fin_timeout": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(5, 120), - }, - - "net_ipv4_tcp_keepalive_intvl": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(10, 90), - }, - - "net_ipv4_tcp_keepalive_probes": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(1, 15), - }, - - "net_ipv4_tcp_keepalive_time": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(30, 432000), - }, - - "net_ipv4_tcp_max_syn_backlog": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(128, 3240000), - }, - - "net_ipv4_tcp_max_tw_buckets": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(8000, 1440000), - }, - - "net_ipv4_tcp_tw_reuse": { - Type: pluginsdk.TypeBool, - Optional: true, - ForceNew: true, - }, - - "net_netfilter_nf_conntrack_buckets": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(65536, 524288), - }, - - "net_netfilter_nf_conntrack_max": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(131072, 2097152), - }, - - "vm_max_map_count": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(65530, 262144), - }, - - "vm_swappiness": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(0, 100), - }, - - "vm_vfs_cache_pressure": { - Type: pluginsdk.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(0, 100), - }, - }, - }, - } -} - func schemaNodePoolNetworkProfile() *pluginsdk.Schema { return &pluginsdk.Schema{ Type: pluginsdk.TypeList, diff --git a/website/docs/r/kubernetes_cluster_node_pool.html.markdown b/website/docs/r/kubernetes_cluster_node_pool.html.markdown index 32533569cf5c..41f3ee87c379 100644 --- a/website/docs/r/kubernetes_cluster_node_pool.html.markdown +++ b/website/docs/r/kubernetes_cluster_node_pool.html.markdown @@ -14,6 +14,8 @@ Manages a Node Pool within a Kubernetes Cluster ~> **NOTE:** Multiple Node Pools are only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets. +-> **Note:** Changing certain properties is done by cycling the node pool. When cycling it, it doesn’t perform cordon and drain, and it will disrupt rescheduling pods currently running on the previous node pool. `temporary_name_for_rotation` must be specified when changing any of the following properties: `fips_enabled`, `host_encryption_enabled`, `kubelet_config`, `linux_os_config`, `max_pods`, `node_public_ip_enabled`, `os_disk_size_gb`, `os_disk_type`, `pod_subnet_id`, `snapshot_id`, `ultra_ssd_enabled`, `vm_size`, `vnet_subnet_id`, `zones`. + ## Example Usage This example provisions a basic Kubernetes Node Pool. Other examples of the `azurerm_kubernetes_cluster_node_pool` resource can be found in [the `./examples/kubernetes` directory within the GitHub Repository](https://github.com/hashicorp/terraform-provider-azurerm/tree/main/examples/kubernetes) @@ -66,7 +68,7 @@ The following arguments are supported: ~> **NOTE:** The type of Default Node Pool for the Kubernetes Cluster must be `VirtualMachineScaleSets` to attach multiple node pools. 
-* `vm_size` - (Required) The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created. +* `vm_size` - (Required) The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this property requires specifying `temporary_name_for_rotation`. --- @@ -74,11 +76,11 @@ The following arguments are supported: * `auto_scaling_enabled` - (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/azure/aks/cluster-autoscaler). -* `host_encryption_enabled` - (Optional) Should the nodes in this Node Pool have host encryption enabled? Changing this forces a new resource to be created. +* `host_encryption_enabled` - (Optional) Should the nodes in this Node Pool have host encryption enabled? Changing this property requires specifying `temporary_name_for_rotation`. ~> **NOTE:** Additional fields must be configured depending on the value of this field - see below. -* `node_public_ip_enabled` - (Optional) Should each node have a Public IP Address? Changing this forces a new resource to be created. +* `node_public_ip_enabled` - (Optional) Should each node have a Public IP Address? Changing this property requires specifying `temporary_name_for_rotation`. * `eviction_policy` - (Optional) The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are `Deallocate` and `Delete`. Changing this forces a new resource to be created. @@ -86,11 +88,11 @@ The following arguments are supported: * `host_group_id` - (Optional) The fully qualified resource ID of the Dedicated Host Group to provision virtual machines from. Changing this forces a new resource to be created. -* `kubelet_config` - (Optional) A `kubelet_config` block as defined below. Changing this forces a new resource to be created. +* `kubelet_config` - (Optional) A `kubelet_config` block as defined below. Changing this requires specifying `temporary_name_for_rotation`. -* `linux_os_config` - (Optional) A `linux_os_config` block as defined below. Changing this forces a new resource to be created. +* `linux_os_config` - (Optional) A `linux_os_config` block as defined below. Changing this requires specifying `temporary_name_for_rotation`. -* `fips_enabled` - (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. +* `fips_enabled` - (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this property requires specifying `temporary_name_for_rotation`. ~> **Note:** FIPS support is in Public Preview - more information and details on how to opt into the Preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview). @@ -98,7 +100,7 @@ The following arguments are supported: * `kubelet_disk_type` - (Optional) The type of disk used by kubelet. Possible values are `OS` and `Temporary`. -* `max_pods` - (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. +* `max_pods` - (Optional) The maximum number of pods that can run on each agent. Changing this property requires specifying `temporary_name_for_rotation`. * `mode` - (Optional) Should this Node Pool be used for System or User resources? Possible values are `System` and `User`. Defaults to `User`. 
@@ -114,11 +116,11 @@ The following arguments are supported: -> **Note:** This version must be supported by the Kubernetes Cluster - as such the version of Kubernetes used on the Cluster/Control Plane may need to be upgraded first. -* `os_disk_size_gb` - (Optional) The Agent Operating System disk size in GB. Changing this forces a new resource to be created. +* `os_disk_size_gb` - (Optional) The Agent Operating System disk size in GB. Changing this property requires specifying `temporary_name_for_rotation`. -* `os_disk_type` - (Optional) The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created. +* `os_disk_type` - (Optional) The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this property requires specifying `temporary_name_for_rotation`. -* `pod_subnet_id` - (Optional) The ID of the Subnet where the pods in the Node Pool should exist. Changing this forces a new resource to be created. +* `pod_subnet_id` - (Optional) The ID of the Subnet where the pods in the Node Pool should exist. Changing this property requires specifying `temporary_name_for_rotation`. * `os_sku` - (Optional) Specifies the OS SKU used by the agent pool. Possible values are `AzureLinux`, `Ubuntu`, `Windows2019` and `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this from `AzureLinux` or `Ubuntu` to `AzureLinux` or `Ubuntu` will not replace the resource, otherwise it forces a new resource to be created. @@ -134,7 +136,7 @@ The following arguments are supported: ~> **Note:** This field can only be configured when `priority` is set to `Spot`. -* `snapshot_id` - (Optional) The ID of the Snapshot which should be used to create this Node Pool. Changing this forces a new resource to be created. +* `snapshot_id` - (Optional) The ID of the Snapshot which should be used to create this Node Pool. Changing this property requires specifying `temporary_name_for_rotation`. * `tags` - (Optional) A mapping of tags to assign to the resource. @@ -142,11 +144,13 @@ The following arguments are supported: * `scale_down_mode` - (Optional) Specifies how the node pool should deal with scaled-down nodes. Allowed values are `Delete` and `Deallocate`. Defaults to `Delete`. -* `ultra_ssd_enabled` - (Optional) Used to specify whether the UltraSSD is enabled in the Node Pool. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/use-ultra-disks) for more information. Changing this forces a new resource to be created. +* `temporary_name_for_rotation` - (Optional) Specifies the name of the temporary node pool used to cycle the node pool when one of the relevant properties are updated. + +* `ultra_ssd_enabled` - (Optional) Used to specify whether the UltraSSD is enabled in the Node Pool. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/use-ultra-disks) for more information. Changing this property requires specifying `temporary_name_for_rotation`. * `upgrade_settings` - (Optional) A `upgrade_settings` block as documented below. -* `vnet_subnet_id` - (Optional) The ID of the Subnet where this Node Pool should exist. Changing this forces a new resource to be created. 
+* `vnet_subnet_id` - (Optional) The ID of the Subnet where this Node Pool should exist. Changing this property requires specifying `temporary_name_for_rotation`. ~> **NOTE:** A route table must be configured on this Subnet. @@ -156,7 +160,7 @@ The following arguments are supported: ~> **Note:** WebAssembly System Interface node pools are in Public Preview - more information and details on how to opt into the preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-wasi-node-pools) -* `zones` - (Optional) Specifies a list of Availability Zones in which this Kubernetes Cluster Node Pool should be located. Changing this forces a new Kubernetes Cluster Node Pool to be created. +* `zones` - (Optional) Specifies a list of Availability Zones in which this Kubernetes Cluster Node Pool should be located. Changing this property requires specifying `temporary_name_for_rotation`. --- @@ -178,37 +182,37 @@ If `auto_scaling_enabled` is set to `false`, then the following fields can also A `kubelet_config` block supports the following: -* `allowed_unsafe_sysctls` - (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created. +* `allowed_unsafe_sysctls` - (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). -* `container_log_max_line` - (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created. +* `container_log_max_line` - (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. -* `container_log_max_size_mb` - (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created. +* `container_log_max_size_mb` - (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. -* `cpu_cfs_quota_enabled` - (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created. +* `cpu_cfs_quota_enabled` - (Optional) Is CPU CFS quota enforcement for containers enabled? Defaults to `true`. -* `cpu_cfs_quota_period` - (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created. +* `cpu_cfs_quota_period` - (Optional) Specifies the CPU CFS quota period value. -* `cpu_manager_policy` - (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created. +* `cpu_manager_policy` - (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, -* `image_gc_high_threshold` - (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created. +* `image_gc_high_threshold` - (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. -* `image_gc_low_threshold` - (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created. +* `image_gc_low_threshold` - (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. 
-* `pod_max_pid` - (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created. +* `pod_max_pid` - (Optional) Specifies the maximum number of processes per pod. -* `topology_manager_policy` - (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created. +* `topology_manager_policy` - (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. --- A `linux_os_config` block supports the following: -* `swap_file_size_mb` - (Optional) Specifies the size of swap file on each node in MB. Changing this forces a new resource to be created. +* `swap_file_size_mb` - (Optional) Specifies the size of swap file on each node in MB. -* `sysctl_config` - (Optional) A `sysctl_config` block as defined below. Changing this forces a new resource to be created. +* `sysctl_config` - (Optional) A `sysctl_config` block as defined below. -* `transparent_huge_page_defrag` - (Optional) specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created. +* `transparent_huge_page_defrag` - (Optional) specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. -* `transparent_huge_page_enabled` - (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created. +* `transparent_huge_page_enabled` - (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. --- @@ -236,63 +240,63 @@ A `sysctl_config` block supports the following: ~> For more information, please refer to [Linux Kernel Doc](https://www.kernel.org/doc/html/latest/admin-guide/sysctl/index.html). -* `fs_aio_max_nr` - (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created. +* `fs_aio_max_nr` - (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. -* `fs_file_max` - (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created. +* `fs_file_max` - (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. -* `fs_inotify_max_user_watches` - (Optional) The sysctl setting fs.inotify.max_user_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created. +* `fs_inotify_max_user_watches` - (Optional) The sysctl setting fs.inotify.max_user_watches. Must be between `781250` and `2097152`. -* `fs_nr_open` - (Optional) The sysctl setting fs.nr_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created. +* `fs_nr_open` - (Optional) The sysctl setting fs.nr_open. Must be between `8192` and `20000500`. -* `kernel_threads_max` - (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created. +* `kernel_threads_max` - (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. -* `net_core_netdev_max_backlog` - (Optional) The sysctl setting net.core.netdev_max_backlog. 
Must be between `1000` and `3240000`. Changing this forces a new resource to be created. +* `net_core_netdev_max_backlog` - (Optional) The sysctl setting net.core.netdev_max_backlog. Must be between `1000` and `3240000`. -* `net_core_optmem_max` - (Optional) The sysctl setting net.core.optmem_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created. +* `net_core_optmem_max` - (Optional) The sysctl setting net.core.optmem_max. Must be between `20480` and `4194304`. -* `net_core_rmem_default` - (Optional) The sysctl setting net.core.rmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. +* `net_core_rmem_default` - (Optional) The sysctl setting net.core.rmem_default. Must be between `212992` and `134217728`. -* `net_core_rmem_max` - (Optional) The sysctl setting net.core.rmem_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. +* `net_core_rmem_max` - (Optional) The sysctl setting net.core.rmem_max. Must be between `212992` and `134217728`. -* `net_core_somaxconn` - (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created. +* `net_core_somaxconn` - (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. -* `net_core_wmem_default` - (Optional) The sysctl setting net.core.wmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. +* `net_core_wmem_default` - (Optional) The sysctl setting net.core.wmem_default. Must be between `212992` and `134217728`. -* `net_core_wmem_max` - (Optional) The sysctl setting net.core.wmem_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. +* `net_core_wmem_max` - (Optional) The sysctl setting net.core.wmem_max. Must be between `212992` and `134217728`. -* `net_ipv4_ip_local_port_range_max` - (Optional) The sysctl setting net.ipv4.ip_local_port_range max value. Must be between `32768` and `65535`. Changing this forces a new resource to be created. +* `net_ipv4_ip_local_port_range_max` - (Optional) The sysctl setting net.ipv4.ip_local_port_range max value. Must be between `32768` and `65535`. -* `net_ipv4_ip_local_port_range_min` - (Optional) The sysctl setting net.ipv4.ip_local_port_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created. +* `net_ipv4_ip_local_port_range_min` - (Optional) The sysctl setting net.ipv4.ip_local_port_range min value. Must be between `1024` and `60999`. -* `net_ipv4_neigh_default_gc_thresh1` - (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created. +* `net_ipv4_neigh_default_gc_thresh1` - (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between `128` and `80000`. -* `net_ipv4_neigh_default_gc_thresh2` - (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created. +* `net_ipv4_neigh_default_gc_thresh2` - (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between `512` and `90000`. -* `net_ipv4_neigh_default_gc_thresh3` - (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created. 
+* `net_ipv4_neigh_default_gc_thresh3` - (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between `1024` and `100000`. -* `net_ipv4_tcp_fin_timeout` - (Optional) The sysctl setting net.ipv4.tcp_fin_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created. +* `net_ipv4_tcp_fin_timeout` - (Optional) The sysctl setting net.ipv4.tcp_fin_timeout. Must be between `5` and `120`. -* `net_ipv4_tcp_keepalive_intvl` - (Optional) The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between `10` and `90`. Changing this forces a new resource to be created. +* `net_ipv4_tcp_keepalive_intvl` - (Optional) The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between `10` and `90`. -* `net_ipv4_tcp_keepalive_probes` - (Optional) The sysctl setting net.ipv4.tcp_keepalive_probes. Must be between `1` and `15`. Changing this forces a new resource to be created. +* `net_ipv4_tcp_keepalive_probes` - (Optional) The sysctl setting net.ipv4.tcp_keepalive_probes. Must be between `1` and `15`. -* `net_ipv4_tcp_keepalive_time` - (Optional) The sysctl setting net.ipv4.tcp_keepalive_time. Must be between `30` and `432000`. Changing this forces a new resource to be created. +* `net_ipv4_tcp_keepalive_time` - (Optional) The sysctl setting net.ipv4.tcp_keepalive_time. Must be between `30` and `432000`. -* `net_ipv4_tcp_max_syn_backlog` - (Optional) The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created. +* `net_ipv4_tcp_max_syn_backlog` - (Optional) The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between `128` and `3240000`. -* `net_ipv4_tcp_max_tw_buckets` - (Optional) The sysctl setting net.ipv4.tcp_max_tw_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created. +* `net_ipv4_tcp_max_tw_buckets` - (Optional) The sysctl setting net.ipv4.tcp_max_tw_buckets. Must be between `8000` and `1440000`. -* `net_ipv4_tcp_tw_reuse` - (Optional) Is sysctl setting net.ipv4.tcp_tw_reuse enabled? Changing this forces a new resource to be created. +* `net_ipv4_tcp_tw_reuse` - (Optional) Is sysctl setting net.ipv4.tcp_tw_reuse enabled? -* `net_netfilter_nf_conntrack_buckets` - (Optional) The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between `65536` and `524288`. Changing this forces a new resource to be created. +* `net_netfilter_nf_conntrack_buckets` - (Optional) The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between `65536` and `524288`. -* `net_netfilter_nf_conntrack_max` - (Optional) The sysctl setting net.netfilter.nf_conntrack_max. Must be between `131072` and `2097152`. Changing this forces a new resource to be created. +* `net_netfilter_nf_conntrack_max` - (Optional) The sysctl setting net.netfilter.nf_conntrack_max. Must be between `131072` and `2097152`. -* `vm_max_map_count` - (Optional) The sysctl setting vm.max_map_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created. +* `vm_max_map_count` - (Optional) The sysctl setting vm.max_map_count. Must be between `65530` and `262144`. -* `vm_swappiness` - (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created. +* `vm_swappiness` - (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. -* `vm_vfs_cache_pressure` - (Optional) The sysctl setting vm.vfs_cache_pressure. Must be between `0` and `100`. 
Changing this forces a new resource to be created. +* `vm_vfs_cache_pressure` - (Optional) The sysctl setting vm.vfs_cache_pressure. Must be between `0` and `100`. --- @@ -300,7 +304,7 @@ A `upgrade_settings` block supports the following: * `drain_timeout_in_minutes` - (Optional) The amount of time in minutes to wait on eviction of pods and graceful termination per node. This eviction wait time honors waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. Unsetting this after configuring it will force a new resource to be created. -* `node_soak_duration_in_minutes` - (Optional) The amount of time in minutes to wait after draining a node and before reimaging and moving on to next node. Defaults to `0`. +* `node_soak_duration_in_minutes` - (Optional) The amount of time in minutes to wait after draining a node and before reimaging and moving on to next node. * `max_surge` - (Required) The maximum number or percentage of nodes which will be added to the Node Pool size during an upgrade.
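
The `temporary_name_for_rotation` behaviour introduced in this diff can be illustrated with a minimal configuration. The sketch below is adapted from the `manualScaleVMSkuConfig` acceptance-test configuration added here; the `azurerm_kubernetes_cluster.example` reference and resource labels are placeholders for an existing cluster.

```hcl
# Sketch: a node pool whose vm_size can now be updated in place by cycling.
# Adapted from the manualScaleVMSkuConfig test config in this diff; the
# cluster reference is a placeholder.
resource "azurerm_kubernetes_cluster_node_pool" "example" {
  name                        = "internal"
  kubernetes_cluster_id       = azurerm_kubernetes_cluster.example.id
  vm_size                     = "Standard_F2s_v2" # e.g. later changed to "Standard_F4s_v2"
  node_count                  = 1
  temporary_name_for_rotation = "temporal"
}
```

When `vm_size` (or any other property in the cycling list) is later changed, the update logic added in this diff creates a temporary node pool named `temporal` with the new values, deletes the existing pool, recreates it under its original name, and finally removes the temporary pool. If `temporary_name_for_rotation` is not set, the update is rejected with the new "`temporary_name_for_rotation` must be specified when updating any of the following properties" error.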
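
Since `kubelet_config` and `linux_os_config` also move from ForceNew to cycling in this diff, a similar sketch of those blocks may help; the values below are illustrative picks from the ranges documented above, not recommendations.

```hcl
# Sketch: kubelet_config / linux_os_config on a node pool that can now be
# updated by cycling instead of forcing a new resource. Values are
# illustrative and fall within the documented ranges.
resource "azurerm_kubernetes_cluster_node_pool" "tuned" {
  name                        = "tuned"
  kubernetes_cluster_id       = azurerm_kubernetes_cluster.example.id
  vm_size                     = "Standard_D2s_v3"
  node_count                  = 1
  temporary_name_for_rotation = "temporal"

  kubelet_config {
    cpu_manager_policy      = "static" # "none" or "static"
    image_gc_high_threshold = 85       # 0-100
    image_gc_low_threshold  = 70       # 0-100
  }

  linux_os_config {
    transparent_huge_page_enabled = "madvise" # "always", "madvise" or "never"

    sysctl_config {
      vm_max_map_count = 262144 # 65530-262144
    }
  }
}
```

Changing any of these blocks later triggers the same cycle-via-temporary-pool flow rather than destroying and recreating the node pool.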