diff --git a/examples/application_gateway_ingress/main.tf b/examples/application_gateway_ingress/main.tf index e10413ec..aa0ef95a 100644 --- a/examples/application_gateway_ingress/main.tf +++ b/examples/application_gateway_ingress/main.tf @@ -183,6 +183,7 @@ module "aks" { private_cluster_enabled = false rbac_aad = true rbac_aad_managed = true + rbac_aad_tenant_id = data.azurerm_client_config.this.tenant_id role_based_access_control_enabled = true sku_tier = "Standard" vnet_subnet_id = var.bring_your_own_vnet ? azurerm_subnet.test[0].id : null diff --git a/examples/application_gateway_ingress_v4/data.tf b/examples/application_gateway_ingress_v4/data.tf new file mode 100644 index 00000000..012126ad --- /dev/null +++ b/examples/application_gateway_ingress_v4/data.tf @@ -0,0 +1 @@ +data "azurerm_client_config" "this" {} \ No newline at end of file diff --git a/examples/application_gateway_ingress_v4/k8s_workload.tf b/examples/application_gateway_ingress_v4/k8s_workload.tf new file mode 120000 index 00000000..3a85d4c7 --- /dev/null +++ b/examples/application_gateway_ingress_v4/k8s_workload.tf @@ -0,0 +1 @@ +../application_gateway_ingress/k8s_workload.tf \ No newline at end of file diff --git a/examples/application_gateway_ingress_v4/main.tf b/examples/application_gateway_ingress_v4/main.tf new file mode 120000 index 00000000..025a83a1 --- /dev/null +++ b/examples/application_gateway_ingress_v4/main.tf @@ -0,0 +1 @@ +../application_gateway_ingress/main.tf \ No newline at end of file diff --git a/examples/application_gateway_ingress_v4/main_override.tf b/examples/application_gateway_ingress_v4/main_override.tf new file mode 100644 index 00000000..370cc63a --- /dev/null +++ b/examples/application_gateway_ingress_v4/main_override.tf @@ -0,0 +1,5 @@ +module "aks" { + #checkov:skip=CKV_AZURE_141:We enable admin account here so we can provision K8s resources directly in this simple example + source = "../../v4" + rbac_aad_tenant_id = data.azurerm_client_config.this.tenant_id +} \ No newline at end of file diff --git a/examples/application_gateway_ingress_v4/outputs.tf b/examples/application_gateway_ingress_v4/outputs.tf new file mode 120000 index 00000000..b7f9f6ed --- /dev/null +++ b/examples/application_gateway_ingress_v4/outputs.tf @@ -0,0 +1 @@ +../application_gateway_ingress/outputs.tf \ No newline at end of file diff --git a/examples/application_gateway_ingress_v4/providers.tf b/examples/application_gateway_ingress_v4/providers.tf new file mode 120000 index 00000000..66780034 --- /dev/null +++ b/examples/application_gateway_ingress_v4/providers.tf @@ -0,0 +1 @@ +../application_gateway_ingress/providers.tf \ No newline at end of file diff --git a/examples/application_gateway_ingress_v4/providers_override.tf b/examples/application_gateway_ingress_v4/providers_override.tf new file mode 100644 index 00000000..ab616209 --- /dev/null +++ b/examples/application_gateway_ingress_v4/providers_override.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "~> 4.0" + } + } +} \ No newline at end of file diff --git a/examples/application_gateway_ingress_v4/variables.tf b/examples/application_gateway_ingress_v4/variables.tf new file mode 120000 index 00000000..7899944f --- /dev/null +++ b/examples/application_gateway_ingress_v4/variables.tf @@ -0,0 +1 @@ +../application_gateway_ingress/variables.tf \ No newline at end of file diff --git a/examples/multiple_node_pools/main.tf b/examples/multiple_node_pools/main.tf index 2cba335b..a8b7291f 100644 --- 
a/examples/multiple_node_pools/main.tf +++ b/examples/multiple_node_pools/main.tf @@ -28,7 +28,6 @@ resource "azurerm_subnet" "test" { name = "${random_id.prefix.hex}-sn" resource_group_name = local.resource_group.name virtual_network_name = azurerm_virtual_network.test.name - enforce_private_link_endpoint_network_policies = true } locals { diff --git a/examples/multiple_node_pools/main_override.tf b/examples/multiple_node_pools/main_override.tf new file mode 100644 index 00000000..e3a7af9f --- /dev/null +++ b/examples/multiple_node_pools/main_override.tf @@ -0,0 +1,3 @@ +resource "azurerm_subnet" "test" { + enforce_private_link_endpoint_network_policies = true +} \ No newline at end of file diff --git a/examples/multiple_node_pools_v4/main.tf b/examples/multiple_node_pools_v4/main.tf new file mode 120000 index 00000000..a320b6cf --- /dev/null +++ b/examples/multiple_node_pools_v4/main.tf @@ -0,0 +1 @@ +../multiple_node_pools/main.tf \ No newline at end of file diff --git a/examples/multiple_node_pools_v4/main_override.tf b/examples/multiple_node_pools_v4/main_override.tf new file mode 100644 index 00000000..b7c945b8 --- /dev/null +++ b/examples/multiple_node_pools_v4/main_override.tf @@ -0,0 +1,8 @@ +resource "azurerm_subnet" "test" { + private_endpoint_network_policies = "Disabled" + private_link_service_network_policies_enabled = true +} + +module "aks" { + source = "../../v4" +} \ No newline at end of file diff --git a/examples/multiple_node_pools_v4/outputs.tf b/examples/multiple_node_pools_v4/outputs.tf new file mode 120000 index 00000000..a2313596 --- /dev/null +++ b/examples/multiple_node_pools_v4/outputs.tf @@ -0,0 +1 @@ +../multiple_node_pools/outputs.tf \ No newline at end of file diff --git a/examples/multiple_node_pools_v4/providers.tf b/examples/multiple_node_pools_v4/providers.tf new file mode 120000 index 00000000..99bf33e1 --- /dev/null +++ b/examples/multiple_node_pools_v4/providers.tf @@ -0,0 +1 @@ +../multiple_node_pools/providers.tf \ No newline at end of file diff --git a/examples/multiple_node_pools_v4/providers_override.tf b/examples/multiple_node_pools_v4/providers_override.tf new file mode 100644 index 00000000..613b6157 --- /dev/null +++ b/examples/multiple_node_pools_v4/providers_override.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "~> 4.0" + } + random = { + source = "hashicorp/random" + version = "3.3.2" + } + } +} \ No newline at end of file diff --git a/examples/multiple_node_pools_v4/variables.tf b/examples/multiple_node_pools_v4/variables.tf new file mode 120000 index 00000000..a2f1eeca --- /dev/null +++ b/examples/multiple_node_pools_v4/variables.tf @@ -0,0 +1 @@ +../multiple_node_pools/variables.tf \ No newline at end of file diff --git a/extra_node_pool.tf b/extra_node_pool.tf index 9b732b35..9bc68313 100644 --- a/extra_node_pool.tf +++ b/extra_node_pool.tf @@ -10,10 +10,6 @@ resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy name = "${each.value.name}${substr(md5(uuid()), 0, 4)}" vm_size = each.value.vm_size capacity_reservation_group_id = each.value.capacity_reservation_group_id - custom_ca_trust_enabled = each.value.custom_ca_trust_enabled - enable_auto_scaling = each.value.enable_auto_scaling - enable_host_encryption = each.value.enable_host_encryption - enable_node_public_ip = each.value.enable_node_public_ip eviction_policy = each.value.eviction_policy fips_enabled = each.value.fips_enabled gpu_instance = each.value.gpu_instance @@ -21,7 +17,6 @@ 
resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy kubelet_disk_type = each.value.kubelet_disk_type max_count = each.value.max_count max_pods = each.value.max_pods - message_of_the_day = each.value.message_of_the_day min_count = each.value.min_count mode = each.value.mode node_count = each.value.node_count @@ -171,17 +166,12 @@ resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_after_destroy" name = each.value.name vm_size = each.value.vm_size capacity_reservation_group_id = each.value.capacity_reservation_group_id - custom_ca_trust_enabled = each.value.custom_ca_trust_enabled - enable_auto_scaling = each.value.enable_auto_scaling - enable_host_encryption = each.value.enable_host_encryption - enable_node_public_ip = each.value.enable_node_public_ip eviction_policy = each.value.eviction_policy fips_enabled = each.value.fips_enabled host_group_id = each.value.host_group_id kubelet_disk_type = each.value.kubelet_disk_type max_count = each.value.max_count max_pods = each.value.max_pods - message_of_the_day = each.value.message_of_the_day min_count = each.value.min_count mode = each.value.mode node_count = each.value.node_count diff --git a/extra_node_pool_override.tf b/extra_node_pool_override.tf new file mode 100644 index 00000000..6dcae10e --- /dev/null +++ b/extra_node_pool_override.tf @@ -0,0 +1,15 @@ +resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy" { + custom_ca_trust_enabled = each.value.custom_ca_trust_enabled + enable_auto_scaling = each.value.enable_auto_scaling + enable_host_encryption = each.value.enable_host_encryption + enable_node_public_ip = each.value.enable_node_public_ip + message_of_the_day = each.value.message_of_the_day +} + +resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_after_destroy" { + custom_ca_trust_enabled = each.value.custom_ca_trust_enabled + enable_auto_scaling = each.value.enable_auto_scaling + enable_host_encryption = each.value.enable_host_encryption + enable_node_public_ip = each.value.enable_node_public_ip + message_of_the_day = each.value.message_of_the_day +} \ No newline at end of file diff --git a/main_override.tf b/main_override.tf new file mode 100644 index 00000000..86501234 --- /dev/null +++ b/main_override.tf @@ -0,0 +1,246 @@ +resource "azurerm_kubernetes_cluster" "main" { + dynamic "default_node_pool" { + for_each = var.enable_auto_scaling == true ? 
[] : ["default_node_pool_manually_scaled"] + + content { + name = var.agents_pool_name + vm_size = var.agents_size + enable_auto_scaling = var.enable_auto_scaling + enable_host_encryption = var.enable_host_encryption + enable_node_public_ip = var.enable_node_public_ip + fips_enabled = var.default_node_pool_fips_enabled + max_count = null + max_pods = var.agents_max_pods + min_count = null + node_count = var.agents_count + node_labels = var.agents_labels + node_taints = var.agents_taints + only_critical_addons_enabled = var.only_critical_addons_enabled + orchestrator_version = var.orchestrator_version + os_disk_size_gb = var.os_disk_size_gb + os_disk_type = var.os_disk_type + os_sku = var.os_sku + pod_subnet_id = var.pod_subnet_id + proximity_placement_group_id = var.agents_proximity_placement_group_id + scale_down_mode = var.scale_down_mode + snapshot_id = var.snapshot_id + tags = merge(var.tags, var.agents_tags) + temporary_name_for_rotation = var.temporary_name_for_rotation + type = var.agents_type + ultra_ssd_enabled = var.ultra_ssd_enabled + vnet_subnet_id = var.vnet_subnet_id + zones = var.agents_availability_zones + + dynamic "kubelet_config" { + for_each = var.agents_pool_kubelet_configs + + content { + allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls + container_log_max_line = kubelet_config.value.container_log_max_line + container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb + cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled + cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period + cpu_manager_policy = kubelet_config.value.cpu_manager_policy + image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold + image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold + pod_max_pid = kubelet_config.value.pod_max_pid + topology_manager_policy = kubelet_config.value.topology_manager_policy + } + } + dynamic "linux_os_config" { + for_each = var.agents_pool_linux_os_configs + + content { + swap_file_size_mb = linux_os_config.value.swap_file_size_mb + transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag + transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = linux_os_config.value.sysctl_configs == null ? 
[] : linux_os_config.value.sysctl_configs + + content { + fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr + fs_file_max = sysctl_config.value.fs_file_max + fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches + fs_nr_open = sysctl_config.value.fs_nr_open + kernel_threads_max = sysctl_config.value.kernel_threads_max + net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog + net_core_optmem_max = sysctl_config.value.net_core_optmem_max + net_core_rmem_default = sysctl_config.value.net_core_rmem_default + net_core_rmem_max = sysctl_config.value.net_core_rmem_max + net_core_somaxconn = sysctl_config.value.net_core_somaxconn + net_core_wmem_default = sysctl_config.value.net_core_wmem_default + net_core_wmem_max = sysctl_config.value.net_core_wmem_max + net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog + net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max + vm_max_map_count = sysctl_config.value.vm_max_map_count + vm_swappiness = sysctl_config.value.vm_swappiness + vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure + } + } + } + } + dynamic "upgrade_settings" { + for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"] + + content { + max_surge = var.agents_pool_max_surge + drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes + node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes + } + } + } + } + dynamic "default_node_pool" { + for_each = var.enable_auto_scaling == true ? 
["default_node_pool_auto_scaled"] : [] + + content { + name = var.agents_pool_name + vm_size = var.agents_size + enable_auto_scaling = var.enable_auto_scaling + enable_host_encryption = var.enable_host_encryption + enable_node_public_ip = var.enable_node_public_ip + fips_enabled = var.default_node_pool_fips_enabled + max_count = var.agents_max_count + max_pods = var.agents_max_pods + min_count = var.agents_min_count + node_labels = var.agents_labels + node_taints = var.agents_taints + only_critical_addons_enabled = var.only_critical_addons_enabled + orchestrator_version = var.orchestrator_version + os_disk_size_gb = var.os_disk_size_gb + os_disk_type = var.os_disk_type + os_sku = var.os_sku + pod_subnet_id = var.pod_subnet_id + proximity_placement_group_id = var.agents_proximity_placement_group_id + scale_down_mode = var.scale_down_mode + snapshot_id = var.snapshot_id + tags = merge(var.tags, var.agents_tags) + temporary_name_for_rotation = var.temporary_name_for_rotation + type = var.agents_type + ultra_ssd_enabled = var.ultra_ssd_enabled + vnet_subnet_id = var.vnet_subnet_id + zones = var.agents_availability_zones + + dynamic "kubelet_config" { + for_each = var.agents_pool_kubelet_configs + + content { + allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls + container_log_max_line = kubelet_config.value.container_log_max_line + container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb + cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled + cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period + cpu_manager_policy = kubelet_config.value.cpu_manager_policy + image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold + image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold + pod_max_pid = kubelet_config.value.pod_max_pid + topology_manager_policy = kubelet_config.value.topology_manager_policy + } + } + dynamic "linux_os_config" { + for_each = var.agents_pool_linux_os_configs + + content { + swap_file_size_mb = linux_os_config.value.swap_file_size_mb + transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag + transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = linux_os_config.value.sysctl_configs == null ? 
[] : linux_os_config.value.sysctl_configs + + content { + fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr + fs_file_max = sysctl_config.value.fs_file_max + fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches + fs_nr_open = sysctl_config.value.fs_nr_open + kernel_threads_max = sysctl_config.value.kernel_threads_max + net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog + net_core_optmem_max = sysctl_config.value.net_core_optmem_max + net_core_rmem_default = sysctl_config.value.net_core_rmem_default + net_core_rmem_max = sysctl_config.value.net_core_rmem_max + net_core_somaxconn = sysctl_config.value.net_core_somaxconn + net_core_wmem_default = sysctl_config.value.net_core_wmem_default + net_core_wmem_max = sysctl_config.value.net_core_wmem_max + net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog + net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max + vm_max_map_count = sysctl_config.value.vm_max_map_count + vm_swappiness = sysctl_config.value.vm_swappiness + vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure + } + } + } + } + dynamic "upgrade_settings" { + for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"] + + content { + max_surge = var.agents_pool_max_surge + drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes + node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes + } + } + } + } + network_profile { + network_plugin = var.network_plugin + dns_service_ip = var.net_profile_dns_service_ip + ebpf_data_plane = var.ebpf_data_plane + load_balancer_sku = var.load_balancer_sku + network_plugin_mode = var.network_plugin_mode + network_policy = var.network_policy + outbound_type = var.net_profile_outbound_type + pod_cidr = var.net_profile_pod_cidr + service_cidr = var.net_profile_service_cidr + + dynamic "load_balancer_profile" { + for_each = var.load_balancer_profile_enabled && var.load_balancer_sku == "standard" ? 
[ + "load_balancer_profile" + ] : [] + + content { + idle_timeout_in_minutes = var.load_balancer_profile_idle_timeout_in_minutes + managed_outbound_ip_count = var.load_balancer_profile_managed_outbound_ip_count + managed_outbound_ipv6_count = var.load_balancer_profile_managed_outbound_ipv6_count + outbound_ip_address_ids = var.load_balancer_profile_outbound_ip_address_ids + outbound_ip_prefix_ids = var.load_balancer_profile_outbound_ip_prefix_ids + outbound_ports_allocated = var.load_balancer_profile_outbound_ports_allocated + } + } + } + + lifecycle { + ignore_changes = [ + http_application_routing_enabled, + http_proxy_config[0].no_proxy, + kubernetes_version, + public_network_access_enabled, + # we might have a random suffix in cluster's name so we have to ignore it here, but we've traced user supplied cluster name by `null_resource.kubernetes_cluster_name_keeper` so when the name is changed we'll recreate this resource. + name, + ] + } +} \ No newline at end of file diff --git a/v4/extra_node_pool.tf b/v4/extra_node_pool.tf new file mode 120000 index 00000000..9cbc2968 --- /dev/null +++ b/v4/extra_node_pool.tf @@ -0,0 +1 @@ +../extra_node_pool.tf \ No newline at end of file diff --git a/v4/extra_node_pool_override.tf b/v4/extra_node_pool_override.tf new file mode 100644 index 00000000..21ad94b3 --- /dev/null +++ b/v4/extra_node_pool_override.tf @@ -0,0 +1,11 @@ +resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy" { + auto_scaling_enabled = each.value.enable_auto_scaling + host_encryption_enabled = each.value.enable_host_encryption + node_public_ip_enabled = each.value.enable_node_public_ip +} + +resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_after_destroy" { + auto_scaling_enabled = each.value.enable_auto_scaling + host_encryption_enabled = each.value.enable_host_encryption + node_public_ip_enabled = each.value.enable_node_public_ip +} \ No newline at end of file diff --git a/v4/locals.tf b/v4/locals.tf new file mode 120000 index 00000000..1b032e65 --- /dev/null +++ b/v4/locals.tf @@ -0,0 +1 @@ +../locals.tf \ No newline at end of file diff --git a/v4/log_analytics.tf b/v4/log_analytics.tf new file mode 120000 index 00000000..639a396c --- /dev/null +++ b/v4/log_analytics.tf @@ -0,0 +1 @@ +../log_analytics.tf \ No newline at end of file diff --git a/v4/main.tf b/v4/main.tf new file mode 120000 index 00000000..6c481fa3 --- /dev/null +++ b/v4/main.tf @@ -0,0 +1 @@ +../main.tf \ No newline at end of file diff --git a/v4/main_override.tf b/v4/main_override.tf new file mode 100644 index 00000000..b8648399 --- /dev/null +++ b/v4/main_override.tf @@ -0,0 +1,298 @@ +resource "azurerm_kubernetes_cluster" "main" { + automatic_upgrade_channel = var.automatic_channel_upgrade + node_os_upgrade_channel = var.node_os_channel_upgrade + + dynamic "default_node_pool" { + for_each = var.enable_auto_scaling == true ? 
[] : ["default_node_pool_manually_scaled"] + + content { + name = var.agents_pool_name + vm_size = var.agents_size + auto_scaling_enabled = var.enable_auto_scaling + fips_enabled = var.default_node_pool_fips_enabled + host_encryption_enabled = var.enable_host_encryption + max_count = null + max_pods = var.agents_max_pods + min_count = null + node_count = var.agents_count + node_labels = var.agents_labels + node_public_ip_enabled = var.enable_node_public_ip + only_critical_addons_enabled = var.only_critical_addons_enabled + orchestrator_version = var.orchestrator_version + os_disk_size_gb = var.os_disk_size_gb + os_disk_type = var.os_disk_type + os_sku = var.os_sku + pod_subnet_id = var.pod_subnet_id + proximity_placement_group_id = var.agents_proximity_placement_group_id + scale_down_mode = var.scale_down_mode + snapshot_id = var.snapshot_id + tags = merge(var.tags, var.agents_tags) + temporary_name_for_rotation = var.temporary_name_for_rotation + type = var.agents_type + ultra_ssd_enabled = var.ultra_ssd_enabled + vnet_subnet_id = var.vnet_subnet_id + zones = var.agents_availability_zones + + dynamic "kubelet_config" { + for_each = var.agents_pool_kubelet_configs + + content { + allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls + container_log_max_line = kubelet_config.value.container_log_max_line + container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb + cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled + cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period + cpu_manager_policy = kubelet_config.value.cpu_manager_policy + image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold + image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold + pod_max_pid = kubelet_config.value.pod_max_pid + topology_manager_policy = kubelet_config.value.topology_manager_policy + } + } + dynamic "linux_os_config" { + for_each = var.agents_pool_linux_os_configs + + content { + swap_file_size_mb = linux_os_config.value.swap_file_size_mb + transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag + transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = linux_os_config.value.sysctl_configs == null ? 
[] : linux_os_config.value.sysctl_configs + + content { + fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr + fs_file_max = sysctl_config.value.fs_file_max + fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches + fs_nr_open = sysctl_config.value.fs_nr_open + kernel_threads_max = sysctl_config.value.kernel_threads_max + net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog + net_core_optmem_max = sysctl_config.value.net_core_optmem_max + net_core_rmem_default = sysctl_config.value.net_core_rmem_default + net_core_rmem_max = sysctl_config.value.net_core_rmem_max + net_core_somaxconn = sysctl_config.value.net_core_somaxconn + net_core_wmem_default = sysctl_config.value.net_core_wmem_default + net_core_wmem_max = sysctl_config.value.net_core_wmem_max + net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog + net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max + vm_max_map_count = sysctl_config.value.vm_max_map_count + vm_swappiness = sysctl_config.value.vm_swappiness + vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure + } + } + } + } + dynamic "upgrade_settings" { + for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"] + + content { + max_surge = var.agents_pool_max_surge + drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes + node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes + } + } + } + } + dynamic "default_node_pool" { + for_each = var.enable_auto_scaling == true ? 
["default_node_pool_auto_scaled"] : [] + + content { + name = var.agents_pool_name + vm_size = var.agents_size + auto_scaling_enabled = var.enable_auto_scaling + fips_enabled = var.default_node_pool_fips_enabled + host_encryption_enabled = var.enable_host_encryption + max_count = var.agents_max_count + max_pods = var.agents_max_pods + min_count = var.agents_min_count + node_labels = var.agents_labels + node_public_ip_enabled = var.enable_node_public_ip + only_critical_addons_enabled = var.only_critical_addons_enabled + orchestrator_version = var.orchestrator_version + os_disk_size_gb = var.os_disk_size_gb + os_disk_type = var.os_disk_type + os_sku = var.os_sku + pod_subnet_id = var.pod_subnet_id + proximity_placement_group_id = var.agents_proximity_placement_group_id + scale_down_mode = var.scale_down_mode + snapshot_id = var.snapshot_id + tags = merge(var.tags, var.agents_tags) + temporary_name_for_rotation = var.temporary_name_for_rotation + type = var.agents_type + ultra_ssd_enabled = var.ultra_ssd_enabled + vnet_subnet_id = var.vnet_subnet_id + zones = var.agents_availability_zones + + dynamic "kubelet_config" { + for_each = var.agents_pool_kubelet_configs + + content { + allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls + container_log_max_line = kubelet_config.value.container_log_max_line + container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb + cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled + cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period + cpu_manager_policy = kubelet_config.value.cpu_manager_policy + image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold + image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold + pod_max_pid = kubelet_config.value.pod_max_pid + topology_manager_policy = kubelet_config.value.topology_manager_policy + } + } + dynamic "linux_os_config" { + for_each = var.agents_pool_linux_os_configs + + content { + swap_file_size_mb = linux_os_config.value.swap_file_size_mb + transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag + transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = linux_os_config.value.sysctl_configs == null ? 
[] : linux_os_config.value.sysctl_configs + + content { + fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr + fs_file_max = sysctl_config.value.fs_file_max + fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches + fs_nr_open = sysctl_config.value.fs_nr_open + kernel_threads_max = sysctl_config.value.kernel_threads_max + net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog + net_core_optmem_max = sysctl_config.value.net_core_optmem_max + net_core_rmem_default = sysctl_config.value.net_core_rmem_default + net_core_rmem_max = sysctl_config.value.net_core_rmem_max + net_core_somaxconn = sysctl_config.value.net_core_somaxconn + net_core_wmem_default = sysctl_config.value.net_core_wmem_default + net_core_wmem_max = sysctl_config.value.net_core_wmem_max + net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog + net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max + vm_max_map_count = sysctl_config.value.vm_max_map_count + vm_swappiness = sysctl_config.value.vm_swappiness + vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure + } + } + } + } + dynamic "upgrade_settings" { + for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"] + + content { + max_surge = var.agents_pool_max_surge + drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes + node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes + } + } + } + } + dynamic "service_mesh_profile" { + for_each = var.service_mesh_profile == null ? [] : ["service_mesh_profile"] + + content { + mode = var.service_mesh_profile.mode + revisions = var.service_mesh_profile.revisions + external_ingress_gateway_enabled = var.service_mesh_profile.external_ingress_gateway_enabled + internal_ingress_gateway_enabled = var.service_mesh_profile.internal_ingress_gateway_enabled + } + } + dynamic "api_server_access_profile" { + for_each = var.api_server_authorized_ip_ranges != null || var.api_server_subnet_id != null ? [ + "api_server_access_profile" + ] : [] + + content { + authorized_ip_ranges = var.api_server_authorized_ip_ranges + } + } + dynamic "azure_active_directory_role_based_access_control" { + for_each = var.role_based_access_control_enabled && var.rbac_aad && var.rbac_aad_managed ? 
["rbac"] : [] + + content { + admin_group_object_ids = var.rbac_aad_admin_group_object_ids + azure_rbac_enabled = var.rbac_aad_azure_rbac_enabled + tenant_id = var.rbac_aad_tenant_id + } + } + dynamic "azure_active_directory_role_based_access_control" { + for_each = var.role_based_access_control_enabled && var.rbac_aad && !var.rbac_aad_managed ? ["rbac"] : [] + + content { + tenant_id = var.rbac_aad_tenant_id + } + } + network_profile { + network_plugin = var.network_plugin + dns_service_ip = var.net_profile_dns_service_ip + load_balancer_sku = var.load_balancer_sku + network_data_plane = var.ebpf_data_plane + network_plugin_mode = var.network_plugin_mode + network_policy = var.network_policy + outbound_type = var.net_profile_outbound_type + pod_cidr = var.net_profile_pod_cidr + service_cidr = var.net_profile_service_cidr + + dynamic "load_balancer_profile" { + for_each = var.load_balancer_profile_enabled && var.load_balancer_sku == "standard" ? [ + "load_balancer_profile" + ] : [] + + content { + idle_timeout_in_minutes = var.load_balancer_profile_idle_timeout_in_minutes + managed_outbound_ip_count = var.load_balancer_profile_managed_outbound_ip_count + managed_outbound_ipv6_count = var.load_balancer_profile_managed_outbound_ipv6_count + outbound_ip_address_ids = var.load_balancer_profile_outbound_ip_address_ids + outbound_ip_prefix_ids = var.load_balancer_profile_outbound_ip_prefix_ids + outbound_ports_allocated = var.load_balancer_profile_outbound_ports_allocated + } + } + } + dynamic "storage_profile" { + for_each = var.storage_profile_enabled ? ["storage_profile"] : [] + + content { + blob_driver_enabled = var.storage_profile_blob_driver_enabled + disk_driver_enabled = var.storage_profile_disk_driver_enabled + file_driver_enabled = var.storage_profile_file_driver_enabled + snapshot_controller_enabled = var.storage_profile_snapshot_controller_enabled + } + } + dynamic "web_app_routing" { + for_each = var.web_app_routing == null ? [] : ["web_app_routing"] + + content { + dns_zone_ids = [var.web_app_routing.dns_zone_id] + } + } + + lifecycle { + ignore_changes = [ + http_application_routing_enabled, + http_proxy_config[0].no_proxy, + kubernetes_version, + # we might have a random suffix in cluster's name so we have to ignore it here, but we've traced user supplied cluster name by `null_resource.kubernetes_cluster_name_keeper` so when the name is changed we'll recreate this resource. 
+ name, + api_server_access_profile, network_profile[0].load_balancer_profile[0].outbound_ip_address_ids, network_profile[0].load_balancer_profile[0].outbound_ip_prefix_ids] + } +} \ No newline at end of file diff --git a/v4/outputs.tf b/v4/outputs.tf new file mode 120000 index 00000000..1a861df4 --- /dev/null +++ b/v4/outputs.tf @@ -0,0 +1 @@ +../outputs.tf \ No newline at end of file diff --git a/v4/role_assignments.tf b/v4/role_assignments.tf new file mode 120000 index 00000000..705ff1c9 --- /dev/null +++ b/v4/role_assignments.tf @@ -0,0 +1 @@ +../role_assignments.tf \ No newline at end of file diff --git a/v4/variables.tf b/v4/variables.tf new file mode 120000 index 00000000..3a65dccd --- /dev/null +++ b/v4/variables.tf @@ -0,0 +1 @@ +../variables.tf \ No newline at end of file diff --git a/v4/variables_override.tf b/v4/variables_override.tf new file mode 100644 index 00000000..2a35b6a7 --- /dev/null +++ b/v4/variables_override.tf @@ -0,0 +1,15 @@ +variable "service_mesh_profile" { + type = object({ + mode = string + revisions = list(string) + internal_ingress_gateway_enabled = optional(bool, true) + external_ingress_gateway_enabled = optional(bool, true) + }) + default = null + description = <<-EOT + `mode` - (Required) The mode of the service mesh. Possible value is `Istio`. + `revisions` - (Required) Specify 1 or 2 Istio control plane revisions for managing minor upgrades using the canary upgrade process. For example, create the resource with `revisions` set to `["asm-1-20"]`, or leave it empty (the `revisions` will only be known after apply). To start the canary upgrade, change `revisions` to `["asm-1-20", "asm-1-21"]`. To roll back the canary upgrade, revert to `["asm-1-20"]`. To confirm the upgrade, change to `["asm-1-21"]`. + `internal_ingress_gateway_enabled` - (Optional) Is Istio Internal Ingress Gateway enabled? Defaults to `true`. + `external_ingress_gateway_enabled` - (Optional) Is Istio External Ingress Gateway enabled? Defaults to `true`. + EOT +} \ No newline at end of file diff --git a/v4/versions.tf b/v4/versions.tf new file mode 120000 index 00000000..8bd0ff14 --- /dev/null +++ b/v4/versions.tf @@ -0,0 +1 @@ +../versions.tf \ No newline at end of file diff --git a/v4/versions_override.tf b/v4/versions_override.tf new file mode 100644 index 00000000..e7d3b24b --- /dev/null +++ b/v4/versions_override.tf @@ -0,0 +1,20 @@ +terraform { + required_providers { + azapi = { + source = "Azure/azapi" + version = ">= 1.4.0, < 2.0" + } + azurerm = { + source = "hashicorp/azurerm" + version = "~> 4.0" + } + null = { + source = "hashicorp/null" + version = ">= 3.0" + } + tls = { + source = "hashicorp/tls" + version = ">= 3.1" + } + } +}
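
The pattern this patch leans on throughout is Terraform's override-file merge: each new `*_v4` example and the `v4/` module directory symlink the existing `.tf` files and add `*_override.tf` files whose blocks are merged over, and take precedence over, the symlinked originals at load time, so the azurerm 4.x argument renames live in one place while the 3.x sources stay untouched. A minimal sketch of that mechanism follows; the resource values are illustrative only and are not taken from the patch, and the two override blocks shown live in separate example directories (3.x and `*_v4`), not side by side.

# main.tf -- shared base file; version-specific arguments are deliberately absent,
#            and the *_v4 example symlinks this file instead of copying it.
resource "azurerm_subnet" "test" {
  name                 = "example-sn"      # illustrative values, not from the patch
  resource_group_name  = "example-rg"
  virtual_network_name = "example-vnet"
  address_prefixes     = ["10.0.1.0/24"]
}

# main_override.tf in the azurerm 3.x example directory -- merged over the base
# block at load time; arguments set here replace the base values, everything
# else is inherited unchanged.
resource "azurerm_subnet" "test" {
  enforce_private_link_endpoint_network_policies = true
}

# main_override.tf in the *_v4 example directory -- same symlinked base block,
# but with the azurerm 4.x replacements for the argument above.
resource "azurerm_subnet" "test" {
  private_endpoint_network_policies              = "Disabled"
  private_link_service_network_policies_enabled  = true
}

Because override files can only add or replace arguments, never remove them, the 3.x-only arguments are moved out of the shared base files (as in examples/multiple_node_pools/main.tf above) and into the 3.x directory's own override file, so the symlinking `*_v4` directory never loads them.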