Skip to content

Commit

Permalink
Merge branch 'pr-pskd-1145' into pr-pskd-1363-refactor
Browse files Browse the repository at this point in the history
  • Loading branch information
saschjmil authored Feb 26, 2025
2 parents 4bfb6c7 + dae9644 commit a79f9f0
Show file tree
Hide file tree
Showing 3 changed files with 214 additions and 126 deletions.
98 changes: 93 additions & 5 deletions test/default_integration_plan_new_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -144,27 +144,27 @@ func TestPlanNodePoolsNew(t *testing.T) {
attributeJsonPath: "{$.default_node_pool[0].vm_size}",
},
"defaultNodepoolOsDiskSizeTest": {
expected: 128,
expected: "128",
resourceMapName: "module.aks.azurerm_kubernetes_cluster.aks",
attributeJsonPath: "{$.default_node_pool[0].os_disk_size_gb}",
},
"defaultNodepoolMaxPodsTest": {
expected: 110,
expected: "110",
resourceMapName: "module.aks.azurerm_kubernetes_cluster.aks",
attributeJsonPath: "{$.default_node_pool[0].max_pods}",
},
"defaultNodepoolMinNodesTest": {
expected: 1,
expected: "1",
resourceMapName: "module.aks.azurerm_kubernetes_cluster.aks",
attributeJsonPath: "{$.default_node_pool[0].min_count}",
},
"defaultNodepoolMaxNodesTest": {
expected: 5,
expected: "5",
resourceMapName: "module.aks.azurerm_kubernetes_cluster.aks",
attributeJsonPath: "{$.default_node_pool[0].max_count}",
},
"defaultNodepoolAvailabilityZonesTest": {
expected: []string{"1"},
expected: "[\"1\"]",
resourceMapName: "module.aks.azurerm_kubernetes_cluster.aks",
attributeJsonPath: "{$.default_node_pool[0].zones}",
},
Expand All @@ -181,3 +181,91 @@ func TestPlanNodePoolsNew(t *testing.T) {
})
}
}

// TestPlanAdditionalNodePools verifies the default additional nodepool
// variables when using the sample-input-defaults.tfvars file, i.e. that the
// tfplan is using the default variables from the CONFIG-VARS.
func TestPlanAdditionalNodePools(t *testing.T) {
	// attrTuple pairs an expected planned value with the JSONPath used to
	// locate that attribute on the node pool resource in the plan.
	type attrTuple struct {
		expectedValue string
		jsonPath      string
	}

	// nodepoolTestcase holds the full set of attribute expectations for one
	// named node pool, keyed by a human-readable attribute name.
	type nodepoolTestcase struct {
		expected map[string]attrTuple
	}

	nodepoolTests := map[string]nodepoolTestcase{
		"stateless": {
			expected: map[string]attrTuple{
				"MachineType":       {`Standard_D4s_v5`, "{$.vm_size}"},
				"OsDiskSize":        {`200`, "{$.os_disk_size_gb}"},
				"MinNodes":          {`0`, "{$.min_count}"},
				"MaxNodes":          {`5`, "{$.max_count}"},
				"MaxPods":           {`110`, "{$.max_pods}"},
				"NodeTaints":        {`["workload.sas.com/class=stateless:NoSchedule"]`, "{$.node_taints}"},
				"NodeLabels":        {`{"workload.sas.com/class":"stateless"}`, "{$.node_labels}"},
				"AvailabilityZones": {`["1"]`, "{$.zones}"},
				"FipsEnabled":       {`false`, "{$.fips_enabled}"},
			},
		},
		"stateful": {
			expected: map[string]attrTuple{
				"MachineType":       {`Standard_D4s_v5`, "{$.vm_size}"},
				"OsDiskSize":        {`200`, "{$.os_disk_size_gb}"},
				"MinNodes":          {`0`, "{$.min_count}"},
				"MaxNodes":          {`3`, "{$.max_count}"},
				"MaxPods":           {`110`, "{$.max_pods}"},
				"NodeTaints":        {`["workload.sas.com/class=stateful:NoSchedule"]`, "{$.node_taints}"},
				"NodeLabels":        {`{"workload.sas.com/class":"stateful"}`, "{$.node_labels}"},
				"AvailabilityZones": {`["1"]`, "{$.zones}"},
				"FipsEnabled":       {`false`, "{$.fips_enabled}"},
			},
		},
		"cas": {
			expected: map[string]attrTuple{
				"MachineType":       {`Standard_E16ds_v5`, "{$.vm_size}"},
				"OsDiskSize":        {`200`, "{$.os_disk_size_gb}"},
				"MinNodes":          {`0`, "{$.min_count}"},
				"MaxNodes":          {`5`, "{$.max_count}"},
				"MaxPods":           {`110`, "{$.max_pods}"},
				"NodeTaints":        {`["workload.sas.com/class=cas:NoSchedule"]`, "{$.node_taints}"},
				"NodeLabels":        {`{"workload.sas.com/class":"cas"}`, "{$.node_labels}"},
				"AvailabilityZones": {`["1"]`, "{$.zones}"},
				"FipsEnabled":       {`false`, "{$.fips_enabled}"},
			},
		},
		"compute": {
			expected: map[string]attrTuple{
				"MachineType":       {`Standard_D4ds_v5`, "{$.vm_size}"},
				"OsDiskSize":        {`200`, "{$.os_disk_size_gb}"},
				"MinNodes":          {`1`, "{$.min_count}"},
				"MaxNodes":          {`5`, "{$.max_count}"},
				"MaxPods":           {`110`, "{$.max_pods}"},
				"NodeTaints":        {`["workload.sas.com/class=compute:NoSchedule"]`, "{$.node_taints}"},
				"NodeLabels":        {`{"launcher.sas.com/prepullImage":"sas-programming-environment","workload.sas.com/class":"compute"}`, "{$.node_labels}"},
				"AvailabilityZones": {`["1"]`, "{$.zones}"},
				"FipsEnabled":       {`false`, "{$.fips_enabled}"},
			},
		},
	}

	variables := getDefaultPlanVars(t)
	plan, err := initPlanWithVariables(t, variables)
	// Check the error before the plan: when plan init fails, plan is typically
	// nil, and asserting NoError first surfaces the underlying failure message
	// instead of an opaque "expected not nil" report.
	require.NoError(t, err)
	require.NotNil(t, plan)

	for name, tc := range nodepoolTests {
		t.Run(name, func(t *testing.T) {
			resourceMapName := "module.node_pools[\"" + name + "\"].azurerm_kubernetes_cluster_node_pool.autoscale_node_pool[0]"
			// "attrs" (not "attrTuple") so the loop variable does not shadow
			// the attrTuple type declared above.
			for attrName, attrs := range tc.expected {
				t.Run(attrName, func(t *testing.T) {
					runTest(t, testCase{
						expected:          attrs.expectedValue,
						resourceMapName:   resourceMapName,
						attributeJsonPath: attrs.jsonPath,
					}, plan)
				})
			}
		})
	}
}
241 changes: 120 additions & 121 deletions test/default_integration_plan_test.go
Original file line number Diff line number Diff line change
@@ -1,11 +1,9 @@
package test

import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"reflect"
"strings"
"testing"

Expand Down Expand Up @@ -229,70 +227,70 @@ func TestDefaults(t *testing.T) {
//supportPlan := cluster.AttributeValues["support_plan"]
//assert.Equal(t, supportPlan, "KubernetesOfficial", "Unexpected cluster_support_tier")

// Additional Node Pools
statelessNodePool := plan.ResourcePlannedValuesMap["module.node_pools[\"stateless\"].azurerm_kubernetes_cluster_node_pool.autoscale_node_pool[0]"]
statelessStruct := &NodePool{
MachineType: "Standard_D4s_v5",
OsDiskSize: 200,
MinNodes: 0,
MaxNodes: 5,
MaxPods: 110,
NodeTaints: []string{"workload.sas.com/class=stateless:NoSchedule"},
NodeLabels: map[string]string{
"workload.sas.com/class": "stateless",
},
AvailabilityZones: []string{"1"},
FipsEnabled: false,
}
verifyNodePools(t, statelessNodePool, statelessStruct)

statefulNodePool := plan.ResourcePlannedValuesMap["module.node_pools[\"stateful\"].azurerm_kubernetes_cluster_node_pool.autoscale_node_pool[0]"]
statefulStruct := &NodePool{
MachineType: "Standard_D4s_v5",
OsDiskSize: 200,
MinNodes: 0,
MaxNodes: 3,
MaxPods: 110,
NodeTaints: []string{"workload.sas.com/class=stateful:NoSchedule"},
NodeLabels: map[string]string{
"workload.sas.com/class": "stateful",
},
AvailabilityZones: []string{"1"},
FipsEnabled: false,
}
verifyNodePools(t, statefulNodePool, statefulStruct)
casNodePool := plan.ResourcePlannedValuesMap["module.node_pools[\"cas\"].azurerm_kubernetes_cluster_node_pool.autoscale_node_pool[0]"]
casStruct := &NodePool{
MachineType: "Standard_E16ds_v5",
OsDiskSize: 200,
MinNodes: 0,
MaxNodes: 5,
MaxPods: 110,
NodeTaints: []string{"workload.sas.com/class=cas:NoSchedule"},
NodeLabels: map[string]string{
"workload.sas.com/class": "cas",
},
AvailabilityZones: []string{"1"},
FipsEnabled: false,
}
verifyNodePools(t, casNodePool, casStruct)

computeNodePool := plan.ResourcePlannedValuesMap["module.node_pools[\"compute\"].azurerm_kubernetes_cluster_node_pool.autoscale_node_pool[0]"]
computeStruct := &NodePool{
MachineType: "Standard_D4ds_v5",
OsDiskSize: 200,
MinNodes: 1,
MaxNodes: 5,
MaxPods: 110,
NodeTaints: []string{"workload.sas.com/class=compute:NoSchedule"},
NodeLabels: map[string]string{
"workload.sas.com/class": "compute",
"launcher.sas.com/prepullImage": "sas-programming-environment",
},
AvailabilityZones: []string{"1"},
FipsEnabled: false,
}
verifyNodePools(t, computeNodePool, computeStruct)
// Additional Node Pools - TEST MOVED TO TEST TABLE
// statelessNodePool := plan.ResourcePlannedValuesMap["module.node_pools[\"stateless\"].azurerm_kubernetes_cluster_node_pool.autoscale_node_pool[0]"]
// statelessStruct := &NodePool{
// MachineType: "Standard_D4s_v5",
// OsDiskSize: 200,
// MinNodes: 0,
// MaxNodes: 5,
// MaxPods: 110,
// NodeTaints: []string{"workload.sas.com/class=stateless:NoSchedule"},
// NodeLabels: map[string]string{
// "workload.sas.com/class": "stateless",
// },
// AvailabilityZones: []string{"1"},
// FipsEnabled: false,
// }
// verifyNodePools(t, statelessNodePool, statelessStruct)

// statefulNodePool := plan.ResourcePlannedValuesMap["module.node_pools[\"stateful\"].azurerm_kubernetes_cluster_node_pool.autoscale_node_pool[0]"]
// statefulStruct := &NodePool{
// MachineType: "Standard_D4s_v5",
// OsDiskSize: 200,
// MinNodes: 0,
// MaxNodes: 3,
// MaxPods: 110,
// NodeTaints: []string{"workload.sas.com/class=stateful:NoSchedule"},
// NodeLabels: map[string]string{
// "workload.sas.com/class": "stateful",
// },
// AvailabilityZones: []string{"1"},
// FipsEnabled: false,
// }
// verifyNodePools(t, statefulNodePool, statefulStruct)
// casNodePool := plan.ResourcePlannedValuesMap["module.node_pools[\"cas\"].azurerm_kubernetes_cluster_node_pool.autoscale_node_pool[0]"]
// casStruct := &NodePool{
// MachineType: "Standard_E16ds_v5",
// OsDiskSize: 200,
// MinNodes: 0,
// MaxNodes: 5,
// MaxPods: 110,
// NodeTaints: []string{"workload.sas.com/class=cas:NoSchedule"},
// NodeLabels: map[string]string{
// "workload.sas.com/class": "cas",
// },
// AvailabilityZones: []string{"1"},
// FipsEnabled: false,
// }
// verifyNodePools(t, casNodePool, casStruct)

// computeNodePool := plan.ResourcePlannedValuesMap["module.node_pools[\"compute\"].azurerm_kubernetes_cluster_node_pool.autoscale_node_pool[0]"]
// computeStruct := &NodePool{
// MachineType: "Standard_D4ds_v5",
// OsDiskSize: 200,
// MinNodes: 1,
// MaxNodes: 5,
// MaxPods: 110,
// NodeTaints: []string{"workload.sas.com/class=compute:NoSchedule"},
// NodeLabels: map[string]string{
// "workload.sas.com/class": "compute",
// "launcher.sas.com/prepullImage": "sas-programming-environment",
// },
// AvailabilityZones: []string{"1"},
// FipsEnabled: false,
// }
// verifyNodePools(t, computeNodePool, computeStruct)

// VAI: TEST MOVED TO TEST TABLE
//// storage_type
Expand Down Expand Up @@ -343,61 +341,62 @@ func testSSHKey(t *testing.T, cluster *tfjson.StateResource) bool {
return key != ""
}

// verifyNodePools asserts that a planned AKS node pool resource matches the
// expected CONFIG-VARS values: machine type, OS disk size, autoscaling
// bounds, max pods, taints, labels, availability zones, and FIPS mode.
// Malformed or missing list attributes fail the test instead of panicking.
func verifyNodePools(t *testing.T, nodePool *tfjson.StateResource, expectedValues *NodePool) {
	// machine_type
	assert.Equal(t, expectedValues.MachineType, nodePool.AttributeValues["vm_size"], "Unexpected machine_type.")

	// os_disk_size
	assert.Equal(t, expectedValues.OsDiskSize, nodePool.AttributeValues["os_disk_size_gb"], "Unexpected os_disk_size.")

	// min_nodes
	assert.Equal(t, expectedValues.MinNodes, nodePool.AttributeValues["min_count"], "Unexpected min_nodes.")

	// max_nodes
	assert.Equal(t, expectedValues.MaxNodes, nodePool.AttributeValues["max_count"], "Unexpected max_nodes.")

	// max_pods
	assert.Equal(t, expectedValues.MaxPods, nodePool.AttributeValues["max_pods"], "Unexpected max_pods.")

	// node_taints: guard the type assertion and length so a missing, wrongly
	// typed, or shorter-than-expected attribute fails the test rather than
	// panicking with an index-out-of-range or type-assertion error.
	nodeTaints, ok := nodePool.AttributeValues["node_taints"].([]interface{})
	if assert.True(t, ok, "node_taints missing or not a list") &&
		assert.Len(t, nodeTaints, len(expectedValues.NodeTaints), "Unexpected number of Node Taints") {
		for index, nodeTaint := range expectedValues.NodeTaints {
			assert.Equal(t, nodeTaint, nodeTaints[index], "Unexpected Node Taints")
		}
	}

	// node_labels
	nodeLabelsStatus := true
	nodeLabels := nodePool.AttributeValues["node_labels"]
	// Convert the interface{} (map[string]interface{}) to a JSON string so it
	// can be decoded into a comparable map[string]string.
	j, err := json.Marshal(nodeLabels)
	if err != nil {
		t.Log("Error parsing tfplan's Node Labels: ", err)
		nodeLabelsStatus = false
	}
	// Unmarshal the JSON string into the map
	var result map[string]string
	err = json.Unmarshal(j, &result)
	if err != nil {
		t.Log("Error unmarshaling Node Labels Json string: ", err)
		nodeLabelsStatus = false
	}
	// If no previous errors, verify that the maps are equal
	if nodeLabelsStatus {
		assert.True(t, reflect.DeepEqual(expectedValues.NodeLabels, result), "Unexpected Node Labels")
	} else {
		assert.Fail(t, "Unexpected errors parsing Node Labels")
	}

	// node_pools_availability_zone: same guarded access as node_taints.
	zones, ok := nodePool.AttributeValues["zones"].([]interface{})
	if assert.True(t, ok, "zones missing or not a list") &&
		assert.Len(t, zones, len(expectedValues.AvailabilityZones), "Unexpected number of Availability Zones") {
		for index, az := range expectedValues.AvailabilityZones {
			assert.Equal(t, az, zones[index], "Unexpected Availability Zones")
		}
	}

	// fips_enabled
	assert.Equal(t, expectedValues.FipsEnabled, nodePool.AttributeValues["fips_enabled"], "Unexpected fips_enabled.")

	// node_pools_proximity_placement - Can't find in tfplan

}
// TEST MOVED TO TEST TABLE
// func verifyNodePools(t *testing.T, nodePool *tfjson.StateResource, expectedValues *NodePool) {
// // machine_type
// assert.Equal(t, expectedValues.MachineType, nodePool.AttributeValues["vm_size"], "Unexpected machine_type.")

// // os_disk_size
// assert.Equal(t, expectedValues.OsDiskSize, nodePool.AttributeValues["os_disk_size_gb"], "Unexpected os_disk_size.")

// // min_nodes
// assert.Equal(t, expectedValues.MinNodes, nodePool.AttributeValues["min_count"], "Unexpected min_nodes.")

// // max_nodes
// assert.Equal(t, expectedValues.MaxNodes, nodePool.AttributeValues["max_count"], "Unexpected max_nodes.")

// // max_pods
// assert.Equal(t, expectedValues.MaxPods, nodePool.AttributeValues["max_pods"], "Unexpected max_pods.")

// // node_taints
// for index, nodeTaint := range expectedValues.NodeTaints {
// assert.Equal(t, nodeTaint, nodePool.AttributeValues["node_taints"].([]interface{})[index].(string), "Unexpected Node Taints")
// }

// // node_labels
// nodeLabelsStatus := true
// nodeLabels := nodePool.AttributeValues["node_labels"]
// // Convert the interface {}(map[string]interface {}) to JSON string
// j, err := json.Marshal(nodeLabels)
// if err != nil {
// t.Log("Error parsing tfplan's Node Labels: ", err)
// nodeLabelsStatus = false
// }
// // Unmarshal the JSON string into the map
// var result map[string]string
// err = json.Unmarshal(j, &result)
// if err != nil {
// t.Log("Error unmarshaling Node Labels Json string: ", err)
// nodeLabelsStatus = false
// }
// // If no previous errors, verify that the maps are equal
// if nodeLabelsStatus {
// assert.True(t, reflect.DeepEqual(expectedValues.NodeLabels, result), "Unexpected Node Labels")
// } else {
// assert.Fail(t, "Unexpected errors parsing Node Labels")
// }

// // node_pools_availability_zone
// for index, az := range expectedValues.AvailabilityZones {
// assert.Equal(t, az, nodePool.AttributeValues["zones"].([]interface{})[index].(string), "Unexpected Availability Zones")
// }

// // fips_enabled - TEST MOVED TO TEST TABLE
// // assert.Equal(t, expectedValues.FipsEnabled, nodePool.AttributeValues["fips_enabled"], "Unexpected fips_enabled.")

// // node_pools_proximity_placement - Can't find in tfplan

// }

// Subnet func
func verifySubnets(t *testing.T, subnet *tfjson.StateResource, expectedValues *Subnet) {
Expand Down
1 change: 1 addition & 0 deletions test/go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,7 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/pquerna/otp v1.4.0 // indirect
github.com/rogpeppe/go-internal v1.12.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/tmccombs/hcl2json v0.6.4 // indirect
Expand Down

0 comments on commit a79f9f0

Please sign in to comment.