Merge pull request #5743 from vishalanarase/civo-support-pool-cinfig
[civo] Support pool wise config
k8s-ci-robot authored May 29, 2023
2 parents c2b969d + ccb6a67 commit 97c12df
Showing 3 changed files with 311 additions and 26 deletions.
93 changes: 67 additions & 26 deletions cluster-autoscaler/cloudprovider/civo/civo_manager.go
@@ -119,51 +119,92 @@ func newManager(configReader io.Reader, discoveryOpts cloudprovider.NodeGroupDis
 // Refresh refreshes the cache holding the nodegroups. This is called by the CA
 // based on the `--scan-interval`. By default it's 10 seconds.
 func (m *Manager) Refresh() error {
-	var minSize int
-	var maxSize int
-	var workerConfigFound = false
+	var (
+		minSize           int
+		maxSize           int
+		workerConfigFound = false
+		poolConfigFound   = false
+		poolGroups        []*NodeGroup
+		workerGroups      []*NodeGroup
+	)
+
+	pools, err := m.client.ListKubernetesClusterPools(m.clusterID)
+	if err != nil {
+		return fmt.Errorf("couldn't list Kubernetes cluster pools: %s", err)
+	}
+
+	klog.V(4).Infof("refreshing workers node group kubernetes cluster: %q", m.clusterID)
 
 	for _, specString := range m.discoveryOpts.NodeGroupSpecs {
 		spec, err := dynamic.SpecFromString(specString, true)
 		if err != nil {
 			return fmt.Errorf("failed to parse node group spec: %v", err)
 		}
 
 		if spec.Name == "workers" {
 			minSize = spec.MinSize
 			maxSize = spec.MaxSize
 			workerConfigFound = true
 			klog.V(4).Infof("found configuration for workers node group: min: %d max: %d", minSize, maxSize)
+		} else {
+			poolConfigFound = true
+			pool := m.getNodeGroupConfig(spec, pools)
+			if pool != nil {
+				poolGroups = append(poolGroups, pool)
+			}
+			klog.V(4).Infof("found configuration for pool node group: min: %d max: %d", spec.MinSize, spec.MaxSize)
 		}
 	}
-	if !workerConfigFound {
-		return fmt.Errorf("no workers node group configuration found")
-	}
 
-	pools, err := m.client.ListKubernetesClusterPools(m.clusterID)
-	if err != nil {
-		return fmt.Errorf("couldn't list Kubernetes cluster pools: %s", err)
+	if poolConfigFound {
+		m.nodeGroups = poolGroups
+	} else if workerConfigFound {
+		for _, nodePool := range pools {
+			np := nodePool
+			klog.V(4).Infof("adding node pool: %q", nodePool.ID)
+
+			workerGroups = append(workerGroups, &NodeGroup{
+				id:        nodePool.ID,
+				clusterID: m.clusterID,
+				client:    m.client,
+				nodePool:  &np,
+				minSize:   minSize,
+				maxSize:   maxSize,
+			})
+		}
+		m.nodeGroups = workerGroups
+	} else {
+		return fmt.Errorf("no workers node group configuration found")
 	}
 
-	klog.V(4).Infof("refreshing workers node group kubernetes cluster: %q min: %d max: %d", m.clusterID, minSize, maxSize)
-
-	var group []*NodeGroup
-	for _, nodePool := range pools {
-		np := nodePool
-		klog.V(4).Infof("adding node pool: %q", nodePool.ID)
-
-		group = append(group, &NodeGroup{
-			id:        nodePool.ID,
-			clusterID: m.clusterID,
-			client:    m.client,
-			nodePool:  &np,
-			minSize:   minSize,
-			maxSize:   maxSize,
-		})
+	// If both configs are found, the pool config takes precedence
+	if poolConfigFound && workerConfigFound {
+		m.nodeGroups = poolGroups
 	}
 
-	if len(group) == 0 {
+	if len(m.nodeGroups) == 0 {
 		klog.V(4).Info("cluster-autoscaler is disabled. no node pools are configured")
 	}
 
-	m.nodeGroups = group
 	return nil
 }
+
+// getNodeGroupConfig gets the node group configuration from the cluster pool configuration
+func (m *Manager) getNodeGroupConfig(spec *dynamic.NodeGroupSpec, pools []civocloud.KubernetesPool) *NodeGroup {
+	for _, nodePool := range pools {
+		if spec.Name == nodePool.ID {
+			np := nodePool
+			klog.V(4).Infof("adding node pool: %q min: %d max: %d", nodePool.ID, spec.MinSize, spec.MaxSize)
+
+			return &NodeGroup{
+				id:        nodePool.ID,
+				clusterID: m.clusterID,
+				client:    m.client,
+				nodePool:  &np,
+				minSize:   spec.MinSize,
+				maxSize:   spec.MaxSize,
+			}
+		}
+	}
+	return nil
+}
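Context on the change above: the node group specs consumed by Refresh are the "min:max:name" strings passed via repeated --nodes flags. A spec named "workers" still acts as a catch-all applied to every pool, while any other name is matched against a Civo pool ID via getNodeGroupConfig, and per-pool specs take precedence when both kinds are present. The following is a minimal, self-contained sketch of that parse-and-precedence rule only; the names parseSpec and poolSpec are hypothetical and stand in for the autoscaler's dynamic.SpecFromString and NodeGroup types.

// precedence_sketch.go — illustrative only; parseSpec and poolSpec are not
// part of the cluster-autoscaler codebase.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

type poolSpec struct {
	name     string
	min, max int
}

// parseSpec parses a "min:max:name" node group spec string.
func parseSpec(s string) (poolSpec, error) {
	parts := strings.SplitN(s, ":", 3)
	if len(parts) != 3 {
		return poolSpec{}, fmt.Errorf("invalid node group spec %q", s)
	}
	minSize, err := strconv.Atoi(parts[0])
	if err != nil {
		return poolSpec{}, fmt.Errorf("invalid min in %q: %v", s, err)
	}
	maxSize, err := strconv.Atoi(parts[1])
	if err != nil {
		return poolSpec{}, fmt.Errorf("invalid max in %q: %v", s, err)
	}
	return poolSpec{name: parts[2], min: minSize, max: maxSize}, nil
}

func main() {
	// Mixing a catch-all "workers" spec with per-pool specs: the per-pool
	// entries win, mirroring the precedence rule in Manager.Refresh above.
	specs := []string{"1:10:workers", "1:5:pool-1", "5:10:pool-2"}

	var workers *poolSpec
	perPool := map[string]poolSpec{}
	for _, raw := range specs {
		s, err := parseSpec(raw)
		if err != nil {
			fmt.Println("error:", err)
			continue
		}
		if s.name == "workers" {
			w := s
			workers = &w
		} else {
			perPool[s.name] = s
		}
	}

	if len(perPool) > 0 {
		for name, s := range perPool {
			fmt.Printf("pool %q scales between %d and %d nodes\n", name, s.min, s.max)
		}
	} else if workers != nil {
		fmt.Printf("all pools scale between %d and %d nodes\n", workers.min, workers.max)
	}
}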
63 changes: 63 additions & 0 deletions cluster-autoscaler/cloudprovider/civo/civo_manager_test.go
@@ -174,3 +174,66 @@ func TestCivoManager_RefreshWithNodeSpec(t *testing.T) {
 		assert.Equal(t, 10, manager.nodeGroups[0].maxSize, "maximum node for node group does not match")
 	})
 }
+
+func TestCivoManager_RefreshWithNodeSpecPool(t *testing.T) {
+	t.Run("success", func(t *testing.T) {
+		cfg := `{"cluster_id": "123456", "api_key": "123-123-123", "api_url": "https://api.civo.com", "region": "test"}`
+		nodeGroupSpecs := []string{"1:5:pool-1", "5:10:pool-2"}
+		nodeGroupDiscoveryOptions := cloudprovider.NodeGroupDiscoveryOptions{NodeGroupSpecs: nodeGroupSpecs}
+		manager, err := newManager(bytes.NewBufferString(cfg), nodeGroupDiscoveryOptions)
+		assert.NoError(t, err)
+
+		client := &civoClientMock{}
+
+		client.On("ListKubernetesClusterPools", manager.clusterID).Return(
+			[]civocloud.KubernetesPool{
+				{
+					ID:            "pool-1",
+					Count:         2,
+					Size:          "small",
+					InstanceNames: []string{"test-1", "test-2"},
+					Instances: []civocloud.KubernetesInstance{
+						{
+							ID:       "1",
+							Hostname: "test-1",
+							Status:   "ACTIVE",
+						},
+						{
+							ID:       "2",
+							Hostname: "test-1",
+							Status:   "ACTIVE",
+						},
+					},
+				},
+				{
+					ID:            "pool-2",
+					Count:         2,
+					Size:          "small",
+					InstanceNames: []string{"test-1", "test-2"},
+					Instances: []civocloud.KubernetesInstance{
+						{
+							ID:       "3",
+							Hostname: "test-3",
+							Status:   "ACTIVE",
+						},
+						{
+							ID:       "4",
+							Hostname: "test-4",
+							Status:   "BUILDING",
+						},
+					},
+				},
+			},
+			nil,
+		).Once()
+
+		manager.client = client
+		err = manager.Refresh()
+		assert.NoError(t, err)
+		assert.Equal(t, 2, len(manager.nodeGroups), "number of node groups do not match")
+		assert.Equal(t, 1, manager.nodeGroups[0].minSize, "minimum node for node group does not match")
+		assert.Equal(t, 5, manager.nodeGroups[0].maxSize, "maximum node for node group does not match")
+		assert.Equal(t, 5, manager.nodeGroups[1].minSize, "minimum node for node group does not match")
+		assert.Equal(t, 10, manager.nodeGroups[1].maxSize, "maximum node for node group does not match")
+	})
+}
@@ -0,0 +1,181 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-addon: cluster-autoscaler.addons.k8s.io
    k8s-app: cluster-autoscaler
  name: cluster-autoscaler
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cluster-autoscaler
  labels:
    k8s-addon: cluster-autoscaler.addons.k8s.io
    k8s-app: cluster-autoscaler
rules:
  - apiGroups: ["storage.k8s.io"]
    resources: ["csistoragecapacities", "csidrivers"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["events", "endpoints"]
    verbs: ["create", "patch"]
  - apiGroups: [""]
    resources: ["pods/eviction"]
    verbs: ["create"]
  - apiGroups: [""]
    resources: ["pods/status"]
    verbs: ["update"]
  - apiGroups: [""]
    resources: ["endpoints"]
    resourceNames: ["cluster-autoscaler"]
    verbs: ["get", "update"]
  - apiGroups: [""]
    resources: ["nodes", "namespaces"]
    verbs: ["watch", "list", "get", "update"]
  - apiGroups: [""]
    resources:
      - "pods"
      - "services"
      - "replicationcontrollers"
      - "persistentvolumeclaims"
      - "persistentvolumes"
    verbs: ["watch", "list", "get"]
  - apiGroups: ["extensions"]
    resources: ["replicasets", "daemonsets"]
    verbs: ["watch", "list", "get"]
  - apiGroups: ["policy"]
    resources: ["poddisruptionbudgets"]
    verbs: ["watch", "list"]
  - apiGroups: ["apps"]
    resources: ["statefulsets", "replicasets", "daemonsets"]
    verbs: ["watch", "list", "get"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses", "csinodes"]
    verbs: ["watch", "list", "get"]
  - apiGroups: ["batch", "extensions"]
    resources: ["jobs"]
    verbs: ["get", "list", "watch", "patch"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["create"]
  - apiGroups: ["coordination.k8s.io"]
    resourceNames: ["cluster-autoscaler"]
    resources: ["leases"]
    verbs: ["get", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cluster-autoscaler
  namespace: kube-system
  labels:
    k8s-addon: cluster-autoscaler.addons.k8s.io
    k8s-app: cluster-autoscaler
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["create", "list", "watch"]
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames:
      ["cluster-autoscaler-status", "cluster-autoscaler-priority-expander"]
    verbs: ["delete", "get", "update", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cluster-autoscaler
  labels:
    k8s-addon: cluster-autoscaler.addons.k8s.io
    k8s-app: cluster-autoscaler
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-autoscaler
subjects:
  - kind: ServiceAccount
    name: cluster-autoscaler
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cluster-autoscaler
  namespace: kube-system
  labels:
    k8s-addon: cluster-autoscaler.addons.k8s.io
    k8s-app: cluster-autoscaler
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cluster-autoscaler
subjects:
  - kind: ServiceAccount
    name: cluster-autoscaler
    namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cluster-autoscaler
  namespace: kube-system
  labels:
    app: cluster-autoscaler
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cluster-autoscaler
  template:
    metadata:
      labels:
        app: cluster-autoscaler
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "8085"
    spec:
      serviceAccountName: cluster-autoscaler
      containers:
        - image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.25.0 # or your custom image
          name: cluster-autoscaler
          imagePullPolicy: Always
          resources:
            limits:
              cpu: 100m
              memory: 300Mi
            requests:
              cpu: 100m
              memory: 300Mi
          command:
            - ./cluster-autoscaler
            - --v=4
            - --stderrthreshold=info
            - --cloud-provider=civo
            - --nodes=1:5:<POOL_ID>
            - --nodes=5:10:<POOL_ID>
            - --skip-nodes-with-local-storage=false
            - --skip-nodes-with-system-pods=false
          env:
            - name: CIVO_API_URL
              valueFrom:
                secretKeyRef:
                  key: api-url
                  name: civo-api-access
            - name: CIVO_API_KEY
              valueFrom:
                secretKeyRef:
                  key: api-key
                  name: civo-api-access
            - name: CIVO_CLUSTER_ID
              valueFrom:
                secretKeyRef:
                  key: cluster-id
                  name: civo-api-access
            - name: CIVO_REGION
              valueFrom:
                secretKeyRef:
                  key: region
                  name: civo-api-access
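Usage note: each --nodes=min:max:<POOL_ID> argument in the Deployment above maps to one pool-wise node group handled by the new getNodeGroupConfig path in civo_manager.go, and the container reads Civo credentials from a Secret named civo-api-access in kube-system with the keys api-url, api-key, cluster-id, and region referenced by its secretKeyRef entries. A minimal sketch of that Secret follows; the placeholder values and the choice of stringData are assumptions to be replaced with real cluster credentials.

---
apiVersion: v1
kind: Secret
metadata:
  name: civo-api-access
  namespace: kube-system
type: Opaque
stringData:
  api-url: "https://api.civo.com"
  api-key: "<YOUR_CIVO_API_KEY>"
  cluster-id: "<YOUR_CLUSTER_ID>"
  region: "<YOUR_CLUSTER_REGION>"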
