-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathgatherInfo.go
144 lines (133 loc) · 4.36 KB
/
gatherInfo.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
package main
import (
"context"
"encoding/json"
"strings"
corev1 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
metricsv1b1 "k8s.io/metrics/pkg/apis/metrics/v1beta1"
)
// getNodeMetrics fetches current node usage figures from the
// metrics.k8s.io/v1beta1 API via a raw REST request (the typed metrics
// client is not wired in here) and decodes the JSON payload into a
// NodeMetricsList. Any request or decode error aborts via check.
func getNodeMetrics(clientset *kubernetes.Clientset) (nodeMetricList *metricsv1b1.NodeMetricsList) {
	raw, err := clientset.RESTClient().
		Get().
		AbsPath("apis/metrics.k8s.io/v1beta1/nodes").
		DoRaw(context.Background())
	check(err)
	// Unmarshal allocates the list for us via the pointer-to-pointer.
	check(json.Unmarshal(raw, &nodeMetricList))
	return nodeMetricList
}
// gatherInfo collects cluster capacity data: per-node allocatable
// resources, cluster-wide ResourceQuota totals, live usage from the
// metrics API, and aggregated container requests/limits per node.
//
// nodeLabel selects which schedulable nodes are reported, in "key=value"
// form; an empty string reports every schedulable node. A bare "key"
// (no '=') now matches nodes whose label value is the empty string —
// previously that input panicked with an index-out-of-range on the
// result of strings.Split.
//
// Errors from the Kubernetes API abort the program (panic/check), matching
// the rest of this file's error-handling style.
func gatherInfo(clientset *kubernetes.Clientset, nodeLabel *string) (clusterInfo ClusterInfo) {
	nodeInfo := make(map[string]NodeInfo)

	// strings.Cut is total: with no '=' present, value is "" and no panic
	// is possible (the old labelSlice[1] indexing crashed on bare keys).
	nodeLabelKey, nodeLabelValue, _ := strings.Cut(*nodeLabel, "=")

	// List all nodes
	nodes, err := clientset.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err.Error())
	}

	// Mark the nodes whose stats should be printed: schedulable nodes,
	// optionally restricted to those carrying the requested label value.
	// Label keys are unique per node, so a direct map lookup is equivalent
	// to scanning v.ObjectMeta.Labels.
	if nodeLabelKey != "" {
		clusterInfo.NodeLabel = nodeLabelKey
	}
	for _, v := range nodes.Items {
		if v.Spec.Unschedulable {
			continue
		}
		if nodeLabelKey != "" && v.ObjectMeta.Labels[nodeLabelKey] != nodeLabelValue {
			continue
		}
		node := nodeInfo[v.Name]
		node.PrintOutput = true
		nodeInfo[v.Name] = node
	}

	// Sum allocatable capacity over the selected nodes, and remember the
	// largest node's figures (the "N-minus" headroom values).
	for _, v := range nodes.Items {
		if !nodeInfo[v.Name].PrintOutput {
			continue
		}
		cpu := v.Status.Allocatable.Cpu()
		mem := v.Status.Allocatable.Memory()
		pods := v.Status.Allocatable.Pods()
		if cpu.Value() > clusterInfo.NminusCPU.Value() {
			clusterInfo.NminusCPU = *cpu
			clusterInfo.NminusMemory = *mem
			clusterInfo.NminusPods = *pods
		}
		clusterInfo.ClusterAllocatableMemory.Add(*mem)
		clusterInfo.ClusterAllocatableCPU.Add(*cpu)
		clusterInfo.ClusterAllocatablePods.Add(*pods)
		node := nodeInfo[v.Name]
		node.AllocatableCPU = *cpu
		node.AllocatableMemory = *mem
		node.AllocatablePods = *pods
		nodeInfo[v.Name] = node
	}

	// List quotas across all namespaces and add their hard limits up.
	quotas, err := clientset.CoreV1().ResourceQuotas("").List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err.Error())
	}
	for _, q := range quotas.Items {
		limitMem := q.Spec.Hard[corev1.ResourceLimitsMemory]
		limitCPU := q.Spec.Hard[corev1.ResourceLimitsCPU]
		requestMem := q.Spec.Hard[corev1.ResourceRequestsMemory]
		requestCPU := q.Spec.Hard[corev1.ResourceRequestsCPU]
		quotaPods := q.Spec.Hard[corev1.ResourcePods]
		clusterInfo.RqclusterAllocatedLimitsMemory.Add(limitMem)
		clusterInfo.RqclusterAllocatedLimitsCPU.Add(limitCPU)
		clusterInfo.RqclusterAllocatedPods.Add(quotaPods)
		clusterInfo.RqclusterAllocatedRequestsMemory.Add(requestMem)
		clusterInfo.RqclusterAllocatedRequestsCPU.Add(requestCPU)
	}

	// Record live usage as reported by the metrics API.
	for _, metricNode := range getNodeMetrics(clientset).Items {
		node := nodeInfo[metricNode.Name]
		node.UsedCPU = *metricNode.Usage.Cpu()
		node.UsedMemory = *metricNode.Usage.Memory()
		nodeInfo[metricNode.Name] = node
	}

	// Aggregate container requests/limits and pod counts onto the node
	// each pod is scheduled on. Failed and Succeeded pods no longer
	// consume resources, so they add nothing (but the map write-back is
	// kept so node-name entries appear exactly as before).
	pods, err := clientset.CoreV1().Pods("").List(context.Background(), metav1.ListOptions{})
	check(err)
	for _, pod := range pods.Items {
		node := nodeInfo[pod.Spec.NodeName]
		if pod.Status.Phase != corev1.PodFailed && pod.Status.Phase != corev1.PodSucceeded {
			for _, container := range pod.Spec.Containers {
				// Fresh accumulators avoid mutating any Quantity that
				// might share internal state with API objects.
				memRequests := &resource.Quantity{}
				memRequests.Add(node.UsedMemoryRequests)
				memRequests.Add(*container.Resources.Requests.Memory())
				node.UsedMemoryRequests = *memRequests

				memLimits := &resource.Quantity{}
				memLimits.Add(node.UsedMemoryLimits)
				memLimits.Add(*container.Resources.Limits.Memory())
				node.UsedMemoryLimits = *memLimits

				cpuRequests := &resource.Quantity{}
				cpuRequests.Add(node.UsedCPURequests)
				cpuRequests.Add(*container.Resources.Requests.Cpu())
				node.UsedCPURequests = *cpuRequests
			}
			node.UsedPods++
		}
		nodeInfo[pod.Spec.NodeName] = node
	}

	clusterInfo.NodeInfo = nodeInfo
	return clusterInfo
}