host.go
// Copyright 2025 PingCAP, Inc. Licensed under Apache-2.0.

package clusterinfo

import (
	"sort"
	"strings"

	"github.com/pingcap/log"
	"github.com/samber/lo"
	"go.uber.org/zap"
	"gorm.io/gorm"

	"github.com/pingcap/tidb-dashboard/pkg/apiserver/clusterinfo/hostinfo"
	"github.com/pingcap/tidb-dashboard/pkg/utils/topology"
)
// fetchAllInstanceHosts fetches all hosts in the cluster and returns them in ascending order.
func (s *Service) fetchAllInstanceHosts() ([]string, error) {
	allHostsMap := make(map[string]struct{})

	// PD instances.
	pdInfo, err := topology.FetchPDTopology(s.params.PDClient)
	if err != nil {
		return nil, err
	}
	for _, i := range pdInfo {
		allHostsMap[i.IP] = struct{}{}
	}

	// TiKV and TiFlash stores.
	tikvInfo, tiFlashInfo, err := topology.FetchStoreTopology(s.params.PDClient)
	if err != nil {
		return nil, err
	}
	for _, i := range tikvInfo {
		allHostsMap[i.IP] = struct{}{}
	}
	for _, i := range tiFlashInfo {
		allHostsMap[i.IP] = struct{}{}
	}

	// TiDB instances.
	tidbInfo, err := topology.FetchTiDBTopology(s.lifecycleCtx, s.params.EtcdClient)
	if err != nil {
		return nil, err
	}
	for _, i := range tidbInfo {
		allHostsMap[i.IP] = struct{}{}
	}

	// TiCDC instances.
	ticdcInfo, err := topology.FetchTiCDCTopology(s.lifecycleCtx, s.params.EtcdClient)
	if err != nil {
		return nil, err
	}
	for _, i := range ticdcInfo {
		allHostsMap[i.IP] = struct{}{}
	}

	// TiProxy instances.
	tiproxyInfo, err := topology.FetchTiProxyTopology(s.lifecycleCtx, s.params.EtcdClient)
	if err != nil {
		return nil, err
	}
	for _, i := range tiproxyInfo {
		allHostsMap[i.IP] = struct{}{}
	}

	// TSO members. A 404 from PD means the API is unavailable and is treated as an empty list.
	tsoInfo, err := topology.FetchTSOTopology(s.lifecycleCtx, s.params.PDClient)
	if err != nil {
		if strings.Contains(err.Error(), "status code 404") {
			tsoInfo = []topology.TSOInfo{}
		} else {
			return nil, err
		}
	}
	for _, i := range tsoInfo {
		allHostsMap[i.IP] = struct{}{}
	}

	// Scheduling members. A 404 is likewise treated as an empty list.
	schedulingInfo, err := topology.FetchSchedulingTopology(s.lifecycleCtx, s.params.PDClient)
	if err != nil {
		if strings.Contains(err.Error(), "status code 404") {
			schedulingInfo = []topology.SchedulingInfo{}
		} else {
			return nil, err
		}
	}
	for _, i := range schedulingInfo {
		allHostsMap[i.IP] = struct{}{}
	}

	allHosts := lo.Keys(allHostsMap)
	sort.Strings(allHosts)
	return allHosts, nil
}
// fetchAllHostsInfo fetches all hosts and their information.
// Note: the returned data and error may both be non-nil; the data is best-effort
// when the error is set.
func (s *Service) fetchAllHostsInfo(db *gorm.DB) ([]*hostinfo.Info, error) {
	allHosts, err := s.fetchAllInstanceHosts()
	if err != nil {
		return nil, err
	}

	// Fill host information from the cluster load, hardware and instance tables.
	// A failure does not abort the remaining fills; only the first error is
	// logged and kept for the caller.
	allHostsInfoMap := make(map[string]*hostinfo.Info)
	if e := hostinfo.FillFromClusterLoadTable(db, allHostsInfoMap); e != nil {
		log.Warn("Failed to read cluster_load table", zap.Error(e))
		err = e
	}
	if e := hostinfo.FillFromClusterHardwareTable(db, allHostsInfoMap); e != nil && err == nil {
		log.Warn("Failed to read cluster_hardware table", zap.Error(e))
		err = e
	}
	if e := hostinfo.FillInstances(db, allHostsInfoMap); e != nil && err == nil {
		log.Warn("Failed to fill instances for hosts", zap.Error(e))
		err = e
	}

	// Build the result in sorted host order, adding an empty entry for any host
	// that has no information in the tables above.
	r := make([]*hostinfo.Info, 0, len(allHosts))
	for _, host := range allHosts {
		if im, ok := allHostsInfoMap[host]; ok {
			r = append(r, im)
		} else {
			// Missing item
			r = append(r, hostinfo.NewHostInfo(host))
		}
	}
	return r, err
}
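
// Usage sketch: because fetchAllHostsInfo may return both data and an error,
// a caller can serve the partial host list and surface the error as a warning
// rather than failing the whole request. The snippet below is illustrative
// only; the surrounding handler is an assumption, not part of this file:
//
//	hosts, err := s.fetchAllHostsInfo(db)
//	if hosts == nil {
//		return nil, err // topology could not be fetched at all
//	}
//	if err != nil {
//		log.Warn("host info is incomplete", zap.Error(err)) // partial data
//	}
//	return hosts, nil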