forked from pingcap/tidb-inspect-tools
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: kafka.go
126 lines (108 loc) · 3.09 KB
/
kafka.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
package main
import (
"encoding/json"
"time"
"github.com/Shopify/sarama"
"github.com/juju/errors"
"github.com/ngaut/log"
"github.com/unrolled/render"
)
// Shared settings for producer creation and message delivery.
const (
	// timeFormat is the Go reference layout used to render alert timestamps.
	timeFormat = "2006-01-02 15:04:05"
	// maxRetry bounds both producer-creation attempts and per-message send attempts.
	maxRetry = 12
	// retryInterval is the pause between consecutive retry attempts.
	retryInterval = 5 * time.Second
)
// KafkaMsg represents an alert event as it is serialized onto the Kafka
// topic. The json tags define the wire schema consumed downstream — do
// not rename them. Field order also fixes the key order json.Marshal
// emits, so keep it stable.
type KafkaMsg struct {
	Title       string `json:"event_object"` // alert name (from the "alertname" label)
	Source      string `json:"object_name"`  // originating environment (from the "env" label)
	Instance    string `json:"object_ip"`    // instance identifier (from the "instance" label)
	Description string `json:"event_msg"`    // human-readable description annotation
	Time        string `json:"event_time"`   // alert start time, formatted with timeFormat
	Level       string `json:"event_level"`  // severity (from the "level" label)
	Summary     string `json:"summary"`      // short summary annotation
	Expr        string `json:"expr"`         // alerting rule expression (from the "expr" label)
	Value       string `json:"value"`        // triggering value annotation
	URL         string `json:"url"`          // generator URL back to the alert source
}
// Run bundles the runtime dependencies of the bridge: an HTTP response
// renderer, the channel carrying incoming alert batches, and the Kafka
// producer used to publish them.
type Run struct {
	Rdr         *render.Render  // HTTP response renderer
	AlertMsgs   chan *AlertData // incoming alert batches; consumed by Scheduler
	KafkaClient sarama.SyncProducer // set by CreateKafkaProducer; used by PushKafkaMsg
}
// getValue returns the value stored under key in kv, or the empty
// string when the key is absent.
func getValue(kv KV, key string) string {
	val, ok := kv[key]
	if !ok {
		return ""
	}
	return val
}
//CreateKafkaProducer creates a new SyncProducer using the given broker addresses and configuration
// CreateKafkaProducer creates a new SyncProducer for the given broker
// addresses and stores it in r.KafkaClient, retrying up to maxRetry
// times before returning the last error.
//
// The producer waits for acknowledgement from all in-sync replicas
// (WaitForAll) and returns successes, which sarama requires for a
// SyncProducer.
func (r *Run) CreateKafkaProducer(addrs []string) error {
	// The configuration is identical on every attempt, so build it once
	// instead of once per retry iteration.
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true
	config.Producer.RequiredAcks = sarama.WaitForAll

	var err error
	for i := 0; i < maxRetry; i++ {
		r.KafkaClient, err = sarama.NewSyncProducer(addrs, config)
		if err == nil {
			return nil
		}
		log.Errorf("create kafka producer with error: %v", err)
		// Skip the sleep after the final attempt; we are about to return.
		if i < maxRetry-1 {
			time.Sleep(retryInterval)
		}
	}
	return errors.Trace(err)
}
// PushKafkaMsg produces a single message to the configured Kafka topic
// (*kafkaTopic) and logs the partition and offset it landed on.
func (r *Run) PushKafkaMsg(msg string) error {
	pm := &sarama.ProducerMessage{
		Topic: *kafkaTopic,
		Value: sarama.StringEncoder(msg),
	}

	partition, offset, err := r.KafkaClient.SendMessage(pm)
	if err != nil {
		return errors.Trace(err)
	}

	log.Infof("Produced message %s to kafka cluster partition %d with offset %d", msg, partition, offset)
	return nil
}
// TransferData converts every alert in ad into a KafkaMsg, serializes it
// to JSON, and produces it to Kafka, retrying each message up to
// maxRetry times on transient send failures.
//
// Bug fix: the original code executed `return` once a message was sent
// successfully, which exited TransferData entirely and silently dropped
// all remaining alerts in ad.Alerts. A successful send must only end the
// retry loop for the current alert (`break`), then continue with the
// next one.
func (r *Run) TransferData(ad *AlertData) {
	for _, alert := range ad.Alerts {
		kafkaMsg := &KafkaMsg{
			Title:       getValue(alert.Labels, "alertname"),
			Source:      getValue(alert.Labels, "env"),
			Instance:    getValue(alert.Labels, "instance"),
			Description: getValue(alert.Annotations, "description"),
			Time:        alert.StartsAt.Format(timeFormat),
			Level:       getValue(alert.Labels, "level"),
			Summary:     getValue(alert.Annotations, "summary"),
			Expr:        getValue(alert.Labels, "expr"),
			Value:       getValue(alert.Annotations, "value"),
			URL:         alert.GeneratorURL,
		}
		alertByte, err := json.Marshal(kafkaMsg)
		if err != nil {
			log.Errorf("Failed to marshal KafkaMsg: %v", err)
			continue
		}
		for i := 0; i < maxRetry; i++ {
			if err := r.PushKafkaMsg(string(alertByte)); err != nil {
				log.Errorf("Failed to produce message to kafka cluster: %v", err)
				time.Sleep(retryInterval)
				continue
			}
			// Delivered: stop retrying this alert and move on to the next.
			break
		}
	}
}
// Scheduler consumes alert batches from r.AlertMsgs and forwards each to
// TransferData. The inner range blocks until the channel is closed, so
// under normal operation this function never reaches the Sleep; the
// outer loop only spins (with a 3s pause per iteration) once AlertMsgs
// has been closed. NOTE(review): that post-close spinning looks
// unintentional — confirm whether the sender ever closes the channel.
func (r *Run) Scheduler() {
	for {
		for alert := range r.AlertMsgs {
			r.TransferData(alert)
		}
		time.Sleep(3 * time.Second)
	}
}