From 5fd58e613637468bcdfd6f52ec609d62485b8a92 Mon Sep 17 00:00:00 2001 From: luohoufu Date: Fri, 10 Jan 2025 12:23:22 +0800 Subject: [PATCH 1/7] chore: code format --- common/elastic.go | 8 +- core/errors/errors.go | 30 +- core/security/enum/const.go | 12 +- model/alerting/alert.go | 98 +- model/alerting/condition.go | 39 +- model/alerting/destination.go | 19 +- model/alerting/filter.go | 4 +- model/alerting/filter_query.go | 14 +- model/alerting/metric.go | 29 +- model/alerting/resource.go | 17 +- model/alerting/rule.go | 92 +- model/alerting/rule_test.go | 133 +- model/alerting/schedule.go | 7 +- model/alerting/webhook.go | 14 +- model/email_server.go | 16 +- model/insight/dashboard.go | 28 +- model/insight/field_meta.go | 7 +- model/insight/metric_data.go | 66 +- model/insight/visualization.go | 18 +- model/insight/widget.go | 4 +- model/layout.go | 17 +- modules/agent/api/host.go | 22 +- modules/agent/api/remote_config.go | 2 +- modules/agent/common/cert.go | 20 +- modules/agent/common/config.go | 2 +- modules/agent/model/config.go | 14 +- modules/elastic/api/activity.go | 64 +- modules/elastic/api/alias.go | 10 +- modules/elastic/api/cluster_overview.go | 132 +- modules/elastic/api/discover.go | 55 +- modules/elastic/api/host.go | 32 +- modules/elastic/api/ilm.go | 8 +- modules/elastic/api/index_metrics.go | 670 ++++----- modules/elastic/api/index_overview.go | 261 ++-- modules/elastic/api/manage.go | 30 +- modules/elastic/api/metrics_util.go | 54 +- modules/elastic/api/metrics_util_test.go | 100 +- modules/elastic/api/node_metrics.go | 1287 ++++++++--------- modules/elastic/api/node_overview.go | 372 +++-- modules/elastic/api/proxy.go | 2 - modules/elastic/api/search.go | 123 +- modules/elastic/api/setting.go | 15 +- modules/elastic/api/shard.go | 4 +- modules/elastic/api/template.go | 8 +- modules/elastic/api/threadpool_metrics.go | 408 +++--- modules/elastic/api/trace_template.go | 43 +- modules/elastic/api/v1/cluster_overview.go | 38 +- modules/elastic/api/v1/index_metrics.go | 587 ++++---- modules/elastic/api/v1/index_overview.go | 186 ++- modules/elastic/api/v1/manage.go | 29 +- modules/elastic/api/v1/metrics_util.go | 25 +- modules/elastic/api/v1/node_overview.go | 309 ++-- modules/security/realm/authc/saml/main.go | 16 +- plugin/api/alerting/alert.go | 22 +- plugin/api/alerting/channel.go | 50 +- plugin/api/alerting/message.go | 128 +- plugin/api/email/api.go | 6 +- plugin/api/index_management/common_command.go | 41 +- plugin/api/index_management/document.go | 8 +- plugin/api/insight/dashboard.go | 4 +- plugin/api/insight/map_label.go | 16 +- plugin/api/insight/visualization.go | 2 +- plugin/managed/server/config.go | 4 +- plugin/managed/server/instance.go | 2 +- plugin/managed/server/manager.go | 2 +- service/alerting/action/email.go | 18 +- service/alerting/action/webhook.go | 5 +- service/alerting/common/helper.go | 18 +- service/alerting/constants.go | 14 +- service/alerting/elasticsearch/engine.go | 307 ++-- service/alerting/elasticsearch/engine_test.go | 50 +- service/alerting/elasticsearch/init.go | 2 +- service/alerting/engine.go | 11 +- service/alerting/env.go | 4 +- service/alerting/funcs/bytes.go | 2 +- service/alerting/funcs/date.go | 6 +- service/alerting/funcs/elastic.go | 12 +- service/alerting/funcs/function.go | 30 +- service/alerting/funcs/numberic.go | 6 +- service/alerting/funcs/strings.go | 4 +- service/alerting/parameter.go | 10 +- 81 files changed, 3160 insertions(+), 3224 deletions(-) diff --git a/common/elastic.go b/common/elastic.go index 
a42b1e3f..0747f38f 100644 --- a/common/elastic.go +++ b/common/elastic.go @@ -46,7 +46,7 @@ func GetMapStringValue(m util.MapStr, key string) string { func MapLabel(labelName, indexName, keyField, valueField string, client elastic.API, cacheLabels map[string]string) string { if len(cacheLabels) > 0 { - if v, ok := cacheLabels[labelName]; ok{ + if v, ok := cacheLabels[labelName]; ok { return v } } @@ -58,7 +58,7 @@ func MapLabel(labelName, indexName, keyField, valueField string, client elastic. return labelMaps[labelName] } -func GetLabelMaps( indexName, keyField, valueField string, client elastic.API, keyFieldValues []string, cacheSize int) (map[string]string, error){ +func GetLabelMaps(indexName, keyField, valueField string, client elastic.API, keyFieldValues []string, cacheSize int) (map[string]string, error) { if client == nil { return nil, fmt.Errorf("cluster client must not be empty") } @@ -89,7 +89,7 @@ func GetLabelMaps( indexName, keyField, valueField string, client elastic.API, k var key string if keyField == "_id" { key = hit.ID - }else{ + } else { key = GetMapStringValue(sourceM, keyField) } if key != "" { @@ -99,7 +99,7 @@ func GetLabelMaps( indexName, keyField, valueField string, client elastic.API, k return labelMaps, nil } -func ExecuteTemplate( tpl *template.Template, ctx map[string]interface{}) ([]byte, error){ +func ExecuteTemplate(tpl *template.Template, ctx map[string]interface{}) ([]byte, error) { msgBuffer := &bytes.Buffer{} err := tpl.Execute(msgBuffer, ctx) return msgBuffer.Bytes(), err diff --git a/core/errors/errors.go b/core/errors/errors.go index 6b685152..aa44df2c 100644 --- a/core/errors/errors.go +++ b/core/errors/errors.go @@ -29,24 +29,24 @@ import ( ) const ( - ErrTypeRequestParams = "request_params_error" - ErrTypeApplication = "application_error" - ErrTypeAlreadyExists = "already_exists_error" - ErrTypeNotExists = "not_exists_error" - ErrTypeIncorrectPassword = "incorrect_password_error" + ErrTypeRequestParams = "request_params_error" + ErrTypeApplication = "application_error" + ErrTypeAlreadyExists = "already_exists_error" + ErrTypeNotExists = "not_exists_error" + ErrTypeIncorrectPassword = "incorrect_password_error" ErrTypeDomainPrefixMismatch = "domain_prefix_mismatch_error" - ErrTypeDisabled = "disabled_error" - ErrTypeRequestTimeout = "request_timeout_error" + ErrTypeDisabled = "disabled_error" + ErrTypeRequestTimeout = "request_timeout_error" ) var ( ErrPasswordIncorrect = errors.New("incorrect password") - ErrNotExistsErr = errors.New("not exists") + ErrNotExistsErr = errors.New("not exists") ) type Error struct { - typ string - msg interface{} + typ string + msg interface{} field string } @@ -54,22 +54,22 @@ func (err Error) Error() string { return fmt.Sprintf("%s:%v: %v", err.typ, err.field, err.msg) } -//NewAppError returns an application error +// NewAppError returns an application error func NewAppError(msg any) *Error { return New(ErrTypeApplication, "", msg) } -//NewParamsError returns a request params error +// NewParamsError returns a request params error func NewParamsError(field string, msg any) *Error { return New(ErrTypeRequestParams, field, msg) } -//NewAlreadyExistsError returns an already exists error +// NewAlreadyExistsError returns an already exists error func NewAlreadyExistsError(field string, msg any) *Error { return New(ErrTypeAlreadyExists, field, msg) } -//NewNotExistsError returns a not exists error +// NewNotExistsError returns a not exists error func NewNotExistsError(field string, msg any) *Error { return 
New(ErrTypeNotExists, field, msg) } @@ -80,4 +80,4 @@ func New(typ string, field string, msg any) *Error { msg, field, } -} \ No newline at end of file +} diff --git a/core/security/enum/const.go b/core/security/enum/const.go index 58f93fa5..9f9968f2 100644 --- a/core/security/enum/const.go +++ b/core/security/enum/const.go @@ -144,8 +144,8 @@ const ( PermissionMigrationTaskWrite = "task:write" PermissionComparisonTaskRead = "comparison_task:read" PermissionComparisonTaskWrite = "comparison_task:write" - PermissionSmtpServerRead = "smtp_server:read" - PermissionSmtpServerWrite = "smtp_server:write" + PermissionSmtpServerRead = "smtp_server:read" + PermissionSmtpServerWrite = "smtp_server:write" ) var ( @@ -221,8 +221,8 @@ var ( DashboardAllPermission = []string{PermissionLayoutRead, PermissionLayoutWrite} WorkbenchReadPermission = []string{PermissionElasticsearchClusterRead, PermissionActivityRead, PermissionAlertMessageRead, PermissionElasticsearchMetricRead} WorkbenchAllPermission = WorkbenchReadPermission - SmtpServerReadPermission = []string{PermissionSmtpServerRead} - SmtpServerAllPermission = []string{PermissionSmtpServerRead, PermissionSmtpServerWrite} + SmtpServerReadPermission = []string{PermissionSmtpServerRead} + SmtpServerAllPermission = []string{PermissionSmtpServerRead, PermissionSmtpServerWrite} ) var AdminPrivilege = []string{ @@ -304,8 +304,8 @@ func init() { SubscriptionRead: SubscriptionReadPermission, SubscriptionAll: SubscriptionAllPermission, - SmtpServerRead: SmtpServerReadPermission, - SmtpServerAll: SmtpServerAllPermission, + SmtpServerRead: SmtpServerReadPermission, + SmtpServerAll: SmtpServerAllPermission, } } diff --git a/model/alerting/alert.go b/model/alerting/alert.go index bd4e52ed..38040650 100644 --- a/model/alerting/alert.go +++ b/model/alerting/alert.go @@ -32,72 +32,72 @@ import ( ) type Alert struct { - ID string `json:"id,omitempty" elastic_meta:"_id" elastic_mapping:"id: { type: keyword }"` - Created time.Time `json:"created,omitempty" elastic_mapping:"created: { type: date }"` - Updated time.Time `json:"updated,omitempty" elastic_mapping:"updated: { type: date }"` - RuleID string `json:"rule_id" elastic_mapping:"rule_id: { type: keyword }"` - RuleName string `json:"rule_name" elastic_mapping:"rule_name: { type: keyword }"` - ResourceID string `json:"resource_id" elastic_mapping:"resource_id: { type: keyword }"` - ResourceName string `json:"resource_name" elastic_mapping:"resource_name: { type: keyword }"` - Expression string `json:"expression" elastic_mapping:"expression: { type: keyword, copy_to:search_text }"` - Objects []string `json:"objects" elastic_mapping:"objects: { type:keyword,copy_to:search_text }"` - Priority string `json:"priority" elastic_mapping:"priority: { type: keyword }"` - Title string `json:"title" elastic_mapping:"title: { type: keyword }"` - Message string `json:"message" elastic_mapping:"context: { type: keyword, copy_to:search_text }"` - AcknowledgedTime interface{} `json:"acknowledged_time,omitempty"` - ActionExecutionResults []ActionExecutionResult `json:"action_execution_results,omitempty"` - RecoverActionResults []ActionExecutionResult `json:"recover_action_results,omitempty"` + ID string `json:"id,omitempty" elastic_meta:"_id" elastic_mapping:"id: { type: keyword }"` + Created time.Time `json:"created,omitempty" elastic_mapping:"created: { type: date }"` + Updated time.Time `json:"updated,omitempty" elastic_mapping:"updated: { type: date }"` + RuleID string `json:"rule_id" elastic_mapping:"rule_id: { type: keyword }"` + 
RuleName string `json:"rule_name" elastic_mapping:"rule_name: { type: keyword }"` + ResourceID string `json:"resource_id" elastic_mapping:"resource_id: { type: keyword }"` + ResourceName string `json:"resource_name" elastic_mapping:"resource_name: { type: keyword }"` + Expression string `json:"expression" elastic_mapping:"expression: { type: keyword, copy_to:search_text }"` + Objects []string `json:"objects" elastic_mapping:"objects: { type:keyword,copy_to:search_text }"` + Priority string `json:"priority" elastic_mapping:"priority: { type: keyword }"` + Title string `json:"title" elastic_mapping:"title: { type: keyword }"` + Message string `json:"message" elastic_mapping:"context: { type: keyword, copy_to:search_text }"` + AcknowledgedTime interface{} `json:"acknowledged_time,omitempty"` + ActionExecutionResults []ActionExecutionResult `json:"action_execution_results,omitempty"` + RecoverActionResults []ActionExecutionResult `json:"recover_action_results,omitempty"` EscalationActionResults []ActionExecutionResult `json:"escalation_action_results,omitempty"` - Users []string `json:"users,omitempty"` - State string `json:"state"` - Error string `json:"error,omitempty"` - IsNotified bool `json:"is_notified" elastic_mapping:"is_notified: { type: boolean }"` //indicates whether an alert notification was sent for this check - IsEscalated bool `json:"is_escalated" elastic_mapping:"is_escalated: { type: boolean }"` //indicates whether an escalation notification was sent for this check - Conditions Condition `json:"condition"` - ConditionResult *ConditionResult `json:"condition_result,omitempty" elastic_mapping:"condition_result: { type: object,enabled:false }"` - SearchText string `json:"-" elastic_mapping:"search_text:{type:text,index_prefixes:{},index_phrases:true, analyzer:suggest_text_search }"` + Users []string `json:"users,omitempty"` + State string `json:"state"` + Error string `json:"error,omitempty"` + IsNotified bool `json:"is_notified" elastic_mapping:"is_notified: { type: boolean }"` //indicates whether an alert notification was sent for this check + IsEscalated bool `json:"is_escalated" elastic_mapping:"is_escalated: { type: boolean }"` //indicates whether an escalation notification was sent for this check + Conditions Condition `json:"condition"` + ConditionResult *ConditionResult `json:"condition_result,omitempty" elastic_mapping:"condition_result: { type: object,enabled:false }"` + SearchText string `json:"-" elastic_mapping:"search_text:{type:text,index_prefixes:{},index_phrases:true, analyzer:suggest_text_search }"` } type ActionExecutionResult struct { ExecutionTime int `json:"execution_time"` Error string `json:"error"` - Result string `json:"result"` - Message string `json:"message"` - ChannelName string `json:"channel_name"` - ChannelType string `json:"channel_type"` - ChannelID string `json:"channel_id"` + Result string `json:"result"` + Message string `json:"message"` + ChannelName string `json:"channel_name"` + ChannelType string `json:"channel_type"` + ChannelID string `json:"channel_id"` } const ( AlertStateAlerting string = "alerting" AlertStateOK = "ok" - AlertStateError = "error" - AlertStateNodata = "nodata" + AlertStateError = "error" + AlertStateNodata = "nodata" ) const ( - MessageStateAlerting = "alerting" - MessageStateIgnored = "ignored" + MessageStateAlerting = "alerting" + MessageStateIgnored = "ignored" MessageStateRecovered = "recovered" ) type AlertMessage struct { - ID string `json:"id,omitempty" elastic_meta:"_id" elastic_mapping:"id: { type: keyword }"` - Created time.Time `json:"created,omitempty" elastic_mapping:"created: { type: date }"` - Updated time.Time `json:"updated,omitempty" elastic_mapping:"updated: { type: date }"` - RuleID string
`json:"rule_id" elastic_mapping:"rule_id: { type: keyword,copy_to:search_text }"` - ResourceID string `json:"resource_id" elastic_mapping:"resource_id: { type: keyword,copy_to:search_text }"` - ResourceName string `json:"resource_name" elastic_mapping:"resource_name: { type: keyword,copy_to:search_text }"` - Title string `json:"title" elastic_mapping:"title: { type: keyword,copy_to:search_text }"` - Message string `json:"message" elastic_mapping:"content: { type: keyword,copy_to:search_text }"` - Status string `json:"status" elastic_mapping:"status: { type: keyword,copy_to:search_text }"` - IgnoredTime time.Time `json:"ignored_time,omitempty" elastic_mapping:"ignored_time: { type: date }"` - IgnoredReason string `json:"ignored_reason,omitempty" elastic_mapping:"ignored_reason: { type: keyword,copy_to:search_text }"` - IgnoredUser string `json:"ignored_user,omitempty" elastic_mapping:"ignored_user: { type: keyword,copy_to:search_text }"` - Priority string `json:"priority" elastic_mapping:"priority: { type: keyword }"` - SearchText string `json:"-" elastic_mapping:"search_text:{type:text,index_prefixes:{},index_phrases:true, analyzer:suggest_text_search }"` - Category string `json:"category,omitempty" elastic_mapping:"category: { type: keyword,copy_to:search_text }"` - Tags []string `json:"tags,omitempty" elastic_mapping:"tags: { type: keyword,copy_to:search_text }"` + ID string `json:"id,omitempty" elastic_meta:"_id" elastic_mapping:"id: { type: keyword }"` + Created time.Time `json:"created,omitempty" elastic_mapping:"created: { type: date }"` + Updated time.Time `json:"updated,omitempty" elastic_mapping:"updated: { type: date }"` + RuleID string `json:"rule_id" elastic_mapping:"rule_id: { type: keyword,copy_to:search_text }"` + ResourceID string `json:"resource_id" elastic_mapping:"resource_id: { type: keyword,copy_to:search_text }"` + ResourceName string `json:"resource_name" elastic_mapping:"resource_name: { type: keyword,copy_to:search_text }"` + Title string `json:"title" elastic_mapping:"title: { type: keyword,copy_to:search_text }"` + Message string `json:"message" elastic_mapping:"content: { type: keyword,copy_to:search_text }"` + Status string `json:"status" elastic_mapping:"status: { type: keyword,copy_to:search_text }"` + IgnoredTime time.Time `json:"ignored_time,omitempty" elastic_mapping:"ignored_time: { type: date }"` + IgnoredReason string `json:"ignored_reason,omitempty" elastic_mapping:"ignored_reason: { type: keyword,copy_to:search_text }"` + IgnoredUser string `json:"ignored_user,omitempty" elastic_mapping:"ignored_user: { type: keyword,copy_to:search_text }"` + Priority string `json:"priority" elastic_mapping:"priority: { type: keyword }"` + SearchText string `json:"-" elastic_mapping:"search_text:{type:text,index_prefixes:{},index_phrases:true, analyzer:suggest_text_search }"` + Category string `json:"category,omitempty" elastic_mapping:"category: { type: keyword,copy_to:search_text }"` + Tags []string `json:"tags,omitempty" elastic_mapping:"tags: { type: keyword,copy_to:search_text }"` } /* @@ -109,4 +109,4 @@ type AlertMessage struct { */ //message status (Active, Ignore, Recover) -//rule status (Active, Error, OK) \ No newline at end of file +//rule status (Active, Error, OK) diff --git a/model/alerting/condition.go b/model/alerting/condition.go index dbecb545..2e48e7ae 100644 --- a/model/alerting/condition.go +++ b/model/alerting/condition.go @@ -30,10 +30,11 @@ package alerting import "fmt" type Condition struct { - Operator string `json:"operator"` - Items 
[]ConditionItem `json:"items"` + Operator string `json:"operator"` + Items []ConditionItem `json:"items"` } -func (cond *Condition) GetMinimumPeriodMatch() int{ + +func (cond *Condition) GetMinimumPeriodMatch() int { var minPeriodMatch = 0 for _, citem := range cond.Items { if citem.MinimumPeriodMatch > minPeriodMatch { @@ -45,14 +46,14 @@ type ConditionItem struct { //MetricName string `json:"metric"` - MinimumPeriodMatch int `json:"minimum_period_match"` - Operator string `json:"operator"` - Values []string `json:"values"` - Priority string `json:"priority"` - Expression string `json:"expression,omitempty"` + MinimumPeriodMatch int `json:"minimum_period_match"` + Operator string `json:"operator"` + Values []string `json:"values"` + Priority string `json:"priority"` + Expression string `json:"expression,omitempty"` } -func (cond *ConditionItem) GenerateConditionExpression()(conditionExpression string, err error){ +func (cond *ConditionItem) GenerateConditionExpression() (conditionExpression string, err error) { valueLength := len(cond.Values) if valueLength == 0 { return conditionExpression, fmt.Errorf("condition values: %v should not be empty", cond.Values) @@ -81,20 +82,20 @@ type ConditionResult struct { ResultItems []ConditionResultItem `json:"result_items"` - QueryResult *QueryResult `json:"query_result"` + QueryResult *QueryResult `json:"query_result"` } type ConditionResultItem struct { - GroupValues []string `json:"group_values"` - ConditionItem *ConditionItem `json:"condition_item"` - IssueTimestamp interface{} `json:"issue_timestamp"` - ResultValue interface{} `json:"result_value"` //the last value that satisfied the condition + GroupValues []string `json:"group_values"` + ConditionItem *ConditionItem `json:"condition_item"` + IssueTimestamp interface{} `json:"issue_timestamp"` + ResultValue interface{} `json:"result_value"` //the last value that satisfied the condition RelationValues map[string]interface{} `json:"relation_values"` } var PriorityWeights = map[string]int{ - "info": 1, - "low": 2, - "medium": 3, - "high": 4, + "info": 1, + "low": 2, + "medium": 3, + "high": 4, "critical": 5, -} \ No newline at end of file +} diff --git a/model/alerting/destination.go b/model/alerting/destination.go index f1b0f4d9..d644ce94 100644 --- a/model/alerting/destination.go +++ b/model/alerting/destination.go @@ -33,18 +33,17 @@ import ( type Channel struct { orm.ORMObjectBase - Name string `json:"name" elastic_mapping:"name:{type:keyword,copy_to:search_text}"` - Type string `json:"type" elastic_mapping:"type:{type:keyword,copy_to:search_text}"` // email or webhook - Priority int `json:"priority,omitempty"` - Webhook *CustomWebhook `json:"webhook,omitempty" elastic_mapping:"webhook:{type:object}"` - SearchText string `json:"-" elastic_mapping:"search_text:{type:text,index_prefixes:{},index_phrases:true, analyzer:suggest_text_search }"` - SubType string `json:"sub_type" elastic_mapping:"sub_type:{type:keyword,copy_to:search_text}"` - Email *Email `json:"email,omitempty" elastic_mapping:"email:{type:object}"` - Enabled bool `json:"enabled" elastic_mapping:"enabled:{type:boolean}"` + Name string `json:"name" elastic_mapping:"name:{type:keyword,copy_to:search_text}"` + Type string `json:"type" elastic_mapping:"type:{type:keyword,copy_to:search_text}"` // email or webhook + Priority int `json:"priority,omitempty"` + Webhook *CustomWebhook `json:"webhook,omitempty" elastic_mapping:"webhook:{type:object}"` + SearchText string
`json:"-" elastic_mapping:"search_text:{type:text,index_prefixes:{},index_phrases:true, analyzer:suggest_text_search }"` + SubType string `json:"sub_type" elastic_mapping:"sub_type:{type:keyword,copy_to:search_text}"` + Email *Email `json:"email,omitempty" elastic_mapping:"email:{type:object}"` + Enabled bool `json:"enabled" elastic_mapping:"enabled:{type:boolean}"` } - const ( ChannelEmail = "email" ChannelWebhook = "webhook" -) \ No newline at end of file +) diff --git a/model/alerting/filter.go b/model/alerting/filter.go index 90acb2fe..50c4f518 100644 --- a/model/alerting/filter.go +++ b/model/alerting/filter.go @@ -29,11 +29,11 @@ package alerting type Filter struct { And []FilterQuery `json:"and,omitempty"` - Or []FilterQuery `json:"or,omitempty"` + Or []FilterQuery `json:"or,omitempty"` Not []FilterQuery `json:"not,omitempty"` //MinimumShouldMatch int `json:"minimum_should_match"` } func (f Filter) IsEmpty() bool { return len(f.And) == 0 && len(f.Or) == 0 && len(f.Not) == 0 -} \ No newline at end of file +} diff --git a/model/alerting/filter_query.go b/model/alerting/filter_query.go index bc185ff8..ef0116bc 100644 --- a/model/alerting/filter_query.go +++ b/model/alerting/filter_query.go @@ -28,12 +28,12 @@ package alerting type FilterQuery struct { - Field string `json:"field,omitempty"` - Operator string `json:"operator,omitempty"` - Values []string `json:"values,omitempty"` - And []FilterQuery `json:"and,omitempty"` - Or []FilterQuery `json:"or,omitempty"` - Not []FilterQuery `json:"not,omitempty"` + Field string `json:"field,omitempty"` + Operator string `json:"operator,omitempty"` + Values []string `json:"values,omitempty"` + And []FilterQuery `json:"and,omitempty"` + Or []FilterQuery `json:"or,omitempty"` + Not []FilterQuery `json:"not,omitempty"` } func (fq FilterQuery) IsComplex() bool { @@ -42,4 +42,4 @@ func (fq FilterQuery) IsComplex() bool { func (f FilterQuery) IsEmpty() bool { return !f.IsComplex() && f.Operator == "" -} \ No newline at end of file +} diff --git a/model/alerting/metric.go b/model/alerting/metric.go index 999b492f..ac3869eb 100644 --- a/model/alerting/metric.go +++ b/model/alerting/metric.go @@ -36,13 +36,12 @@ import ( type Metric struct { insight.Metric - Title string `json:"title,omitempty"` //text template - Message string `json:"message,omitempty"` // text template + Title string `json:"title,omitempty"` //text template + Message string `json:"message,omitempty"` // text template Expression string `json:"expression,omitempty" elastic_mapping:"expression:{type:keyword,copy_to:search_text}"` //alert expression, auto-generated, e.g. avg(cpu) > 80 } - -func (m *Metric) GenerateExpression() (string, error){ +func (m *Metric) GenerateExpression() (string, error) { if len(m.Items) == 1 { return fmt.Sprintf("%s(%s)", m.Items[0].Statistic, m.Items[0].Field), nil } @@ -50,12 +49,12 @@ func (m *Metric) GenerateExpression() (string, error){ return "", fmt.Errorf("formula should not be empty since there are %d metrics", len(m.Items)) } var ( - expressionBytes = []byte(m.Formula) + expressionBytes = []byte(m.Formula) metricExpression string ) for _, item := range m.Items { metricExpression = fmt.Sprintf("%s(%s)", item.Statistic, item.Field) - reg, err := regexp.Compile(item.Name+`([^\w]|$)`) + reg, err := regexp.Compile(item.Name + `([^\w]|$)`) if err != nil { return "", err } @@ -66,23 +65,23 @@ } type MetricItem struct { - Name string `json:"name"` - Field string
`json:"field"` Statistic string `json:"statistic"` } type QueryResult struct { - Query string `json:"query"` - Raw string `json:"raw"` + Query string `json:"query"` + Raw string `json:"raw"` MetricData []MetricData `json:"metric_data"` - Nodata bool `json:"nodata"` - Min interface{} `json:"-"` - Max interface{} `json:"-"` + Nodata bool `json:"nodata"` + Min interface{} `json:"-"` + Max interface{} `json:"-"` } type MetricData struct { - GroupValues []string `json:"group_values"` - Data map[string][]TimeMetricData `json:"data"` + GroupValues []string `json:"group_values"` + Data map[string][]TimeMetricData `json:"data"` } type TimeMetricData []interface{} diff --git a/model/alerting/resource.go b/model/alerting/resource.go index 68395b67..5e49e0fc 100644 --- a/model/alerting/resource.go +++ b/model/alerting/resource.go @@ -32,20 +32,19 @@ import ( ) type Resource struct { - ID string `json:"resource_id" elastic_mapping:"resource_id:{type:keyword}"` - Name string `json:"resource_name" elastic_mapping:"resource_name:{type:keyword}"` - Type string `json:"type" elastic_mapping:"type:{type:keyword}"` - Objects []string `json:"objects" elastic_mapping:"objects:{type:keyword,copy_to:search_text}"` - Filter FilterQuery `json:"filter,omitempty" elastic_mapping:"-"` + ID string `json:"resource_id" elastic_mapping:"resource_id:{type:keyword}"` + Name string `json:"resource_name" elastic_mapping:"resource_name:{type:keyword}"` + Type string `json:"type" elastic_mapping:"type:{type:keyword}"` + Objects []string `json:"objects" elastic_mapping:"objects:{type:keyword,copy_to:search_text}"` + Filter FilterQuery `json:"filter,omitempty" elastic_mapping:"-"` RawFilter map[string]interface{} `json:"raw_filter,omitempty"` - TimeField string `json:"time_field,omitempty" elastic_mapping:"id:{type:keyword}"` - Context Context `json:"context"` + TimeField string `json:"time_field,omitempty" elastic_mapping:"id:{type:keyword}"` + Context Context `json:"context"` } -func (r Resource) Validate() error{ +func (r Resource) Validate() error { if r.TimeField == "" { return fmt.Errorf("TimeField can not be empty") } return nil } - diff --git a/model/alerting/rule.go b/model/alerting/rule.go index 19a4fc49..dc81ac69 100644 --- a/model/alerting/rule.go +++ b/model/alerting/rule.go @@ -33,33 +33,33 @@ import ( ) type Rule struct { - ID string `json:"id,omitempty" elastic_meta:"_id" elastic_mapping:"id: { type: keyword }"` - Created time.Time `json:"created,omitempty" elastic_mapping:"created: { type: date }"` - Updated time.Time `json:"updated,omitempty" elastic_mapping:"updated: { type: date }"` - Name string `json:"name" elastic_mapping:"name:{type:keyword,copy_to:search_text}"` - Enabled bool `json:"enabled" elastic_mapping:"enabled:{type:keyword}"` - Resource Resource `json:"resource" elastic_mapping:"resource:{type:object}"` - Metrics Metric `json:"metrics" elastic_mapping:"metrics:{type:object}"` - Conditions Condition `json:"conditions" elastic_mapping:"conditions:{type:object}"` - Channels *NotificationConfig `json:"channels,omitempty" elastic_mapping:"channels:{type:object}"` - NotificationConfig *NotificationConfig `json:"notification_config,omitempty" elastic_mapping:"notification_config:{type:object}"` + ID string `json:"id,omitempty" elastic_meta:"_id" elastic_mapping:"id: { type: keyword }"` + Created time.Time `json:"created,omitempty" elastic_mapping:"created: { type: date }"` + Updated time.Time `json:"updated,omitempty" elastic_mapping:"updated: { type: date }"` + Name string `json:"name" 
elastic_mapping:"name:{type:keyword,copy_to:search_text}"` + Enabled bool `json:"enabled" elastic_mapping:"enabled:{type:keyword}"` + Resource Resource `json:"resource" elastic_mapping:"resource:{type:object}"` + Metrics Metric `json:"metrics" elastic_mapping:"metrics:{type:object}"` + Conditions Condition `json:"conditions" elastic_mapping:"conditions:{type:object}"` + Channels *NotificationConfig `json:"channels,omitempty" elastic_mapping:"channels:{type:object}"` + NotificationConfig *NotificationConfig `json:"notification_config,omitempty" elastic_mapping:"notification_config:{type:object}"` RecoveryNotificationConfig *RecoveryNotificationConfig `json:"recovery_notification_config,omitempty" elastic_mapping:"recovery_notification_config:{type:object}"` - Schedule Schedule `json:"schedule" elastic_mapping:"schedule:{type:object}"` - LastNotificationTime time.Time `json:"-" elastic_mapping:"last_notification_time:{type:date}"` - LastTermStartTime time.Time `json:"-"` //start time of the most recent alerting round - LastEscalationTime time.Time `json:"-"` //time the most recent escalation notification was sent - SearchText string `json:"-" elastic_mapping:"search_text:{type:text,index_prefixes:{},index_phrases:true, analyzer:suggest_text_search }"` - Expression string `json:"-"` - Creator struct { + Schedule Schedule `json:"schedule" elastic_mapping:"schedule:{type:object}"` + LastNotificationTime time.Time `json:"-" elastic_mapping:"last_notification_time:{type:date}"` + LastTermStartTime time.Time `json:"-"` //start time of the most recent alerting round + LastEscalationTime time.Time `json:"-"` //time the most recent escalation notification was sent + SearchText string `json:"-" elastic_mapping:"search_text:{type:text,index_prefixes:{},index_phrases:true, analyzer:suggest_text_search }"` + Expression string `json:"-"` + Creator struct { Name string `json:"name" elastic_mapping:"name: { type: keyword }"` Id string `json:"id" elastic_mapping:"id: { type: keyword }"` } `json:"creator" elastic_mapping:"creator:{type:object}"` - Category string `json:"category,omitempty" elastic_mapping:"category: { type: keyword,copy_to:search_text }"` - Tags []string `json:"tags,omitempty" elastic_mapping:"tags: { type: keyword,copy_to:search_text }"` + Category string `json:"category,omitempty" elastic_mapping:"category: { type: keyword,copy_to:search_text }"` + Tags []string `json:"tags,omitempty" elastic_mapping:"tags: { type: keyword,copy_to:search_text }"` } -func (rule *Rule) GetOrInitExpression() (string, error){ - if rule.Expression != ""{ +func (rule *Rule) GetOrInitExpression() (string, error) { + if rule.Expression != "" { return rule.Expression, nil } sb := strings.Builder{} @@ -81,7 +81,8 @@ func (rule *Rule) GetOrInitExpression() (string, error){ rule.Expression = strings.ReplaceAll(sb.String(), "result", metricExp) return rule.Expression, nil } -//GetNotificationConfig adapts the old version config + +// GetNotificationConfig adapts the old version config func (rule *Rule) GetNotificationConfig() *NotificationConfig { if rule.NotificationConfig != nil { return rule.NotificationConfig @@ -96,37 +97,37 @@ func (rule *Rule) GetNotificationTitleAndMessage() (string, string) { } type NotificationConfig struct { - Enabled bool `json:"enabled"` - Title string `json:"title,omitempty"` //text template - Message string `json:"message,omitempty"` // text template - Normal []Channel `json:"normal,omitempty"` - Escalation []Channel `json:"escalation,omitempty"` - ThrottlePeriod string `json:"throttle_period,omitempty"` //silence period - AcceptTimeRange TimeRange `json:"accept_time_range,omitempty"` - EscalationThrottlePeriod string
`json:"escalation_throttle_period,omitempty"` - EscalationEnabled bool `json:"escalation_enabled,omitempty"` + Enabled bool `json:"enabled"` + Title string `json:"title,omitempty"` //text template + Message string `json:"message,omitempty"` // text template + Normal []Channel `json:"normal,omitempty"` + Escalation []Channel `json:"escalation,omitempty"` + ThrottlePeriod string `json:"throttle_period,omitempty"` //silence period + AcceptTimeRange TimeRange `json:"accept_time_range,omitempty"` + EscalationThrottlePeriod string `json:"escalation_throttle_period,omitempty"` + EscalationEnabled bool `json:"escalation_enabled,omitempty"` } type RecoveryNotificationConfig struct { - Enabled bool `json:"enabled"` // channel enabled - Title string `json:"title"` //text template - Message string `json:"message"` // text template - AcceptTimeRange TimeRange `json:"accept_time_range,omitempty"` - Normal []Channel `json:"normal,omitempty"` - EventEnabled bool `json:"event_enabled"` + Enabled bool `json:"enabled"` // channel enabled + Title string `json:"title"` //text template + Message string `json:"message"` // text template + AcceptTimeRange TimeRange `json:"accept_time_range,omitempty"` + Normal []Channel `json:"normal,omitempty"` + EventEnabled bool `json:"event_enabled"` } -type MessageTemplate struct{ - Type string `json:"type"` +type MessageTemplate struct { + Type string `json:"type"` Source string `json:"source"` } type TimeRange struct { Start string `json:"start"` - End string `json:"end"` + End string `json:"end"` } -func (tr *TimeRange) Include( t time.Time) bool { +func (tr *TimeRange) Include(t time.Time) bool { if tr.Start == "" || tr.End == "" { return true } @@ -135,10 +136,11 @@ type FilterParam struct { - Start interface{} `json:"start"` - End interface{} `json:"end"` - BucketSize string `json:"bucket_size"` + Start interface{} `json:"start"` + End interface{} `json:"end"` + BucketSize string `json:"bucket_size"` } + //ctx //rule expression, rule_id, resource_id, resource_name, event_id, condition_name, preset_value,[group_tags, check_values], -//check_status ,timestamp, \ No newline at end of file +//check_status ,timestamp, diff --git a/model/alerting/rule_test.go b/model/alerting/rule_test.go index a4e6fe47..10d2daf2 100644 --- a/model/alerting/rule_test.go +++ b/model/alerting/rule_test.go @@ -36,7 +36,7 @@ import ( "time" ) -func TestCreateRule( t *testing.T) { +func TestCreateRule(t *testing.T) { rule := Rule{ //ORMObjectBase: orm.ORMObjectBase{ // ID: util.GetUUID(), //}, Enabled: true, Resource: Resource{ - ID: "c8i18llath2blrusdjng", - Type: "elasticsearch", - Objects: []string{".infini_metrics*"}, + ID: "c8i18llath2blrusdjng", + Type: "elasticsearch", + Objects: []string{".infini_metrics*"}, TimeField: "timestamp", Filter: FilterQuery{ And: []FilterQuery{ @@ -69,68 +69,68 @@ }, }, }, - //Metrics: Metric{ - // PeriodInterval: "1m", - // MaxPeriods: 15, - // Items: []MetricItem{ - // {Name: "red_health", Field: "*",
Statistic: "count", Group: []string{"metadata.labels.cluster_id"}}, + // }, + //}, + //Conditions: Condition{ + // Operator: "any", + // Items: []ConditionItem{ + // { MinimumPeriodMatch: 1, Operator: "gte", Values: []string{"1"}, Priority: "error", AlertMessage: "集群健康状态为 Red"}, + // }, + //}, - Metrics: Metric{ - Metric: insight.Metric{ - Groups: []insight.MetricGroupItem{{"metadata.labels.cluster_id", 10}, {"metadata.labels.node_id", 10}}, - Items: []insight.MetricItem{ - {Name: "a", Field: "payload.elasticsearch.node_stats.fs.total.free_in_bytes", Statistic: "min" }, - {Name: "b", Field: "payload.elasticsearch.node_stats.fs.total.total_in_bytes", Statistic: "max"}, - }, - BucketSize: "1m", - Formula: "a/b*100", - }, - //Expression: "min(fs.free_in_bytes)/max(fs.total_in_bytes)*100", - }, - Conditions: Condition{ - Operator: "any", - Items: []ConditionItem{ - {MinimumPeriodMatch: 1, Operator: "lte", Values: []string{"76"}, Priority: "error"}, - }, + Metrics: Metric{ + Metric: insight.Metric{ + Groups: []insight.MetricGroupItem{{"metadata.labels.cluster_id", 10}, {"metadata.labels.node_id", 10}}, + Items: []insight.MetricItem{ + {Name: "a", Field: "payload.elasticsearch.node_stats.fs.total.free_in_bytes", Statistic: "min"}, + {Name: "b", Field: "payload.elasticsearch.node_stats.fs.total.total_in_bytes", Statistic: "max"}, }, + BucketSize: "1m", + Formula: "a/b*100", + }, + //Expression: "min(fs.free_in_bytes)/max(fs.total_in_bytes)*100", + }, + Conditions: Condition{ + Operator: "any", + Items: []ConditionItem{ + {MinimumPeriodMatch: 1, Operator: "lte", Values: []string{"76"}, Priority: "error"}, + }, + }, - Channels: &NotificationConfig{ - Normal: []Channel{ - {Name: "钉钉", Type: ChannelWebhook, Webhook: &CustomWebhook{ - HeaderParams: map[string]string{ - "Message-Type": "application/json", - }, - Body: `{"msgtype": "text","text": {"content":"告警通知: {{ctx.message}}"}}`, - Method: http.MethodPost, - URL: "https://oapi.dingtalk.com/robot/send?access_token=XXXXXX", - }}, - }, - Escalation: []Channel{ - {Type: ChannelWebhook, Name: "微信", Webhook: &CustomWebhook{ - HeaderParams: map[string]string{ - "Message-Type": "application/json", - }, - Body: `{"msgtype": "text","text": {"content":"告警通知: {{ctx.message}}"}}`, - Method: http.MethodPost, - URL: "https://oapi.weixin.com/robot/send?access_token=6a5c7c9454ff74537a6de493153b1da68860942d4b0aeb33797cb68b5111b077", - }}, - }, - ThrottlePeriod: "1h", - AcceptTimeRange: TimeRange{ - Start: "8:00", - End: "21:00", - }, - EscalationEnabled: false, - EscalationThrottlePeriod: "30m", + Channels: &NotificationConfig{ + Normal: []Channel{ + {Name: "钉钉", Type: ChannelWebhook, Webhook: &CustomWebhook{ + HeaderParams: map[string]string{ + "Message-Type": "application/json", + }, + Body: `{"msgtype": "text","text": {"content":"告警通知: {{ctx.message}}"}}`, + Method: http.MethodPost, + URL: "https://oapi.dingtalk.com/robot/send?access_token=XXXXXX", + }}, + }, + Escalation: []Channel{ + {Type: ChannelWebhook, Name: "微信", Webhook: &CustomWebhook{ + HeaderParams: map[string]string{ + "Message-Type": "application/json", + }, + Body: `{"msgtype": "text","text": {"content":"告警通知: {{ctx.message}}"}}`, + Method: http.MethodPost, + URL: "https://oapi.weixin.com/robot/send?access_token=6a5c7c9454ff74537a6de493153b1da68860942d4b0aeb33797cb68b5111b077", + }}, }, + ThrottlePeriod: "1h", + AcceptTimeRange: TimeRange{ + Start: "8:00", + End: "21:00", + }, + EscalationEnabled: false, + EscalationThrottlePeriod: "30m", + }, } //err := rule.Metrics.GenerateExpression() //if err != 
nil { @@ -145,15 +145,12 @@ func TestCreateRule( t *testing.T) { fmt.Println(exp) } - - - -func TestTimeRange_Include( t *testing.T) { +func TestTimeRange_Include(t *testing.T) { tr := TimeRange{ Start: "08:00", - End: "18:31", + End: "18:31", } fmt.Println(tr.Include(time.Now())) - ti,_ := time.Parse(time.RFC3339, "2022-04-11T10:31:38.911000504Z") + ti, _ := time.Parse(time.RFC3339, "2022-04-11T10:31:38.911000504Z") fmt.Println(time.Now().Sub(ti)) } diff --git a/model/alerting/schedule.go b/model/alerting/schedule.go index 93cab09a..1e1ed41f 100644 --- a/model/alerting/schedule.go +++ b/model/alerting/schedule.go @@ -28,14 +28,11 @@ package alerting type Schedule struct { - Cron *Cron `json:"cron,omitempty" elastic_mapping:"cron:{type:object}"` + Cron *Cron `json:"cron,omitempty" elastic_mapping:"cron:{type:object}"` Interval string `json:"interval,omitempty" elastic_mapping:"interval:{type:keyword}"` } type Cron struct { Expression string `json:"expression" elastic_mapping:"expression:{type:text}"` - Timezone string `json:"timezone" elastic_mapping:"timezone:{type:keyword}"` + Timezone string `json:"timezone" elastic_mapping:"timezone:{type:keyword}"` } - - - diff --git a/model/alerting/webhook.go b/model/alerting/webhook.go index fb035fd1..d390530e 100644 --- a/model/alerting/webhook.go +++ b/model/alerting/webhook.go @@ -29,19 +29,19 @@ package alerting type CustomWebhook struct { HeaderParams map[string]string `json:"header_params,omitempty" elastic_mapping:"header_params:{type:object,enabled:false}"` - Method string `json:"method" elastic_mapping:"method:{type:keyword}"` - URL string `json:"url,omitempty"` - Body string `json:"body" elastic_mapping:"body:{type:text}"` + Method string `json:"method" elastic_mapping:"method:{type:keyword}"` + URL string `json:"url,omitempty"` + Body string `json:"body" elastic_mapping:"body:{type:text}"` } type Email struct { - ServerID string `json:"server_id" elastic_mapping:"server_id:{type:keyword}"` + ServerID string `json:"server_id" elastic_mapping:"server_id:{type:keyword}"` Recipients struct { To []string `json:"to,omitempty" elastic_mapping:"to:{type:keyword}"` CC []string `json:"cc,omitempty" elastic_mapping:"cc:{type:keyword}"` BCC []string `json:"bcc,omitempty" elastic_mapping:"bcc:{type:keyword}"` } `json:"recipients" elastic_mapping:"recipients:{type:object}"` - Subject string `json:"subject" elastic_mapping:"subject:{type:text}"` - Body string `json:"body" elastic_mapping:"body:{type:text}"` + Subject string `json:"subject" elastic_mapping:"subject:{type:text}"` + Body string `json:"body" elastic_mapping:"body:{type:text}"` ContentType string `json:"content_type" elastic_mapping:"content_type:{type:keyword}"` -} \ No newline at end of file +} diff --git a/model/email_server.go b/model/email_server.go index 218c0ad1..30259e3f 100644 --- a/model/email_server.go +++ b/model/email_server.go @@ -35,13 +35,13 @@ import ( type EmailServer struct { orm.ORMObjectBase - Name string `json:"name" elastic_mapping:"name:{type:text}"` - Host string `json:"host" elastic_mapping:"host:{type:keyword}"` - Port int `json:"port" elastic_mapping:"port:{type:keyword}"` - TLS bool `json:"tls" elastic_mapping:"tls:{type:keyword}"` - Auth *model.BasicAuth `json:"auth" elastic_mapping:"auth:{type:object}"` - Enabled bool `json:"enabled" elastic_mapping:"enabled:{type:boolean}"` - CredentialID string `json:"credential_id" elastic_mapping:"credential_id:{type:keyword}"` + Name string `json:"name" elastic_mapping:"name:{type:text}"` + Host string `json:"host" 
elastic_mapping:"host:{type:keyword}"` + Port int `json:"port" elastic_mapping:"port:{type:keyword}"` + TLS bool `json:"tls" elastic_mapping:"tls:{type:keyword}"` + Auth *model.BasicAuth `json:"auth" elastic_mapping:"auth:{type:object}"` + Enabled bool `json:"enabled" elastic_mapping:"enabled:{type:boolean}"` + CredentialID string `json:"credential_id" elastic_mapping:"credential_id:{type:keyword}"` } func (serv *EmailServer) Validate(requireName bool) error { @@ -55,4 +55,4 @@ func (serv *EmailServer) Validate(requireName bool) error { return fmt.Errorf("name can not be empty") } return nil -} \ No newline at end of file +} diff --git a/model/insight/dashboard.go b/model/insight/dashboard.go index 187189af..b63ef269 100644 --- a/model/insight/dashboard.go +++ b/model/insight/dashboard.go @@ -30,19 +30,19 @@ package insight import "time" type Dashboard struct { - ID string `json:"id,omitempty" elastic_meta:"_id" elastic_mapping:"id: { type: keyword }"` - Created time.Time `json:"created,omitempty" elastic_mapping:"created: { type: date }"` - Updated time.Time `json:"updated,omitempty" elastic_mapping:"updated: { type: date }"` - ClusterId string `json:"cluster_id" elastic_mapping:"cluster_id: { type: keyword }"` - IndexPattern string `json:"index_pattern" elastic_mapping:"index_pattern: { type: keyword }"` - TimeField string `json:"time_field,omitempty" elastic_mapping:"time_field: { type: keyword }"` - Filter interface{} `json:"filter,omitempty" elastic_mapping:"filter: { type: object, enabled:false }"` - BucketSize string `json:"bucket_size" elastic_mapping:"bucket_size: { type: keyword }"` - Title string `json:"title" elastic_mapping:"title: { type: keyword }"` - Description string `json:"description" elastic_mapping:"description: { type: keyword }"` + ID string `json:"id,omitempty" elastic_meta:"_id" elastic_mapping:"id: { type: keyword }"` + Created time.Time `json:"created,omitempty" elastic_mapping:"created: { type: date }"` + Updated time.Time `json:"updated,omitempty" elastic_mapping:"updated: { type: date }"` + ClusterId string `json:"cluster_id" elastic_mapping:"cluster_id: { type: keyword }"` + IndexPattern string `json:"index_pattern" elastic_mapping:"index_pattern: { type: keyword }"` + TimeField string `json:"time_field,omitempty" elastic_mapping:"time_field: { type: keyword }"` + Filter interface{} `json:"filter,omitempty" elastic_mapping:"filter: { type: object, enabled:false }"` + BucketSize string `json:"bucket_size" elastic_mapping:"bucket_size: { type: keyword }"` + Title string `json:"title" elastic_mapping:"title: { type: keyword }"` + Description string `json:"description" elastic_mapping:"description: { type: keyword }"` Visualizations interface{} `json:"visualizations" elastic_mapping:"visualizations: { type: object, enabled:false }"` - Tags []string `json:"tags,omitempty" elastic_mapping:"tags: { type: keyword }"` - User string `json:"user" elastic_mapping:"user: { type: keyword }"` - Query interface{} `json:"query,omitempty" elastic_mapping:"query: { type: object, enabled:false }"` - TimeFilter interface{} `json:"time_filter,omitempty" elastic_mapping:"time_filter: { type: object, enabled:false }"` + Tags []string `json:"tags,omitempty" elastic_mapping:"tags: { type: keyword }"` + User string `json:"user" elastic_mapping:"user: { type: keyword }"` + Query interface{} `json:"query,omitempty" elastic_mapping:"query: { type: object, enabled:false }"` + TimeFilter interface{} `json:"time_filter,omitempty" elastic_mapping:"time_filter: { type: object, enabled:false 
}"` } diff --git a/model/insight/field_meta.go b/model/insight/field_meta.go index fae5ac4d..840c2950 100644 --- a/model/insight/field_meta.go +++ b/model/insight/field_meta.go @@ -27,9 +27,8 @@ package insight - type SeriesItem struct { - Type string `json:"type"` + Type string `json:"type"` Options map[string]interface{} `json:"options"` - Metric Metric `json:"metric"` -} \ No newline at end of file + Metric Metric `json:"metric"` +} diff --git a/model/insight/metric_data.go b/model/insight/metric_data.go index a0e99644..2feb5e2f 100644 --- a/model/insight/metric_data.go +++ b/model/insight/metric_data.go @@ -34,33 +34,33 @@ import ( ) type Metric struct { - AggTypes []string `json:"agg_types,omitempty"` - IndexPattern string `json:"index_pattern,omitempty"` - TimeField string `json:"time_field,omitempty"` - BucketSize string `json:"bucket_size,omitempty"` - Filter interface{} `json:"filter,omitempty"` - Groups []MetricGroupItem `json:"groups,omitempty"` //bucket group - Sort []GroupSort `json:"sort,omitempty"` - ClusterId string `json:"cluster_id,omitempty"` - Formula string `json:"formula,omitempty"` - Items []MetricItem `json:"items"` - FormatType string `json:"format_type,omitempty"` - TimeFilter interface{} `json:"time_filter,omitempty"` - TimeBeforeGroup bool `json:"time_before_group,omitempty"` - BucketLabel *BucketLabel `json:"bucket_label,omitempty"` + AggTypes []string `json:"agg_types,omitempty"` + IndexPattern string `json:"index_pattern,omitempty"` + TimeField string `json:"time_field,omitempty"` + BucketSize string `json:"bucket_size,omitempty"` + Filter interface{} `json:"filter,omitempty"` + Groups []MetricGroupItem `json:"groups,omitempty"` //bucket group + Sort []GroupSort `json:"sort,omitempty"` + ClusterId string `json:"cluster_id,omitempty"` + Formula string `json:"formula,omitempty"` + Items []MetricItem `json:"items"` + FormatType string `json:"format_type,omitempty"` + TimeFilter interface{} `json:"time_filter,omitempty"` + TimeBeforeGroup bool `json:"time_before_group,omitempty"` + BucketLabel *BucketLabel `json:"bucket_label,omitempty"` } type GroupSort struct { - Key string `json:"key"` + Key string `json:"key"` Direction string `json:"direction"` } type MetricGroupItem struct { Field string `json:"field"` - Limit int `json:"limit"` + Limit int `json:"limit"` } -func (m *Metric) GenerateExpression() (string, error){ +func (m *Metric) GenerateExpression() (string, error) { if len(m.Items) == 1 { return fmt.Sprintf("%s(%s)", m.Items[0].Statistic, m.Items[0].Field), nil } @@ -68,12 +68,12 @@ func (m *Metric) GenerateExpression() (string, error){ return "", fmt.Errorf("formula should not be empty since there are %d metrics", len(m.Items)) } var ( - expressionBytes = []byte(m.Formula) + expressionBytes = []byte(m.Formula) metricExpression string ) for _, item := range m.Items { metricExpression = fmt.Sprintf("%s(%s)", item.Statistic, item.Field) - reg, err := regexp.Compile(item.Name+`([^\w]|$)`) + reg, err := regexp.Compile(item.Name + `([^\w]|$)`) if err != nil { return "", err } @@ -102,12 +102,12 @@ func (m *Metric) ValidateSortKey() error { mm[item.Name] = &item } for _, sortItem := range m.Sort { - if !util.StringInArray([]string{"desc", "asc"}, sortItem.Direction){ + if !util.StringInArray([]string{"desc", "asc"}, sortItem.Direction) { return fmt.Errorf("unknown sort direction [%s]", sortItem.Direction) } - if v, ok := mm[sortItem.Key]; !ok && !util.StringInArray([]string{"_key", "_count"}, sortItem.Key){ + if v, ok := mm[sortItem.Key]; !ok && 
!util.StringInArray([]string{"_key", "_count"}, sortItem.Key) { return fmt.Errorf("unknown sort key [%s]", sortItem.Key) - }else{ + } else { if v != nil && v.Statistic == "derivative" { return fmt.Errorf("can not sort by pipeline agg [%s]", v.Statistic) } @@ -117,26 +117,26 @@ func (m *Metric) ValidateSortKey() error { } type MetricItem struct { - Name string `json:"name,omitempty"` - Field string `json:"field"` - FieldType string `json:"field_type,omitempty"` - Statistic string `json:"statistic,omitempty"` + Name string `json:"name,omitempty"` + Field string `json:"field"` + FieldType string `json:"field_type,omitempty"` + Statistic string `json:"statistic,omitempty"` } type MetricDataItem struct { - Timestamp interface{} `json:"timestamp,omitempty"` - Value interface{} `json:"value"` - Groups []string `json:"groups,omitempty"` - GroupLabel string `json:"group_label,omitempty"` + Timestamp interface{} `json:"timestamp,omitempty"` + Value interface{} `json:"value"` + Groups []string `json:"groups,omitempty"` + GroupLabel string `json:"group_label,omitempty"` } type MetricData struct { - Groups []string `json:"groups,omitempty"` - Data map[string][]MetricDataItem + Groups []string `json:"groups,omitempty"` + Data map[string][]MetricDataItem GroupLabel string `json:"group_label,omitempty"` } type BucketLabel struct { - Enabled bool `json:"enabled"` + Enabled bool `json:"enabled"` Template string `json:"template,omitempty"` } diff --git a/model/insight/visualization.go b/model/insight/visualization.go index 084eac71..493a5611 100644 --- a/model/insight/visualization.go +++ b/model/insight/visualization.go @@ -30,15 +30,15 @@ package insight import "time" type Visualization struct { - ID string `json:"id,omitempty" elastic_meta:"_id" elastic_mapping:"id: { type: keyword }"` - Created *time.Time `json:"created,omitempty" elastic_mapping:"created: { type: date }"` - Updated *time.Time `json:"updated,omitempty" elastic_mapping:"updated: { type: date }"` - Title string `json:"title,omitempty" elastic_mapping:"title: { type: keyword }"` - IndexPattern string `json:"index_pattern,omitempty" elastic_mapping:"index_pattern: { type: keyword }"` - ClusterId string `json:"cluster_id,omitempty" elastic_mapping:"cluster_id: { type: keyword }"` - Series []SeriesItem `json:"series" elastic_mapping:"series: { type: object,enabled:false }"` - Position *Position `json:"position,omitempty" elastic_mapping:"position: { type: object,enabled:false }"` - Description string `json:"description,omitempty" elastic_mapping:"description: { type: keyword }"` + ID string `json:"id,omitempty" elastic_meta:"_id" elastic_mapping:"id: { type: keyword }"` + Created *time.Time `json:"created,omitempty" elastic_mapping:"created: { type: date }"` + Updated *time.Time `json:"updated,omitempty" elastic_mapping:"updated: { type: date }"` + Title string `json:"title,omitempty" elastic_mapping:"title: { type: keyword }"` + IndexPattern string `json:"index_pattern,omitempty" elastic_mapping:"index_pattern: { type: keyword }"` + ClusterId string `json:"cluster_id,omitempty" elastic_mapping:"cluster_id: { type: keyword }"` + Series []SeriesItem `json:"series" elastic_mapping:"series: { type: object,enabled:false }"` + Position *Position `json:"position,omitempty" elastic_mapping:"position: { type: object,enabled:false }"` + Description string `json:"description,omitempty" elastic_mapping:"description: { type: keyword }"` } type Position struct { diff --git a/model/insight/widget.go b/model/insight/widget.go index ff079666..3a262384 100644 
--- a/model/insight/widget.go +++ b/model/insight/widget.go @@ -31,6 +31,6 @@ import "infini.sh/framework/core/orm" type Widget struct { orm.ORMObjectBase - Title string `json:"title" elastic_mapping:"title: { type: text }"` - Config interface{}`json:"config" elastic_mapping:"config: { type: object,enabled:false }"` + Title string `json:"title" elastic_mapping:"title: { type: text }"` + Config interface{} `json:"config" elastic_mapping:"config: { type: object,enabled:false }"` } diff --git a/model/layout.go b/model/layout.go index 36f5163c..9a510d9c 100644 --- a/model/layout.go +++ b/model/layout.go @@ -31,20 +31,21 @@ import "infini.sh/framework/core/orm" type Layout struct { orm.ORMObjectBase - Name string `json:"name" elastic_mapping:"name: { type: text }"` + Name string `json:"name" elastic_mapping:"name: { type: text }"` Description string `json:"description" elastic_mapping:"description: { type: text }"` - Creator struct { + Creator struct { Name string `json:"name"` Id string `json:"id"` } `json:"creator"` - ViewID string `json:"view_id" elastic_mapping:"view_id: { type: keyword }"` - Config interface{} `json:"config" elastic_mapping:"config: { type: object, enabled:false }"` - Reserved bool `json:"reserved,omitempty" elastic_mapping:"reserved:{type:boolean}"` - Type LayoutType `json:"type" elastic_mapping:"type: { type: keyword }"` - IsFixed bool `json:"is_fixed" elastic_mapping:"is_fixed: { type: boolean }"` + ViewID string `json:"view_id" elastic_mapping:"view_id: { type: keyword }"` + Config interface{} `json:"config" elastic_mapping:"config: { type: object, enabled:false }"` + Reserved bool `json:"reserved,omitempty" elastic_mapping:"reserved:{type:boolean}"` + Type LayoutType `json:"type" elastic_mapping:"type: { type: keyword }"` + IsFixed bool `json:"is_fixed" elastic_mapping:"is_fixed: { type: boolean }"` } type LayoutType string + const ( LayoutTypeWorkspace LayoutType = "workspace" -) \ No newline at end of file +) diff --git a/modules/agent/api/host.go b/modules/agent/api/host.go index 63afb42b..2491013c 100644 --- a/modules/agent/api/host.go +++ b/modules/agent/api/host.go @@ -46,9 +46,9 @@ func (h *APIHandler) enrollHost(w http.ResponseWriter, req *http.Request, ps htt HostName string `json:"host_name"` IP string `json:"ip"` Source string `json:"source"` - OSName string `json:"os_name"` - OSArch string `json:"os_arch"` - NodeID string `json:"node_uuid"` + OSName string `json:"os_name"` + OSArch string `json:"os_arch"` + NodeID string `json:"node_uuid"` } err := h.DecodeJSON(req, &reqBody) if err != nil { @@ -84,7 +84,7 @@ func (h *APIHandler) enrollHost(w http.ResponseWriter, req *http.Request, ps htt hostInfo = &host.HostInfo{ IP: hi.IP, OSInfo: host.OS{ - Platform: hi.OSName, + Platform: hi.OSName, KernelArch: hi.OSArch, }, NodeID: hi.NodeID, @@ -97,7 +97,7 @@ func (h *APIHandler) enrollHost(w http.ResponseWriter, req *http.Request, ps htt } hostInfo.Timestamp = time.Now() var ctx *orm.Context - if i == len(reqBody) - 1 { + if i == len(reqBody)-1 { ctx = &orm.Context{ Refresh: "wait_for", } @@ -112,7 +112,7 @@ func (h *APIHandler) enrollHost(w http.ResponseWriter, req *http.Request, ps htt continue } } - resBody := util.MapStr{ + resBody := util.MapStr{ "success": true, } if len(errors) > 0 { @@ -168,15 +168,15 @@ func (h *APIHandler) GetHostAgentInfo(w http.ResponseWriter, req *http.Request, } h.WriteJSON(w, util.MapStr{ - "host_id": hostID, + "host_id": hostID, "agent_id": obj.ID, - "version": obj.Application.Version, - "status": hostInfo.AgentStatus, + "version": 
obj.Application.Version, + "status": hostInfo.AgentStatus, "endpoint": obj.GetEndpoint(), }, http.StatusOK) } -func getHost(hostID string) (*host.HostInfo, error){ +func getHost(hostID string) (*host.HostInfo, error) { hostInfo := &host.HostInfo{} hostInfo.ID = hostID exists, err := orm.Get(hostInfo) @@ -241,4 +241,4 @@ func (h *APIHandler) GetHostElasticProcess(w http.ResponseWriter, req *http.Requ h.WriteJSON(w, util.MapStr{ //"elastic_processes": processes, }, http.StatusOK) -} \ No newline at end of file +} diff --git a/modules/agent/api/remote_config.go b/modules/agent/api/remote_config.go index a1a60762..ed8d82d6 100644 --- a/modules/agent/api/remote_config.go +++ b/modules/agent/api/remote_config.go @@ -31,13 +31,13 @@ import ( "bytes" "fmt" log "github.com/cihub/seelog" - "infini.sh/framework/modules/configs/common" "infini.sh/framework/core/elastic" "infini.sh/framework/core/global" "infini.sh/framework/core/kv" "infini.sh/framework/core/model" "infini.sh/framework/core/orm" "infini.sh/framework/core/util" + "infini.sh/framework/modules/configs/common" common2 "infini.sh/framework/modules/elastic/common" metadata2 "infini.sh/framework/modules/elastic/metadata" "time" diff --git a/modules/agent/common/cert.go b/modules/agent/common/cert.go index 5f46d790..161c8821 100644 --- a/modules/agent/common/cert.go +++ b/modules/agent/common/cert.go @@ -37,15 +37,15 @@ import ( "path" ) -func GenerateClientCert(caFile, caKey string) (caCert, clientCertPEM, clientKeyPEM []byte, err error){ +func GenerateClientCert(caFile, caKey string) (caCert, clientCertPEM, clientKeyPEM []byte, err error) { return generateCert(caFile, caKey, false) } -func GenerateServerCert(caFile, caKey string) (caCert, serverCertPEM, serverKeyPEM []byte, err error){ +func GenerateServerCert(caFile, caKey string) (caCert, serverCertPEM, serverKeyPEM []byte, err error) { return generateCert(caFile, caKey, true) } -func generateCert(caFile, caKey string, isServer bool)(caCert, instanceCertPEM, instanceKeyPEM []byte, err error){ +func generateCert(caFile, caKey string, isServer bool) (caCert, instanceCertPEM, instanceKeyPEM []byte, err error) { pool := x509.NewCertPool() caCert, err = os.ReadFile(caFile) if err != nil { @@ -69,11 +69,11 @@ func generateCert(caFile, caKey string, isServer bool)(caCert, instanceCertPEM, if err != nil { return } - if isServer{ + if isServer { b = &pem.Block{Type: "CERTIFICATE", Bytes: caCertBytes} certPEM := pem.EncodeToMemory(b) - instanceCertPEM, instanceKeyPEM, err = util.GenerateServerCert(rootCert, certKey.(*rsa.PrivateKey), certPEM, nil) - }else{ + instanceCertPEM, instanceKeyPEM, err = util.GenerateServerCert(rootCert, certKey.(*rsa.PrivateKey), certPEM, nil) + } else { _, instanceCertPEM, instanceKeyPEM = util.GetClientCert(rootCert, certKey) } return caCert, instanceCertPEM, instanceKeyPEM, nil @@ -84,9 +84,9 @@ func GetAgentInstanceCerts(caFile, caKey string) (string, string, error) { instanceCrt := path.Join(dataDir, "certs/agent/instance.crt") instanceKey := path.Join(dataDir, "certs/agent/instance.key") var ( - err error + err error clientCertPEM []byte - clientKeyPEM []byte + clientKeyPEM []byte ) if util.FileExists(instanceCrt) && util.FileExists(instanceKey) { return instanceCrt, instanceKey, nil @@ -96,7 +96,7 @@ func GetAgentInstanceCerts(caFile, caKey string) (string, string, error) { return "", "", err } baseDir := path.Join(dataDir, "certs/agent") - if !util.IsExist(baseDir){ + if !util.IsExist(baseDir) { err = os.MkdirAll(baseDir, 0775) if err != nil { return "", "", err 
@@ -111,4 +111,4 @@ func GetAgentInstanceCerts(caFile, caKey string) (string, string, error) { return "", "", err } return instanceCrt, instanceKey, nil -} \ No newline at end of file +} diff --git a/modules/agent/common/config.go b/modules/agent/common/config.go index 68b68c36..1f4b7936 100644 --- a/modules/agent/common/config.go +++ b/modules/agent/common/config.go @@ -30,8 +30,8 @@ package common import ( log "github.com/cihub/seelog" "infini.sh/console/modules/agent/model" - "infini.sh/framework/modules/configs/common" "infini.sh/framework/core/env" + "infini.sh/framework/modules/configs/common" ) func GetAgentConfig() *model.AgentConfig { diff --git a/modules/agent/model/config.go b/modules/agent/model/config.go index a3f4359d..a5bae4d1 100644 --- a/modules/agent/model/config.go +++ b/modules/agent/model/config.go @@ -28,14 +28,14 @@ package model type AgentConfig struct { - Enabled bool `config:"enabled"` - Setup *SetupConfig `config:"setup"` + Enabled bool `config:"enabled"` + Setup *SetupConfig `config:"setup"` } type SetupConfig struct { - DownloadURL string `config:"download_url"` - CACertFile string `config:"ca_cert"` - CAKeyFile string `config:"ca_key"` - ConsoleEndpoint string `config:"console_endpoint"` - Port string `config:"port"` + DownloadURL string `config:"download_url"` + CACertFile string `config:"ca_cert"` + CAKeyFile string `config:"ca_key"` + ConsoleEndpoint string `config:"console_endpoint"` + Port string `config:"port"` } diff --git a/modules/elastic/api/activity.go b/modules/elastic/api/activity.go index 43d23d8b..b46fcfbe 100644 --- a/modules/elastic/api/activity.go +++ b/modules/elastic/api/activity.go @@ -38,36 +38,36 @@ import ( "strings" ) -func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){ - resBody:=util.MapStr{} - reqBody := struct{ - Keyword string `json:"keyword"` - Size int `json:"size"` - From int `json:"from"` - Aggregations []elastic.SearchAggParam `json:"aggs"` - Highlight elastic.SearchHighlightParam `json:"highlight"` - Filter elastic.SearchFilterParam `json:"filter"` - Sort []string `json:"sort"` - StartTime interface{} `json:"start_time"` - EndTime interface{} `json:"end_time"` +func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { + resBody := util.MapStr{} + reqBody := struct { + Keyword string `json:"keyword"` + Size int `json:"size"` + From int `json:"from"` + Aggregations []elastic.SearchAggParam `json:"aggs"` + Highlight elastic.SearchHighlightParam `json:"highlight"` + Filter elastic.SearchFilterParam `json:"filter"` + Sort []string `json:"sort"` + StartTime interface{} `json:"start_time"` + EndTime interface{} `json:"end_time"` }{} err := h.DecodeJSON(req, &reqBody) if err != nil { resBody["error"] = err.Error() - h.WriteJSON(w,resBody, http.StatusInternalServerError ) + h.WriteJSON(w, resBody, http.StatusInternalServerError) return } aggs := elastic.BuildSearchTermAggregations(reqBody.Aggregations) aggs["term_cluster_id"] = util.MapStr{ "terms": util.MapStr{ "field": "metadata.labels.cluster_id", - "size": 1000, + "size": 1000, }, "aggs": util.MapStr{ "term_cluster_name": util.MapStr{ "terms": util.MapStr{ "field": "metadata.labels.cluster_name", - "size": 1, + "size": 1, }, }, }, @@ -86,9 +86,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http clusterFilter, hasAllPrivilege := h.GetClusterFilter(req, "metadata.labels.cluster_id") if !hasAllPrivilege && clusterFilter == nil { - 
h.WriteJSON(w, elastic.SearchResponse{ - - }, http.StatusOK) + h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK) return } if !hasAllPrivilege && clusterFilter != nil { @@ -97,9 +95,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http hasAllPrivilege, indexPrivilege := h.GetCurrentUserIndex(req) if !hasAllPrivilege && len(indexPrivilege) == 0 { - h.WriteJSON(w, elastic.SearchResponse{ - - }, http.StatusOK) + h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK) return } if !hasAllPrivilege { @@ -107,10 +103,10 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http for clusterID, indices := range indexPrivilege { var ( wildcardIndices []string - normalIndices []string + normalIndices []string ) for _, index := range indices { - if strings.Contains(index,"*") { + if strings.Contains(index, "*") { wildcardIndices = append(wildcardIndices, index) continue } @@ -120,8 +116,8 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http if len(wildcardIndices) > 0 { subShould = append(subShould, util.MapStr{ "query_string": util.MapStr{ - "query": strings.Join(wildcardIndices, " "), - "fields": []string{"metadata.labels.index_name"}, + "query": strings.Join(wildcardIndices, " "), + "fields": []string{"metadata.labels.index_name"}, "default_operator": "OR", }, }) @@ -146,7 +142,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http { "bool": util.MapStr{ "minimum_should_match": 1, - "should": subShould, + "should": subShould, }, }, }, @@ -156,7 +152,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http indexFilter := util.MapStr{ "bool": util.MapStr{ "minimum_should_match": 1, - "should": indexShould, + "should": indexShould, }, } filter = append(filter, indexFilter) @@ -168,7 +164,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http { "query_string": util.MapStr{ "default_field": "*", - "query": reqBody.Keyword, + "query": reqBody.Keyword, }, }, } @@ -176,15 +172,15 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http var boolQuery = util.MapStr{ "filter": filter, } - if len(should) >0 { + if len(should) > 0 { boolQuery["should"] = should boolQuery["minimum_should_match"] = 1 } query := util.MapStr{ "aggs": aggs, "size": reqBody.Size, - "from": reqBody.From, - "_source": []string{"changelog", "id", "metadata", "timestamp"}, + "from": reqBody.From, + "_source": []string{"changelog", "id", "metadata", "timestamp"}, "highlight": elastic.BuildSearchHighlight(&reqBody.Highlight), "query": util.MapStr{ "bool": boolQuery, @@ -194,7 +190,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http reqBody.Sort = []string{"timestamp", "desc"} } - query["sort"] = []util.MapStr{ + query["sort"] = []util.MapStr{ { reqBody.Sort[0]: util.MapStr{ "order": reqBody.Sort[1], @@ -206,8 +202,8 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(orm.GetWildcardIndexName(event.Activity{}), dsl) if err != nil { resBody["error"] = err.Error() - h.WriteJSON(w,resBody, http.StatusInternalServerError ) + h.WriteJSON(w, resBody, http.StatusInternalServerError) return } w.Write(response.RawResult.Body) -} \ No newline at end of file +} diff --git a/modules/elastic/api/alias.go b/modules/elastic/api/alias.go index 7c86aaa3..9203f2d5 
100644 --- a/modules/elastic/api/alias.go +++ b/modules/elastic/api/alias.go @@ -33,9 +33,9 @@ import ( "net/http" ) -func (h *APIHandler) HandleAliasAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){ +func (h *APIHandler) HandleAliasAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { targetClusterID := ps.ByName("id") - exists,client,err:=h.GetClusterClient(targetClusterID) + exists, client, err := h.GetClusterClient(targetClusterID) if err != nil { log.Error(err) @@ -43,8 +43,8 @@ func (h *APIHandler) HandleAliasAction(w http.ResponseWriter, req *http.Request, return } - if !exists{ - errStr := fmt.Sprintf("cluster [%s] not found",targetClusterID) + if !exists { + errStr := fmt.Sprintf("cluster [%s] not found", targetClusterID) log.Error(errStr) h.WriteError(w, errStr, http.StatusInternalServerError) return @@ -105,4 +105,4 @@ func (h *APIHandler) HandleGetAliasAction(w http.ResponseWriter, req *http.Reque return } h.WriteJSON(w, res, http.StatusOK) -} \ No newline at end of file +} diff --git a/modules/elastic/api/cluster_overview.go b/modules/elastic/api/cluster_overview.go index 39979af3..fb54e929 100644 --- a/modules/elastic/api/cluster_overview.go +++ b/modules/elastic/api/cluster_overview.go @@ -118,7 +118,7 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request, var ( // cluster_id => cluster_uuid - clustersM = map[string]string{} + clustersM = map[string]string{} clusterUUIDs []string ) for _, cid := range clusterIDs { @@ -145,28 +145,27 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request, indexMetricItems := []GroupMetricItem{} metricItem := newMetricItem("cluster_indexing", 2, "cluster") metricItem.OnlyPrimary = true - indexMetricItems=append(indexMetricItems, GroupMetricItem{ - Key: "cluster_indexing", - Field: "payload.elasticsearch.node_stats.indices.indexing.index_total", - ID: util.GetUUID(), + indexMetricItems = append(indexMetricItems, GroupMetricItem{ + Key: "cluster_indexing", + Field: "payload.elasticsearch.node_stats.indices.indexing.index_total", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: metricItem, - FormatType: "num", - Units: "doc/s", + MetricItem: metricItem, + FormatType: "num", + Units: "doc/s", }) metricItem = newMetricItem("cluster_search", 2, "cluster") - indexMetricItems=append(indexMetricItems, GroupMetricItem{ - Key: "cluster_search", - Field: "payload.elasticsearch.node_stats.indices.search.query_total", - ID: util.GetUUID(), + indexMetricItems = append(indexMetricItems, GroupMetricItem{ + Key: "cluster_search", + Field: "payload.elasticsearch.node_stats.indices.search.query_total", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: metricItem, - FormatType: "num", - Units: "query/s", + MetricItem: metricItem, + FormatType: "num", + Units: "query/s", }) - clusterID := global.MustLookupString(elastic.GlobalSystemElasticsearchID) intervalField, err := getDateHistogramIntervalField(clusterID, bucketSizeStr) if err != nil { @@ -200,23 +199,23 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request, { "range": util.MapStr{ "timestamp": util.MapStr{ - "gte": fmt.Sprintf("now-%ds", metricLen * bucketSize), + "gte": fmt.Sprintf("now-%ds", metricLen*bucketSize), }, }, }, }, }, } - aggs:=map[string]interface{}{} + aggs := map[string]interface{}{} sumAggs := util.MapStr{} - for _,metricItem:=range indexMetricItems { + for _, metricItem := range indexMetricItems { leafAgg := util.MapStr{ - "max":util.MapStr{ + "max": 
util.MapStr{ "field": metricItem.Field, }, } - var sumBucketPath = "term_node>"+ metricItem.ID + var sumBucketPath = "term_node>" + metricItem.ID aggs[metricItem.ID] = leafAgg sumAggs[metricItem.ID] = util.MapStr{ @@ -224,22 +223,22 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request, "buckets_path": sumBucketPath, }, } - if metricItem.IsDerivative{ - sumAggs[metricItem.ID+"_deriv"]=util.MapStr{ - "derivative":util.MapStr{ + if metricItem.IsDerivative { + sumAggs[metricItem.ID+"_deriv"] = util.MapStr{ + "derivative": util.MapStr{ "buckets_path": metricItem.ID, }, } } } - sumAggs["term_node"]= util.MapStr{ + sumAggs["term_node"] = util.MapStr{ "terms": util.MapStr{ "field": "metadata.labels.node_id", - "size": 1000, + "size": 1000, }, "aggs": aggs, } - query["aggs"]= util.MapStr{ + query["aggs"] = util.MapStr{ "group_by_level": util.MapStr{ "terms": util.MapStr{ "field": "metadata.labels.cluster_uuid", @@ -247,11 +246,11 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request, }, "aggs": util.MapStr{ "dates": util.MapStr{ - "date_histogram":util.MapStr{ - "field": "timestamp", + "date_histogram": util.MapStr{ + "field": "timestamp", intervalField: bucketSizeStr, }, - "aggs":sumAggs, + "aggs": sumAggs, }, }, }, @@ -279,12 +278,12 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request, for _, line := range indexMetrics["cluster_indexing"].Lines { // remove first metric dot data := line.Data - if v, ok := data.([][]interface{}); ok && len(v)> 0 { + if v, ok := data.([][]interface{}); ok && len(v) > 0 { // remove first metric dot temp := v[1:] // // remove first last dot if len(temp) > 0 { - temp = temp[0: len(temp)-1] + temp = temp[0 : len(temp)-1] } data = temp } @@ -293,12 +292,12 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request, searchMetricData := util.MapStr{} for _, line := range indexMetrics["cluster_search"].Lines { data := line.Data - if v, ok := data.([][]interface{}); ok && len(v)> 0 { + if v, ok := data.([][]interface{}); ok && len(v) > 0 { // remove first metric dot temp := v[1:] // // remove first last dot if len(temp) > 0 { - temp = temp[0: len(temp)-1] + temp = temp[0 : len(temp)-1] } data = temp } @@ -633,7 +632,6 @@ func (h *APIHandler) GetClusterNodes(w http.ResponseWriter, req *http.Request, p } } - if v, ok := nodeID.(string); ok { nodeInfos[v] = util.MapStr{ "timestamp": hitM["timestamp"], @@ -642,7 +640,7 @@ func (h *APIHandler) GetClusterNodes(w http.ResponseWriter, req *http.Request, p "load_1m": load, "heap.percent": heapUsage, "disk.avail": availDisk, - "disk.used": usedDisk, + "disk.used": usedDisk, "uptime": uptime, } @@ -865,14 +863,14 @@ type RealtimeNodeInfo struct { IndexQPS interface{} `json:"index_qps"` QueryQPS interface{} `json:"query_qps"` IndexBytesQPS interface{} `json:"index_bytes_qps"` - Timestamp uint64 `json:"timestamp"` + Timestamp uint64 `json:"timestamp"` CatNodeResponse } func (h *APIHandler) getIndexQPS(clusterID string, bucketSizeInSeconds int) (map[string]util.MapStr, error) { ver := h.Client().GetVersion() - bucketSizeStr := fmt.Sprintf("%ds", bucketSizeInSeconds) - intervalField, err := elastic.GetDateHistogramIntervalField(ver.Distribution, ver.Number, bucketSizeStr) + bucketSizeStr := fmt.Sprintf("%ds", bucketSizeInSeconds) + intervalField, err := elastic.GetDateHistogramIntervalField(ver.Distribution, ver.Number, bucketSizeStr) if err != nil { return nil, err } @@ -891,18 +889,18 @@ func (h *APIHandler) getIndexQPS(clusterID 
string, bucketSizeInSeconds int) (map "aggs": util.MapStr{ "date": util.MapStr{ "date_histogram": util.MapStr{ - "field": "timestamp", + "field": "timestamp", intervalField: bucketSizeStr, }, "aggs": util.MapStr{ "term_shard": util.MapStr{ "terms": util.MapStr{ "field": "metadata.labels.shard_id", - "size": 1000, + "size": 1000, }, "aggs": util.MapStr{ "filter_pri": util.MapStr{ - "filter": util.MapStr{ "term": util.MapStr{ "payload.elasticsearch.shard_stats.routing.primary": true } }, + "filter": util.MapStr{"term": util.MapStr{"payload.elasticsearch.shard_stats.routing.primary": true}}, "aggs": util.MapStr{ "index_total": util.MapStr{ "max": util.MapStr{ @@ -994,8 +992,8 @@ func (h *APIHandler) getIndexQPS(clusterID string, bucketSizeInSeconds int) (map func (h *APIHandler) getShardQPS(clusterID string, nodeUUID string, indexName string, bucketSizeInSeconds int) (map[string]util.MapStr, error) { ver := h.Client().GetVersion() - bucketSizeStr := fmt.Sprintf("%ds", bucketSizeInSeconds) - intervalField, err := elastic.GetDateHistogramIntervalField(ver.Distribution, ver.Number, bucketSizeStr) + bucketSizeStr := fmt.Sprintf("%ds", bucketSizeInSeconds) + intervalField, err := elastic.GetDateHistogramIntervalField(ver.Distribution, ver.Number, bucketSizeStr) if err != nil { return nil, err } @@ -1048,7 +1046,7 @@ func (h *APIHandler) getShardQPS(clusterID string, nodeUUID string, indexName st "aggs": util.MapStr{ "date": util.MapStr{ "date_histogram": util.MapStr{ - "field": "timestamp", + "field": "timestamp", intervalField: bucketSizeStr, }, "aggs": util.MapStr{ @@ -1108,8 +1106,8 @@ func (h *APIHandler) getShardQPS(clusterID string, nodeUUID string, indexName st func (h *APIHandler) getNodeQPS(clusterID string, bucketSizeInSeconds int) (map[string]util.MapStr, error) { ver := h.Client().GetVersion() - bucketSizeStr := fmt.Sprintf("%ds", bucketSizeInSeconds) - intervalField, err := elastic.GetDateHistogramIntervalField(ver.Distribution, ver.Number, bucketSizeStr) + bucketSizeStr := fmt.Sprintf("%ds", bucketSizeInSeconds) + intervalField, err := elastic.GetDateHistogramIntervalField(ver.Distribution, ver.Number, bucketSizeStr) if err != nil { return nil, err } @@ -1128,7 +1126,7 @@ func (h *APIHandler) getNodeQPS(clusterID string, bucketSizeInSeconds int) (map[ "aggs": util.MapStr{ "date": util.MapStr{ "date_histogram": util.MapStr{ - "field": "timestamp", + "field": "timestamp", intervalField: bucketSizeStr, }, "aggs": util.MapStr{ @@ -1238,11 +1236,11 @@ func (h *APIHandler) SearchClusterMetadata(w http.ResponseWriter, req *http.Requ { "match": util.MapStr{ reqBody.SearchField: util.MapStr{ - "query": reqBody.Keyword, - "fuzziness": "AUTO", - "max_expansions": 10, - "prefix_length": 2, - "boost": 2, + "query": reqBody.Keyword, + "fuzziness": "AUTO", + "max_expansions": 10, + "prefix_length": 2, + "boost": 2, }, }, }, @@ -1284,11 +1282,11 @@ func (h *APIHandler) SearchClusterMetadata(w http.ResponseWriter, req *http.Requ { "match": util.MapStr{ "search_text": util.MapStr{ - "query": reqBody.Keyword, - "fuzziness": "AUTO", - "max_expansions": 10, - "prefix_length": 2, - "boost": 2, + "query": reqBody.Keyword, + "fuzziness": "AUTO", + "max_expansions": 10, + "prefix_length": 2, + "boost": 2, }, }, }, @@ -1357,7 +1355,7 @@ func (h *APIHandler) getClusterMonitorState(w http.ResponseWriter, req *http.Req id := ps.ByName("id") collectionMode := GetMonitorState(id) ret := util.MapStr{ - "cluster_id": id, + "cluster_id": id, "metric_collection_mode": collectionMode, } queryDSL := util.MapStr{ @@ 
-1382,7 +1380,7 @@ func (h *APIHandler) getClusterMonitorState(w http.ResponseWriter, req *http.Req "grp_name": util.MapStr{ "terms": util.MapStr{ "field": "metadata.name", - "size": 10, + "size": 10, }, "aggs": util.MapStr{ "max_timestamp": util.MapStr{ @@ -1405,11 +1403,11 @@ func (h *APIHandler) getClusterMonitorState(w http.ResponseWriter, req *http.Req key := bk["key"].(string) if tv, ok := bk["max_timestamp"].(map[string]interface{}); ok { if collectionMode == elastic.ModeAgentless { - if util.StringInArray([]string{ "index_stats", "cluster_health", "cluster_stats", "node_stats"}, key) { + if util.StringInArray([]string{"index_stats", "cluster_health", "cluster_stats", "node_stats"}, key) { ret[key] = getCollectionStats(tv["value"]) } - }else{ - if util.StringInArray([]string{ "shard_stats", "cluster_health", "cluster_stats", "node_stats"}, key) { + } else { + if util.StringInArray([]string{"shard_stats", "cluster_health", "cluster_stats", "node_stats"}, key) { ret[key] = getCollectionStats(tv["value"]) } } @@ -1422,15 +1420,15 @@ func (h *APIHandler) getClusterMonitorState(w http.ResponseWriter, req *http.Req func getCollectionStats(lastActiveAt interface{}) util.MapStr { stats := util.MapStr{ "last_active_at": lastActiveAt, - "status": "active", + "status": "active", } if timestamp, ok := lastActiveAt.(float64); ok { t := time.Unix(int64(timestamp/1000), 0) - if time.Now().Sub(t) > 5 * time.Minute { + if time.Now().Sub(t) > 5*time.Minute { stats["status"] = "warning" - }else{ + } else { stats["status"] = "ok" } } return stats -} \ No newline at end of file +} diff --git a/modules/elastic/api/discover.go b/modules/elastic/api/discover.go index 92b03ff8..f30626f1 100644 --- a/modules/elastic/api/discover.go +++ b/modules/elastic/api/discover.go @@ -39,7 +39,7 @@ import ( func (h *APIHandler) HandleEseSearchAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { targetClusterID := ps.ByName("id") - exists,client,err:=h.GetClusterClient(targetClusterID) + exists, client, err := h.GetClusterClient(targetClusterID) if err != nil { log.Error(err) @@ -47,16 +47,16 @@ func (h *APIHandler) HandleEseSearchAction(w http.ResponseWriter, req *http.Requ return } - if !exists{ - errStr := fmt.Sprintf("cluster [%s] not found",targetClusterID) + if !exists { + errStr := fmt.Sprintf("cluster [%s] not found", targetClusterID) log.Error(errStr) h.WriteError(w, errStr, http.StatusNotFound) return } - var reqParams = struct{ - Index string `json:"index"` - Body map[string]interface{} `json:"body"` + var reqParams = struct { + Index string `json:"index"` + Body map[string]interface{} `json:"body"` DistinctByField map[string]interface{} `json:"distinct_by_field"` }{} @@ -101,12 +101,12 @@ func (h *APIHandler) HandleEseSearchAction(w http.ResponseWriter, req *http.Requ if qm, ok := query.(map[string]interface{}); ok { filter, _ := util.MapStr(qm).GetValue("bool.filter") - if fv, ok := filter.([]interface{}); ok{ + if fv, ok := filter.([]interface{}); ok { fv = append(fv, util.MapStr{ "script": util.MapStr{ "script": util.MapStr{ "source": "distinct_by_field", - "lang": "infini", + "lang": "infini", "params": reqParams.DistinctByField, }, }, @@ -173,7 +173,7 @@ func (h *APIHandler) HandleEseSearchAction(w http.ResponseWriter, req *http.Requ if timeout != "" { queryArgs = &[]util.KV{ { - Key: "timeout", + Key: "timeout", Value: timeout, }, } @@ -184,7 +184,7 @@ func (h *APIHandler) HandleEseSearchAction(w http.ResponseWriter, req *http.Requ } var cancel context.CancelFunc // here add one 
second for network delay - ctx, cancel = context.WithTimeout(context.Background(), du + time.Second) + ctx, cancel = context.WithTimeout(context.Background(), du+time.Second) defer cancel() } @@ -207,12 +207,10 @@ func (h *APIHandler) HandleEseSearchAction(w http.ResponseWriter, req *http.Requ h.Write(w, searchRes.RawResult.Body) } - func (h *APIHandler) HandleValueSuggestionAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - resBody := map[string]interface{}{ - } + resBody := map[string]interface{}{} targetClusterID := ps.ByName("id") - exists,client,err:=h.GetClusterClient(targetClusterID) + exists, client, err := h.GetClusterClient(targetClusterID) if err != nil { log.Error(err) @@ -221,16 +219,16 @@ func (h *APIHandler) HandleValueSuggestionAction(w http.ResponseWriter, req *htt return } - if !exists{ - errStr := fmt.Sprintf("cluster [%s] not found",targetClusterID) + if !exists { + errStr := fmt.Sprintf("cluster [%s] not found", targetClusterID) h.WriteError(w, errStr, http.StatusNotFound) return } - var reqParams = struct{ + var reqParams = struct { BoolFilter interface{} `json:"boolFilter"` - FieldName string `json:"field"` - Query string `json:"query"` + FieldName string `json:"field"` + Query string `json:"query"` }{} err = h.DecodeJSON(req, &reqParams) if err != nil { @@ -246,7 +244,7 @@ func (h *APIHandler) HandleValueSuggestionAction(w http.ResponseWriter, req *htt indices, hasAll := h.GetAllowedIndices(req, targetClusterID) if !hasAll { if len(indices) == 0 { - h.WriteJSON(w, values,http.StatusOK) + h.WriteJSON(w, values, http.StatusOK) return } boolQ["must"] = []util.MapStr{ @@ -265,15 +263,15 @@ func (h *APIHandler) HandleValueSuggestionAction(w http.ResponseWriter, req *htt "aggs": util.MapStr{ "suggestions": util.MapStr{ "terms": util.MapStr{ - "field": reqParams.FieldName, - "include": reqParams.Query + ".*", + "field": reqParams.FieldName, + "include": reqParams.Query + ".*", "execution_hint": "map", - "shard_size": 10, + "shard_size": 10, }, }, }, } - var queryBodyBytes = util.MustToJSONBytes(queryBody) + var queryBodyBytes = util.MustToJSONBytes(queryBody) searchRes, err := client.SearchWithRawQueryDSL(indexName, queryBodyBytes) if err != nil { @@ -285,7 +283,7 @@ func (h *APIHandler) HandleValueSuggestionAction(w http.ResponseWriter, req *htt for _, bucket := range searchRes.Aggregations["suggestions"].Buckets { values = append(values, bucket["key"]) } - h.WriteJSON(w, values,http.StatusOK) + h.WriteJSON(w, values, http.StatusOK) } func (h *APIHandler) HandleTraceIDSearchAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { @@ -293,7 +291,7 @@ func (h *APIHandler) HandleTraceIDSearchAction(w http.ResponseWriter, req *http. traceIndex := h.GetParameterOrDefault(req, "traceIndex", orm.GetIndexName(elastic.TraceMeta{})) traceField := h.GetParameterOrDefault(req, "traceField", "trace_id") targetClusterID := ps.ByName("id") - exists,client,err:=h.GetClusterClient(targetClusterID) + exists, client, err := h.GetClusterClient(targetClusterID) if err != nil { log.Error(err) @@ -301,8 +299,8 @@ func (h *APIHandler) HandleTraceIDSearchAction(w http.ResponseWriter, req *http. return } - if !exists{ - errStr := fmt.Sprintf("cluster [%s] not found",targetClusterID) + if !exists { + errStr := fmt.Sprintf("cluster [%s] not found", targetClusterID) h.WriteError(w, errStr, http.StatusNotFound) return } @@ -340,4 +338,3 @@ func (h *APIHandler) HandleTraceIDSearchAction(w http.ResponseWriter, req *http. 
} h.WriteJSON(w, indexNames, http.StatusOK) } - diff --git a/modules/elastic/api/host.go b/modules/elastic/api/host.go index f0ba251f..39264493 100644 --- a/modules/elastic/api/host.go +++ b/modules/elastic/api/host.go @@ -211,8 +211,7 @@ func (h *APIHandler) getDiscoverHosts(w http.ResponseWriter, req *http.Request, func getHostSummary(agentIDs []string, metricName string, summary map[string]util.MapStr) error { if summary == nil { - summary = map[string]util.MapStr{ - } + summary = map[string]util.MapStr{} } if len(agentIDs) == 0 { @@ -506,8 +505,7 @@ func (h *APIHandler) FetchHostInfo(w http.ResponseWriter, req *http.Request, ps for key, item := range hostMetrics { for _, line := range item.Lines { if _, ok := networkMetrics[line.Metric.Label]; !ok { - networkMetrics[line.Metric.Label] = util.MapStr{ - } + networkMetrics[line.Metric.Label] = util.MapStr{} } networkMetrics[line.Metric.Label][key] = line.Data } @@ -682,20 +680,20 @@ func (h *APIHandler) getSingleHostMetricFromNode(ctx context.Context, nodeID str } const ( - OSCPUUsedPercentMetricKey = "cpu_used_percent" - MemoryUsedPercentMetricKey = "memory_used_percent" - DiskUsedPercentMetricKey = "disk_used_percent" - SystemLoadMetricKey = "system_load" - CPUIowaitMetricKey = "cpu_iowait" - SwapMemoryUsedPercentMetricKey= "swap_memory_used_percent" - NetworkSummaryMetricKey = "network_summary" - NetworkPacketsSummaryMetricKey = "network_packets_summary" - DiskReadRateMetricKey = "disk_read_rate" - DiskWriteRateMetricKey = "disk_write_rate" - DiskPartitionUsageMetricKey = "disk_partition_usage" + OSCPUUsedPercentMetricKey = "cpu_used_percent" + MemoryUsedPercentMetricKey = "memory_used_percent" + DiskUsedPercentMetricKey = "disk_used_percent" + SystemLoadMetricKey = "system_load" + CPUIowaitMetricKey = "cpu_iowait" + SwapMemoryUsedPercentMetricKey = "swap_memory_used_percent" + NetworkSummaryMetricKey = "network_summary" + NetworkPacketsSummaryMetricKey = "network_packets_summary" + DiskReadRateMetricKey = "disk_read_rate" + DiskWriteRateMetricKey = "disk_write_rate" + DiskPartitionUsageMetricKey = "disk_partition_usage" NetworkInterfaceOutputRateMetricKey = "network_interface_output_rate" - ) + func (h *APIHandler) GetSingleHostMetrics(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { hostID := ps.MustGetParameter("host_id") hostInfo := &host.HostInfo{} @@ -798,7 +796,7 @@ func (h *APIHandler) GetSingleHostMetrics(w http.ResponseWriter, req *http.Reque metricItem.AddLine("Disk Write Rate", "Disk Write Rate", "network write rate of host.", "group1", "payload.host.diskio_summary.write.bytes", "max", bucketSizeStr, "%", "bytes", "0,0.[00]", "0,0.[00]", false, true) metricItems = append(metricItems, metricItem) case DiskPartitionUsageMetricKey, NetworkInterfaceOutputRateMetricKey: - resBody["metrics"] , err = h.getGroupHostMetrics(ctx, hostInfo.AgentID, min, max, bucketSize, key) + resBody["metrics"], err = h.getGroupHostMetrics(ctx, hostInfo.AgentID, min, max, bucketSize, key) if err != nil { log.Error(err) h.WriteError(w, err, http.StatusInternalServerError) diff --git a/modules/elastic/api/ilm.go b/modules/elastic/api/ilm.go index 54cc038b..49925214 100644 --- a/modules/elastic/api/ilm.go +++ b/modules/elastic/api/ilm.go @@ -35,7 +35,7 @@ import ( "net/http" ) -func (h *APIHandler) HandleGetILMPolicyAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){ +func (h *APIHandler) HandleGetILMPolicyAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { clusterID := ps.MustGetParameter("id") 
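The ILM handlers in this file are thin wrappers over the cluster client. A hedged sketch of the same calls outside an HTTP handler follows; removeILMPolicy is hypothetical, and GetILMPolicy accepting a non-empty name as a filter is an assumption consistent with the GetILMPolicy("") list call used below.

// Hypothetical helper, not part of this patch.
func removeILMPolicy(clusterID, policy string) error {
	esClient := elastic.GetClient(clusterID) // same lookup the handlers use
	// Assumed: a non-empty name narrows the lookup to a single policy.
	if _, err := esClient.GetILMPolicy(policy); err != nil {
		return err
	}
	return esClient.DeleteILMPolicy(policy)
}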
esClient := elastic.GetClient(clusterID) policies, err := esClient.GetILMPolicy("") @@ -47,7 +47,7 @@ func (h *APIHandler) HandleGetILMPolicyAction(w http.ResponseWriter, req *http.R h.WriteJSON(w, policies, http.StatusOK) } -func (h *APIHandler) HandleSaveILMPolicyAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){ +func (h *APIHandler) HandleSaveILMPolicyAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { clusterID := ps.MustGetParameter("id") policy := ps.MustGetParameter("policy") esClient := elastic.GetClient(clusterID) @@ -66,7 +66,7 @@ func (h *APIHandler) HandleSaveILMPolicyAction(w http.ResponseWriter, req *http. h.WriteAckOKJSON(w) } -func (h *APIHandler) HandleDeleteILMPolicyAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){ +func (h *APIHandler) HandleDeleteILMPolicyAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { clusterID := ps.MustGetParameter("id") policy := ps.MustGetParameter("policy") esClient := elastic.GetClient(clusterID) @@ -77,4 +77,4 @@ func (h *APIHandler) HandleDeleteILMPolicyAction(w http.ResponseWriter, req *htt return } h.WriteAckOKJSON(w) -} \ No newline at end of file +} diff --git a/modules/elastic/api/index_metrics.go b/modules/elastic/api/index_metrics.go index b73545d6..38b1ebb4 100644 --- a/modules/elastic/api/index_metrics.go +++ b/modules/elastic/api/index_metrics.go @@ -39,8 +39,8 @@ import ( "time" ) -//getClusterUUID reads the cluster uuid from metadata -func (h *APIHandler) getClusterUUID(clusterID string) (string, error){ +// getClusterUUID reads the cluster uuid from metadata +func (h *APIHandler) getClusterUUID(clusterID string) (string, error) { meta := elastic.GetMetadata(clusterID) if meta == nil { return "", fmt.Errorf("metadata of cluster [%s] was not found", clusterID) @@ -48,16 +48,16 @@ func (h *APIHandler) getClusterUUID(clusterID string) (string, error){ return meta.Config.ClusterUUID, nil } -func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clusterID string, bucketSize int, min, max int64, indexName string, top int, shardID string, metricKey string) (map[string]*common.MetricItem, error){ - bucketSizeStr:=fmt.Sprintf("%vs",bucketSize) +func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clusterID string, bucketSize int, min, max int64, indexName string, top int, shardID string, metricKey string) (map[string]*common.MetricItem, error) { + bucketSizeStr := fmt.Sprintf("%vs", bucketSize) clusterUUID, err := h.getClusterUUID(clusterID) if err != nil { return nil, err } should := []util.MapStr{ { - "term":util.MapStr{ - "metadata.labels.cluster_id":util.MapStr{ + "term": util.MapStr{ + "metadata.labels.cluster_id": util.MapStr{ "value": clusterID, }, }, @@ -65,8 +65,8 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu } if clusterUUID != "" { should = append(should, util.MapStr{ - "term":util.MapStr{ - "metadata.labels.cluster_uuid":util.MapStr{ + "term": util.MapStr{ + "metadata.labels.cluster_uuid": util.MapStr{ "value": clusterUUID, }, }, @@ -107,11 +107,11 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu if !hasAllPrivilege && len(allowedIndices) == 0 { return nil, nil } - if !hasAllPrivilege{ + if !hasAllPrivilege { namePattern := radix.Compile(allowedIndices...) 
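getIndexMetrics above matches monitoring docs by cluster_id or, when known, cluster_uuid, because events written by older collectors may carry only one of the two labels. A minimal sketch of that either-or filter as it ends up assembled; the wrapper function is an assumption, and minimum_should_match is set by analogy with the other bool queries in this patch.

// Sketch: accept docs labeled with either the config ID or the cluster UUID.
func clusterLabelFilter(clusterID, clusterUUID string) util.MapStr {
	should := []util.MapStr{
		{"term": util.MapStr{"metadata.labels.cluster_id": util.MapStr{"value": clusterID}}},
	}
	if clusterUUID != "" { // older events may predate the uuid label
		should = append(should, util.MapStr{
			"term": util.MapStr{"metadata.labels.cluster_uuid": util.MapStr{"value": clusterUUID}},
		})
	}
	return util.MapStr{
		"bool": util.MapStr{
			"minimum_should_match": 1,
			"should":               should,
		},
	}
}

The same dual-label pattern shows up in FetchClusterInfo, which groups results by metadata.labels.cluster_uuid for the same reason.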
var filterNames []string for _, name := range indexNames { - if namePattern.Match(name){ + if namePattern.Match(name) { filterNames = append(filterNames, name) } } @@ -122,7 +122,7 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu } top = len(indexNames) - }else{ + } else { indexNames, err = h.getTopIndexName(req, clusterID, top, 15) if err != nil { return nil, err @@ -137,140 +137,140 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu }) } - query:=map[string]interface{}{} + query := map[string]interface{}{} indexMetricItems := []GroupMetricItem{} switch metricKey { case v1.IndexStorageMetricKey: //索引存储大小 indexStorageMetric := newMetricItem(v1.IndexStorageMetricKey, 1, StorageGroupKey) - indexStorageMetric.AddAxi("Index storage","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) + indexStorageMetric.AddAxi("Index storage", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "index_storage", - Field: "payload.elasticsearch.shard_stats.store.size_in_bytes", - ID: util.GetUUID(), + Key: "index_storage", + Field: "payload.elasticsearch.shard_stats.store.size_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: indexStorageMetric, - FormatType: "bytes", - Units: "", + MetricItem: indexStorageMetric, + FormatType: "bytes", + Units: "", }) case v1.SegmentCountMetricKey: // segment 数量 - segmentCountMetric:=newMetricItem(v1.SegmentCountMetricKey, 15, StorageGroupKey) - segmentCountMetric.AddAxi("segment count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) - indexMetricItems=append(indexMetricItems, GroupMetricItem{ - Key: "segment_count", - Field: "payload.elasticsearch.shard_stats.segments.count", - ID: util.GetUUID(), + segmentCountMetric := newMetricItem(v1.SegmentCountMetricKey, 15, StorageGroupKey) + segmentCountMetric.AddAxi("segment count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) + indexMetricItems = append(indexMetricItems, GroupMetricItem{ + Key: "segment_count", + Field: "payload.elasticsearch.shard_stats.segments.count", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: segmentCountMetric, - FormatType: "num", - Units: "", + MetricItem: segmentCountMetric, + FormatType: "num", + Units: "", }) case v1.DocCountMetricKey: //索引文档个数 docCountMetric := newMetricItem(v1.DocCountMetricKey, 2, DocumentGroupKey) - docCountMetric.AddAxi("Doc count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + docCountMetric.AddAxi("Doc count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "doc_count", - Field: "payload.elasticsearch.shard_stats.docs.count", - ID: util.GetUUID(), + Key: "doc_count", + Field: "payload.elasticsearch.shard_stats.docs.count", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: docCountMetric, - FormatType: "num", - Units: "", + MetricItem: docCountMetric, + FormatType: "num", + Units: "", }) case v1.DocsDeletedMetricKey: // docs 删除数量 - docsDeletedMetric:=newMetricItem(v1.DocsDeletedMetricKey, 17, DocumentGroupKey) - docsDeletedMetric.AddAxi("docs deleted","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) - indexMetricItems=append(indexMetricItems, GroupMetricItem{ - Key: "docs_deleted", - Field: "payload.elasticsearch.shard_stats.docs.deleted", - ID: util.GetUUID(), + docsDeletedMetric := newMetricItem(v1.DocsDeletedMetricKey, 17, DocumentGroupKey) + 
docsDeletedMetric.AddAxi("docs deleted", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) + indexMetricItems = append(indexMetricItems, GroupMetricItem{ + Key: "docs_deleted", + Field: "payload.elasticsearch.shard_stats.docs.deleted", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: docsDeletedMetric, - FormatType: "num", - Units: "", + MetricItem: docsDeletedMetric, + FormatType: "num", + Units: "", }) case v1.QueryTimesMetricKey: //查询次数 queryTimesMetric := newMetricItem("query_times", 2, OperationGroupKey) - queryTimesMetric.AddAxi("Query times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + queryTimesMetric.AddAxi("Query times", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "query_times", - Field: "payload.elasticsearch.shard_stats.search.query_total", - ID: util.GetUUID(), + Key: "query_times", + Field: "payload.elasticsearch.shard_stats.search.query_total", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: queryTimesMetric, - FormatType: "num", - Units: "requests/s", + MetricItem: queryTimesMetric, + FormatType: "num", + Units: "requests/s", }) case v1.FetchTimesMetricKey: //Fetch次数 fetchTimesMetric := newMetricItem(v1.FetchTimesMetricKey, 3, OperationGroupKey) - fetchTimesMetric.AddAxi("Fetch times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + fetchTimesMetric.AddAxi("Fetch times", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "fetch_times", - Field: "payload.elasticsearch.shard_stats.search.fetch_total", - ID: util.GetUUID(), + Key: "fetch_times", + Field: "payload.elasticsearch.shard_stats.search.fetch_total", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: fetchTimesMetric, - FormatType: "num", - Units: "requests/s", + MetricItem: fetchTimesMetric, + FormatType: "num", + Units: "requests/s", }) case v1.ScrollTimesMetricKey: //scroll 次数 scrollTimesMetric := newMetricItem(v1.ScrollTimesMetricKey, 4, OperationGroupKey) - scrollTimesMetric.AddAxi("scroll times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + scrollTimesMetric.AddAxi("scroll times", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "scroll_times", - Field: "payload.elasticsearch.shard_stats.search.scroll_total", - ID: util.GetUUID(), + Key: "scroll_times", + Field: "payload.elasticsearch.shard_stats.search.scroll_total", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: scrollTimesMetric, - FormatType: "num", - Units: "requests/s", + MetricItem: scrollTimesMetric, + FormatType: "num", + Units: "requests/s", }) case v1.MergeTimesMetricKey: //Merge次数 mergeTimesMetric := newMetricItem(v1.MergeTimesMetricKey, 7, OperationGroupKey) - mergeTimesMetric.AddAxi("Merge times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + mergeTimesMetric.AddAxi("Merge times", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "merge_times", - Field: "payload.elasticsearch.shard_stats.merges.total", - ID: util.GetUUID(), + Key: "merge_times", + Field: "payload.elasticsearch.shard_stats.merges.total", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: mergeTimesMetric, - FormatType: "num", - Units: "requests/s", + MetricItem: mergeTimesMetric, + FormatType: "num", + Units: "requests/s", }) case 
v1.RefreshTimesMetricKey: //Refresh次数 refreshTimesMetric := newMetricItem(v1.RefreshTimesMetricKey, 5, OperationGroupKey) - refreshTimesMetric.AddAxi("Refresh times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + refreshTimesMetric.AddAxi("Refresh times", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "refresh_times", - Field: "payload.elasticsearch.shard_stats.refresh.total", - ID: util.GetUUID(), + Key: "refresh_times", + Field: "payload.elasticsearch.shard_stats.refresh.total", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: refreshTimesMetric, - FormatType: "num", - Units: "requests/s", + MetricItem: refreshTimesMetric, + FormatType: "num", + Units: "requests/s", }) case v1.FlushTimesMetricKey: //flush 次数 flushTimesMetric := newMetricItem(v1.FlushTimesMetricKey, 6, OperationGroupKey) - flushTimesMetric.AddAxi("flush times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + flushTimesMetric.AddAxi("flush times", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "flush_times", - Field: "payload.elasticsearch.shard_stats.flush.total", - ID: util.GetUUID(), + Key: "flush_times", + Field: "payload.elasticsearch.shard_stats.flush.total", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: flushTimesMetric, - FormatType: "num", - Units: "requests/s", + MetricItem: flushTimesMetric, + FormatType: "num", + Units: "requests/s", }) case v1.IndexingRateMetricKey: //写入速率 @@ -278,22 +278,22 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu if shardID == "" { indexingRateMetric.OnlyPrimary = true } - indexingRateMetric.AddAxi("Indexing rate","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + indexingRateMetric.AddAxi("Indexing rate", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "indexing_rate", - Field: "payload.elasticsearch.shard_stats.indexing.index_total", - ID: util.GetUUID(), + Key: "indexing_rate", + Field: "payload.elasticsearch.shard_stats.indexing.index_total", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: indexingRateMetric, - FormatType: "num", - Units: "doc/s", + MetricItem: indexingRateMetric, + FormatType: "num", + Units: "doc/s", }) case v1.IndexingBytesMetricKey: indexingBytesMetric := newMetricItem(v1.IndexingBytesMetricKey, 2, OperationGroupKey) if shardID == "" { indexingBytesMetric.OnlyPrimary = true } - indexingBytesMetric.AddAxi("Indexing bytes","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) + indexingBytesMetric.AddAxi("Indexing bytes", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ Key: "indexing_bytes", Field: "payload.elasticsearch.shard_stats.store.size_in_bytes", @@ -309,366 +309,365 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu if shardID == "" { indexingLatencyMetric.OnlyPrimary = true } - indexingLatencyMetric.AddAxi("Indexing latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + indexingLatencyMetric.AddAxi("Indexing latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "indexing_latency", - Field: "payload.elasticsearch.shard_stats.indexing.index_time_in_millis", + Key: "indexing_latency", + 
Field: "payload.elasticsearch.shard_stats.indexing.index_time_in_millis", Field2: "payload.elasticsearch.shard_stats.indexing.index_total", Calc: func(value, value2 float64) float64 { - return value/value2 + return value / value2 }, - ID: util.GetUUID(), + ID: util.GetUUID(), IsDerivative: true, - MetricItem: indexingLatencyMetric, - FormatType: "num", - Units: "ms", + MetricItem: indexingLatencyMetric, + FormatType: "num", + Units: "ms", }) case v1.QueryLatencyMetricKey: //查询时延 queryLatencyMetric := newMetricItem(v1.QueryLatencyMetricKey, 2, LatencyGroupKey) - queryLatencyMetric.AddAxi("Query latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + queryLatencyMetric.AddAxi("Query latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "query_latency", - Field: "payload.elasticsearch.shard_stats.search.query_time_in_millis", + Key: "query_latency", + Field: "payload.elasticsearch.shard_stats.search.query_time_in_millis", Field2: "payload.elasticsearch.shard_stats.search.query_total", Calc: func(value, value2 float64) float64 { - return value/value2 + return value / value2 }, - ID: util.GetUUID(), + ID: util.GetUUID(), IsDerivative: true, - MetricItem: queryLatencyMetric, - FormatType: "num", - Units: "ms", + MetricItem: queryLatencyMetric, + FormatType: "num", + Units: "ms", }) case FetchLatencyMetricKey: //fetch时延 fetchLatencyMetric := newMetricItem(v1.FetchLatencyMetricKey, 3, LatencyGroupKey) - fetchLatencyMetric.AddAxi("Fetch latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + fetchLatencyMetric.AddAxi("Fetch latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "fetch_latency", - Field: "payload.elasticsearch.shard_stats.search.fetch_time_in_millis", + Key: "fetch_latency", + Field: "payload.elasticsearch.shard_stats.search.fetch_time_in_millis", Field2: "payload.elasticsearch.shard_stats.search.fetch_total", Calc: func(value, value2 float64) float64 { - return value/value2 + return value / value2 }, - ID: util.GetUUID(), + ID: util.GetUUID(), IsDerivative: true, - MetricItem: fetchLatencyMetric, - FormatType: "num", - Units: "ms", + MetricItem: fetchLatencyMetric, + FormatType: "num", + Units: "ms", }) case v1.MergeLatencyMetricKey: //merge时延 mergeLatencyMetric := newMetricItem(v1.MergeLatencyMetricKey, 7, LatencyGroupKey) - mergeLatencyMetric.AddAxi("Merge latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + mergeLatencyMetric.AddAxi("Merge latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "merge_latency", - Field: "payload.elasticsearch.shard_stats.merges.total_time_in_millis", + Key: "merge_latency", + Field: "payload.elasticsearch.shard_stats.merges.total_time_in_millis", Field2: "payload.elasticsearch.shard_stats.merges.total", Calc: func(value, value2 float64) float64 { - return value/value2 + return value / value2 }, - ID: util.GetUUID(), + ID: util.GetUUID(), IsDerivative: true, - MetricItem: mergeLatencyMetric, - FormatType: "num", - Units: "ms", + MetricItem: mergeLatencyMetric, + FormatType: "num", + Units: "ms", }) case RefreshLatencyMetricKey: //refresh时延 refreshLatencyMetric := newMetricItem(v1.RefreshLatencyMetricKey, 5, LatencyGroupKey) - refreshLatencyMetric.AddAxi("Refresh latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + 
refreshLatencyMetric.AddAxi("Refresh latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "refresh_latency", - Field: "payload.elasticsearch.shard_stats.refresh.total_time_in_millis", + Key: "refresh_latency", + Field: "payload.elasticsearch.shard_stats.refresh.total_time_in_millis", Field2: "payload.elasticsearch.shard_stats.refresh.total", Calc: func(value, value2 float64) float64 { - return value/value2 + return value / value2 }, - ID: util.GetUUID(), + ID: util.GetUUID(), IsDerivative: true, - MetricItem: refreshLatencyMetric, - FormatType: "num", - Units: "ms", + MetricItem: refreshLatencyMetric, + FormatType: "num", + Units: "ms", }) case v1.ScrollLatencyMetricKey: //scroll时延 scrollLatencyMetric := newMetricItem(v1.ScrollLatencyMetricKey, 4, LatencyGroupKey) - scrollLatencyMetric.AddAxi("Scroll Latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + scrollLatencyMetric.AddAxi("Scroll Latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "scroll_latency", - Field: "payload.elasticsearch.shard_stats.search.scroll_time_in_millis", + Key: "scroll_latency", + Field: "payload.elasticsearch.shard_stats.search.scroll_time_in_millis", Field2: "payload.elasticsearch.shard_stats.search.scroll_total", Calc: func(value, value2 float64) float64 { - return value/value2 + return value / value2 }, - ID: util.GetUUID(), + ID: util.GetUUID(), IsDerivative: true, - MetricItem: scrollLatencyMetric, - FormatType: "num", - Units: "ms", + MetricItem: scrollLatencyMetric, + FormatType: "num", + Units: "ms", }) case v1.FlushLatencyMetricKey: //flush 时延 flushLatencyMetric := newMetricItem(v1.FlushLatencyMetricKey, 6, LatencyGroupKey) - flushLatencyMetric.AddAxi("Flush latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + flushLatencyMetric.AddAxi("Flush latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "flush_latency", - Field: "payload.elasticsearch.shard_stats.flush.total_time_in_millis", + Key: "flush_latency", + Field: "payload.elasticsearch.shard_stats.flush.total_time_in_millis", Field2: "payload.elasticsearch.shard_stats.flush.total", Calc: func(value, value2 float64) float64 { - return value/value2 + return value / value2 }, - ID: util.GetUUID(), + ID: util.GetUUID(), IsDerivative: true, - MetricItem: flushLatencyMetric, - FormatType: "num", - Units: "ms", + MetricItem: flushLatencyMetric, + FormatType: "num", + Units: "ms", }) case v1.QueryCacheMetricKey: //queryCache queryCacheMetric := newMetricItem(v1.QueryCacheMetricKey, 1, CacheGroupKey) - queryCacheMetric.AddAxi("Query cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) + queryCacheMetric.AddAxi("Query cache", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "query_cache", - Field: "payload.elasticsearch.shard_stats.query_cache.memory_size_in_bytes", - ID: util.GetUUID(), + Key: "query_cache", + Field: "payload.elasticsearch.shard_stats.query_cache.memory_size_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: queryCacheMetric, - FormatType: "bytes", - Units: "", + MetricItem: queryCacheMetric, + FormatType: "bytes", + Units: "", }) case v1.RequestCacheMetricKey: //requestCache requestCacheMetric := 
newMetricItem(v1.RequestCacheMetricKey, 2, CacheGroupKey) - requestCacheMetric.AddAxi("request cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) + requestCacheMetric.AddAxi("request cache", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "request_cache", - Field: "payload.elasticsearch.shard_stats.request_cache.memory_size_in_bytes", - ID: util.GetUUID(), + Key: "request_cache", + Field: "payload.elasticsearch.shard_stats.request_cache.memory_size_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: requestCacheMetric, - FormatType: "bytes", - Units: "", + MetricItem: requestCacheMetric, + FormatType: "bytes", + Units: "", }) case v1.RequestCacheHitMetricKey: // Request Cache Hit - requestCacheHitMetric:=newMetricItem(v1.RequestCacheHitMetricKey, 6, CacheGroupKey) - requestCacheHitMetric.AddAxi("request cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) - indexMetricItems=append(indexMetricItems, GroupMetricItem{ - Key: "request_cache_hit", - Field: "payload.elasticsearch.shard_stats.request_cache.hit_count", - ID: util.GetUUID(), + requestCacheHitMetric := newMetricItem(v1.RequestCacheHitMetricKey, 6, CacheGroupKey) + requestCacheHitMetric.AddAxi("request cache hit", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) + indexMetricItems = append(indexMetricItems, GroupMetricItem{ + Key: "request_cache_hit", + Field: "payload.elasticsearch.shard_stats.request_cache.hit_count", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: requestCacheHitMetric, - FormatType: "num", - Units: "hits", + MetricItem: requestCacheHitMetric, + FormatType: "num", + Units: "hits", }) - case v1.RequestCacheMissMetricKey: + case v1.RequestCacheMissMetricKey: // Request Cache Miss - requestCacheMissMetric:=newMetricItem(v1.RequestCacheMissMetricKey, 8, CacheGroupKey) - requestCacheMissMetric.AddAxi("request cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) - indexMetricItems=append(indexMetricItems, GroupMetricItem{ - Key: "request_cache_miss", - Field: "payload.elasticsearch.shard_stats.request_cache.miss_count", - ID: util.GetUUID(), + requestCacheMissMetric := newMetricItem(v1.RequestCacheMissMetricKey, 8, CacheGroupKey) + requestCacheMissMetric.AddAxi("request cache miss", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) + indexMetricItems = append(indexMetricItems, GroupMetricItem{ + Key: "request_cache_miss", + Field: "payload.elasticsearch.shard_stats.request_cache.miss_count", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: requestCacheMissMetric, - FormatType: "num", - Units: "misses", + MetricItem: requestCacheMissMetric, + FormatType: "num", + Units: "misses", }) case v1.QueryCacheCountMetricKey: // Query Cache Count - queryCacheCountMetric:=newMetricItem(v1.QueryCacheCountMetricKey, 4, CacheGroupKey) - queryCacheCountMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) - indexMetricItems=append(indexMetricItems, GroupMetricItem{ - Key: "query_cache_count", - Field: "payload.elasticsearch.shard_stats.query_cache.cache_count", - ID: util.GetUUID(), + queryCacheCountMetric := newMetricItem(v1.QueryCacheCountMetricKey, 4, CacheGroupKey) + queryCacheCountMetric.AddAxi("query cache miss", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) + indexMetricItems = append(indexMetricItems, GroupMetricItem{ + Key: "query_cache_count", + Field: 
"payload.elasticsearch.shard_stats.query_cache.cache_count", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: queryCacheCountMetric, - FormatType: "num", - Units: "", + MetricItem: queryCacheCountMetric, + FormatType: "num", + Units: "", }) case v1.QueryCacheHitMetricKey: // Query Cache Miss - queryCacheHitMetric:=newMetricItem(v1.QueryCacheHitMetricKey, 5, CacheGroupKey) - queryCacheHitMetric.AddAxi("query cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) - indexMetricItems=append(indexMetricItems, GroupMetricItem{ - Key: "query_cache_hit", - Field: "payload.elasticsearch.shard_stats.query_cache.hit_count", - ID: util.GetUUID(), + queryCacheHitMetric := newMetricItem(v1.QueryCacheHitMetricKey, 5, CacheGroupKey) + queryCacheHitMetric.AddAxi("query cache hit", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) + indexMetricItems = append(indexMetricItems, GroupMetricItem{ + Key: "query_cache_hit", + Field: "payload.elasticsearch.shard_stats.query_cache.hit_count", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: queryCacheHitMetric, - FormatType: "num", - Units: "hits", + MetricItem: queryCacheHitMetric, + FormatType: "num", + Units: "hits", }) case v1.QueryCacheMissMetricKey: // Query Cache Miss - queryCacheMissMetric:=newMetricItem(v1.QueryCacheMissMetricKey, 7, CacheGroupKey) - queryCacheMissMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) - indexMetricItems=append(indexMetricItems, GroupMetricItem{ - Key: "query_cache_miss", - Field: "payload.elasticsearch.shard_stats.query_cache.miss_count", - ID: util.GetUUID(), + queryCacheMissMetric := newMetricItem(v1.QueryCacheMissMetricKey, 7, CacheGroupKey) + queryCacheMissMetric.AddAxi("query cache miss", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) + indexMetricItems = append(indexMetricItems, GroupMetricItem{ + Key: "query_cache_miss", + Field: "payload.elasticsearch.shard_stats.query_cache.miss_count", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: queryCacheMissMetric, - FormatType: "num", - Units: "misses", + MetricItem: queryCacheMissMetric, + FormatType: "num", + Units: "misses", }) case v1.FielddataCacheMetricKey: // Fielddata内存占用大小 - fieldDataCacheMetric:=newMetricItem(v1.FielddataCacheMetricKey, 3, CacheGroupKey) - fieldDataCacheMetric.AddAxi("FieldData Cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true) - indexMetricItems=append(indexMetricItems, GroupMetricItem{ - Key: "fielddata_cache", - Field: "payload.elasticsearch.shard_stats.fielddata.memory_size_in_bytes", - ID: util.GetUUID(), + fieldDataCacheMetric := newMetricItem(v1.FielddataCacheMetricKey, 3, CacheGroupKey) + fieldDataCacheMetric.AddAxi("FieldData Cache", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true) + indexMetricItems = append(indexMetricItems, GroupMetricItem{ + Key: "fielddata_cache", + Field: "payload.elasticsearch.shard_stats.fielddata.memory_size_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: fieldDataCacheMetric, - FormatType: "bytes", - Units: "", + MetricItem: fieldDataCacheMetric, + FormatType: "bytes", + Units: "", }) case v1.SegmentMemoryMetricKey: //segment memory segmentMemoryMetric := newMetricItem(v1.SegmentMemoryMetricKey, 13, MemoryGroupKey) - segmentMemoryMetric.AddAxi("Segment memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) + segmentMemoryMetric.AddAxi("Segment memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) 
indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "segment_memory", - Field: "payload.elasticsearch.shard_stats.segments.memory_in_bytes", - ID: util.GetUUID(), + Key: "segment_memory", + Field: "payload.elasticsearch.shard_stats.segments.memory_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: segmentMemoryMetric, - FormatType: "bytes", - Units: "", + MetricItem: segmentMemoryMetric, + FormatType: "bytes", + Units: "", }) case v1.SegmentDocValuesMemoryMetricKey: //segment doc values memory docValuesMemoryMetric := newMetricItem(v1.SegmentDocValuesMemoryMetricKey, 13, MemoryGroupKey) - docValuesMemoryMetric.AddAxi("Segment Doc values Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) + docValuesMemoryMetric.AddAxi("Segment Doc values Memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "segment_doc_values_memory", - Field: "payload.elasticsearch.shard_stats.segments.doc_values_memory_in_bytes", - ID: util.GetUUID(), + Key: "segment_doc_values_memory", + Field: "payload.elasticsearch.shard_stats.segments.doc_values_memory_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: docValuesMemoryMetric, - FormatType: "bytes", - Units: "", + MetricItem: docValuesMemoryMetric, + FormatType: "bytes", + Units: "", }) case v1.SegmentTermsMemoryMetricKey: //segment terms memory termsMemoryMetric := newMetricItem(v1.SegmentTermsMemoryMetricKey, 13, MemoryGroupKey) - termsMemoryMetric.AddAxi("Segment Terms Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) + termsMemoryMetric.AddAxi("Segment Terms Memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "segment_terms_memory", - Field: "payload.elasticsearch.shard_stats.segments.terms_memory_in_bytes", - ID: util.GetUUID(), + Key: "segment_terms_memory", + Field: "payload.elasticsearch.shard_stats.segments.terms_memory_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: termsMemoryMetric, - FormatType: "bytes", - Units: "", + MetricItem: termsMemoryMetric, + FormatType: "bytes", + Units: "", }) case v1.SegmentFieldsMemoryMetricKey: //segment fields memory fieldsMemoryMetric := newMetricItem(v1.SegmentFieldsMemoryMetricKey, 13, MemoryGroupKey) - fieldsMemoryMetric.AddAxi("Segment Fields Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) + fieldsMemoryMetric.AddAxi("Segment Fields Memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "segment_fields_memory", - Field: "payload.elasticsearch.index_stats.total.segments.stored_fields_memory_in_bytes", - ID: util.GetUUID(), + Key: "segment_fields_memory", + Field: "payload.elasticsearch.index_stats.total.segments.stored_fields_memory_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: fieldsMemoryMetric, - FormatType: "bytes", - Units: "", + MetricItem: fieldsMemoryMetric, + FormatType: "bytes", + Units: "", }) case v1.SegmentIndexWriterMemoryMetricKey: // segment index writer memory - segmentIndexWriterMemoryMetric:=newMetricItem(v1.SegmentIndexWriterMemoryMetricKey, 16, MemoryGroupKey) - segmentIndexWriterMemoryMetric.AddAxi("segment doc values memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true) - indexMetricItems=append(indexMetricItems, GroupMetricItem{ - Key: "segment_index_writer_memory", - 
Field: "payload.elasticsearch.shard_stats.segments.index_writer_memory_in_bytes", - ID: util.GetUUID(), + segmentIndexWriterMemoryMetric := newMetricItem(v1.SegmentIndexWriterMemoryMetricKey, 16, MemoryGroupKey) + segmentIndexWriterMemoryMetric.AddAxi("segment doc values memory", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true) + indexMetricItems = append(indexMetricItems, GroupMetricItem{ + Key: "segment_index_writer_memory", + Field: "payload.elasticsearch.shard_stats.segments.index_writer_memory_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: segmentIndexWriterMemoryMetric, - FormatType: "bytes", - Units: "", + MetricItem: segmentIndexWriterMemoryMetric, + FormatType: "bytes", + Units: "", }) case v1.SegmentTermVectorsMemoryMetricKey: // segment term vectors memory - segmentTermVectorsMemoryMetric:=newMetricItem(v1.SegmentTermVectorsMemoryMetricKey, 16, MemoryGroupKey) - segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true) - indexMetricItems=append(indexMetricItems, GroupMetricItem{ - Key: "segment_term_vectors_memory", - Field: "payload.elasticsearch.shard_stats.segments.term_vectors_memory_in_bytes", - ID: util.GetUUID(), + segmentTermVectorsMemoryMetric := newMetricItem(v1.SegmentTermVectorsMemoryMetricKey, 16, MemoryGroupKey) + segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true) + indexMetricItems = append(indexMetricItems, GroupMetricItem{ + Key: "segment_term_vectors_memory", + Field: "payload.elasticsearch.shard_stats.segments.term_vectors_memory_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: segmentTermVectorsMemoryMetric, - FormatType: "bytes", - Units: "", + MetricItem: segmentTermVectorsMemoryMetric, + FormatType: "bytes", + Units: "", }) case v1.SegmentNormsMetricKey: segmentNormsMetric := newMetricItem(v1.SegmentNormsMetricKey, 17, MemoryGroupKey) - segmentNormsMetric.AddAxi("Segment norms memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) + segmentNormsMetric.AddAxi("Segment norms memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: v1.SegmentNormsMetricKey, - Field: "payload.elasticsearch.shard_stats.segments.norms_memory_in_bytes", - ID: util.GetUUID(), + Key: v1.SegmentNormsMetricKey, + Field: "payload.elasticsearch.shard_stats.segments.norms_memory_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: segmentNormsMetric, - FormatType: "bytes", - Units: "", + MetricItem: segmentNormsMetric, + FormatType: "bytes", + Units: "", }) case v1.SegmentPointsMetricKey: segmentPointsMetric := newMetricItem(v1.SegmentPointsMetricKey, 18, MemoryGroupKey) - segmentPointsMetric.AddAxi("Segment points memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) + segmentPointsMetric.AddAxi("Segment points memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: v1.SegmentPointsMetricKey, - Field: "payload.elasticsearch.shard_stats.segments.points_memory_in_bytes", - ID: util.GetUUID(), + Key: v1.SegmentPointsMetricKey, + Field: "payload.elasticsearch.shard_stats.segments.points_memory_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: segmentPointsMetric, - FormatType: "bytes", - Units: "", + MetricItem: segmentPointsMetric, + 
FormatType: "bytes", + Units: "", }) case v1.VersionMapMetricKey: segmentVersionMapMetric := newMetricItem(v1.VersionMapMetricKey, 18, MemoryGroupKey) - segmentVersionMapMetric.AddAxi("Segment version map memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) + segmentVersionMapMetric.AddAxi("Segment version map memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: v1.VersionMapMetricKey, - Field: "payload.elasticsearch.shard_stats.segments.version_map_memory_in_bytes", - ID: util.GetUUID(), + Key: v1.VersionMapMetricKey, + Field: "payload.elasticsearch.shard_stats.segments.version_map_memory_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: segmentVersionMapMetric, - FormatType: "bytes", - Units: "", + MetricItem: segmentVersionMapMetric, + FormatType: "bytes", + Units: "", }) case v1.FixedBitSetMetricKey: segmentFixedBitSetMetric := newMetricItem(v1.FixedBitSetMetricKey, 18, MemoryGroupKey) - segmentFixedBitSetMetric.AddAxi("Segment fixed bit set memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) + segmentFixedBitSetMetric.AddAxi("Segment fixed bit set memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: v1.FixedBitSetMetricKey, - Field: "payload.elasticsearch.shard_stats.segments.fixed_bit_set_memory_in_bytes", - ID: util.GetUUID(), + Key: v1.FixedBitSetMetricKey, + Field: "payload.elasticsearch.shard_stats.segments.fixed_bit_set_memory_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: segmentFixedBitSetMetric, - FormatType: "bytes", - Units: "", + MetricItem: segmentFixedBitSetMetric, + FormatType: "bytes", + Units: "", }) } - - aggs:=map[string]interface{}{} + aggs := map[string]interface{}{} sumAggs := util.MapStr{} - for _,metricItem:=range indexMetricItems { + for _, metricItem := range indexMetricItems { leafAgg := util.MapStr{ - "max":util.MapStr{ + "max": util.MapStr{ "field": metricItem.Field, }, } - var sumBucketPath = "term_shard>"+ metricItem.ID - aggs[metricItem.ID]= leafAgg + var sumBucketPath = "term_shard>" + metricItem.ID + aggs[metricItem.ID] = leafAgg sumAggs[metricItem.ID] = util.MapStr{ "sum_bucket": util.MapStr{ @@ -676,39 +675,39 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu }, } - if metricItem.Field2 != ""{ + if metricItem.Field2 != "" { leafAgg2 := util.MapStr{ - "max":util.MapStr{ + "max": util.MapStr{ "field": metricItem.Field2, }, } aggs[metricItem.ID+"_field2"] = leafAgg2 - sumAggs[metricItem.ID + "_field2"] = util.MapStr{ + sumAggs[metricItem.ID+"_field2"] = util.MapStr{ "sum_bucket": util.MapStr{ "buckets_path": sumBucketPath + "_field2", }, } } - if metricItem.IsDerivative{ - sumAggs[metricItem.ID+"_deriv"]=util.MapStr{ - "derivative":util.MapStr{ + if metricItem.IsDerivative { + sumAggs[metricItem.ID+"_deriv"] = util.MapStr{ + "derivative": util.MapStr{ "buckets_path": metricItem.ID, }, } if metricItem.Field2 != "" { - sumAggs[metricItem.ID + "_deriv_field2"]=util.MapStr{ - "derivative":util.MapStr{ + sumAggs[metricItem.ID+"_deriv_field2"] = util.MapStr{ + "derivative": util.MapStr{ "buckets_path": metricItem.ID + "_field2", }, } } } } - sumAggs["term_shard"]= util.MapStr{ + sumAggs["term_shard"] = util.MapStr{ "terms": util.MapStr{ "field": "metadata.labels.shard_id", - "size": 10000, + "size": 10000, }, "aggs": aggs, } @@ -728,11 +727,11 @@ func (h *APIHandler) 
getIndexMetrics(ctx context.Context, req *http.Request, clu }, }) } - query["query"]=util.MapStr{ + query["query"] = util.MapStr{ "bool": util.MapStr{ - "must": must, + "must": must, "minimum_should_match": 1, - "should": should, + "should": should, "must_not": []util.MapStr{ { "term": util.MapStr{ @@ -754,8 +753,8 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu }, }, } - query["size"]=0 - query["aggs"]= util.MapStr{ + query["size"] = 0 + query["aggs"] = util.MapStr{ "group_by_level": util.MapStr{ "terms": util.MapStr{ "field": "metadata.labels.index_name", @@ -767,11 +766,11 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu }, "aggs": util.MapStr{ "dates": util.MapStr{ - "date_histogram":util.MapStr{ - "field": "timestamp", + "date_histogram": util.MapStr{ + "field": "timestamp", intervalField: bucketSizeStr, }, - "aggs":sumAggs, + "aggs": sumAggs, }, "max_store_bucket_sort": util.MapStr{ "bucket_sort": util.MapStr{ @@ -805,7 +804,7 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu } -func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top int, lastMinutes int) ([]string, error){ +func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top int, lastMinutes int) ([]string, error) { ver := h.Client().GetVersion() cr, _ := util.VersionCompare(ver.Number, "6.1") if (ver.Distribution == "" || ver.Distribution == elastic.Elasticsearch) && cr == -1 { @@ -813,8 +812,8 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in } var ( now = time.Now() - max = now.UnixNano()/1e6 - min = now.Add(-time.Duration(lastMinutes) * time.Minute).UnixNano()/1e6 + max = now.UnixNano() / 1e6 + min = now.Add(-time.Duration(lastMinutes)*time.Minute).UnixNano() / 1e6 ) clusterUUID, err := h.getClusterUUID(clusterID) if err != nil { @@ -859,8 +858,8 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in if !hasAllPrivilege { must = append(must, util.MapStr{ "query_string": util.MapStr{ - "query": strings.Join(allowedIndices, " "), - "fields": []string{"metadata.labels.index_name"}, + "query": strings.Join(allowedIndices, " "), + "fields": []string{"metadata.labels.index_name"}, "default_operator": "OR", }, }) @@ -884,8 +883,8 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in }, }, }, - "must": must, - "should": should, + "must": must, + "should": should, "minimum_should_match": 1, "filter": []util.MapStr{ { @@ -920,7 +919,7 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in }, "dates": util.MapStr{ "date_histogram": util.MapStr{ - "field": "timestamp", + "field": "timestamp", intervalField: bucketSizeStr, }, "aggs": util.MapStr{ @@ -972,7 +971,7 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in }, "dates": util.MapStr{ "date_histogram": util.MapStr{ - "field": "timestamp", + "field": "timestamp", intervalField: bucketSizeStr, }, "aggs": util.MapStr{ @@ -1005,20 +1004,20 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in }, }, } - response,err:=elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(),util.MustToJSONBytes(query)) - if err!=nil{ + response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query)) + if err != 
nil { log.Error(err) return nil, err } - var maxQpsKVS = map[string] float64{} + var maxQpsKVS = map[string]float64{} for _, agg := range response.Aggregations { for _, bk := range agg.Buckets { key := bk["key"].(string) if maxQps, ok := bk["max_qps"].(map[string]interface{}); ok { val := maxQps["value"].(float64) - if _, ok = maxQpsKVS[key] ; ok { + if _, ok = maxQpsKVS[key]; ok { maxQpsKVS[key] = maxQpsKVS[key] + val - }else{ + } else { maxQpsKVS[key] = val } } @@ -1039,7 +1038,7 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in length = len(qpsValues) } indexNames := []string{} - for i := 0; i t[j].Value //desc } -func (t TopTermOrder) Swap(i, j int){ +func (t TopTermOrder) Swap(i, j int) { t[i], t[j] = t[j], t[i] } diff --git a/modules/elastic/api/index_overview.go b/modules/elastic/api/index_overview.go index 6efdc23f..32956160 100644 --- a/modules/elastic/api/index_overview.go +++ b/modules/elastic/api/index_overview.go @@ -46,41 +46,41 @@ import ( ) func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - resBody:=util.MapStr{} - reqBody := struct{ - Keyword string `json:"keyword"` - Size int `json:"size"` - From int `json:"from"` - Aggregations []elastic.SearchAggParam `json:"aggs"` - Highlight elastic.SearchHighlightParam `json:"highlight"` - Filter elastic.SearchFilterParam `json:"filter"` - Sort []string `json:"sort"` - SearchField string `json:"search_field"` + resBody := util.MapStr{} + reqBody := struct { + Keyword string `json:"keyword"` + Size int `json:"size"` + From int `json:"from"` + Aggregations []elastic.SearchAggParam `json:"aggs"` + Highlight elastic.SearchHighlightParam `json:"highlight"` + Filter elastic.SearchFilterParam `json:"filter"` + Sort []string `json:"sort"` + SearchField string `json:"search_field"` }{} err := h.DecodeJSON(req, &reqBody) if err != nil { resBody["error"] = err.Error() - h.WriteJSON(w,resBody, http.StatusInternalServerError ) + h.WriteJSON(w, resBody, http.StatusInternalServerError) return } aggs := elastic.BuildSearchTermAggregations(reqBody.Aggregations) aggs["term_cluster_id"] = util.MapStr{ "terms": util.MapStr{ "field": "metadata.cluster_id", - "size": 1000, + "size": 1000, }, "aggs": util.MapStr{ "term_cluster_name": util.MapStr{ "terms": util.MapStr{ "field": "metadata.cluster_name", - "size": 1, + "size": 1, }, }, }, } filter := elastic.BuildSearchTermFilter(reqBody.Filter) var should []util.MapStr - if reqBody.SearchField != ""{ + if reqBody.SearchField != "" { should = []util.MapStr{ { "prefix": util.MapStr{ @@ -103,8 +103,8 @@ func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Reques }, }, } - }else{ - if reqBody.Keyword != ""{ + } else { + if reqBody.Keyword != "" { should = []util.MapStr{ { "prefix": util.MapStr{ @@ -149,15 +149,13 @@ func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Reques } } - must := []interface{}{ - } - if indexFilter, hasIndexPri := h.getAllowedIndexFilter(req); hasIndexPri { - if indexFilter != nil{ + must := []interface{}{} + if indexFilter, hasIndexPri := h.getAllowedIndexFilter(req); hasIndexPri { + if indexFilter != nil { must = append(must, indexFilter) } - }else{ - h.WriteJSON(w, elastic.SearchResponse{ - }, http.StatusOK) + } else { + h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK) return } boolQuery := util.MapStr{ @@ -169,7 +167,7 @@ func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Reques }, }, "filter": filter, - "must": must, + 
"must": must, } if len(should) > 0 { boolQuery["should"] = should @@ -178,7 +176,7 @@ func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Reques query := util.MapStr{ "aggs": aggs, "size": reqBody.Size, - "from": reqBody.From, + "from": reqBody.From, "highlight": elastic.BuildSearchHighlight(&reqBody.Highlight), "query": util.MapStr{ "bool": boolQuery, @@ -192,7 +190,7 @@ func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Reques }, } if len(reqBody.Sort) > 1 { - query["sort"] = []util.MapStr{ + query["sort"] = []util.MapStr{ { reqBody.Sort[0]: util.MapStr{ "order": reqBody.Sort[1], @@ -204,14 +202,14 @@ func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Reques response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(orm.GetIndexName(elastic.IndexConfig{}), dsl) if err != nil { resBody["error"] = err.Error() - h.WriteJSON(w,resBody, http.StatusInternalServerError ) + h.WriteJSON(w, resBody, http.StatusInternalServerError) return } w.Write(util.MustToJSONBytes(response)) } -func (h *APIHandler) getAllowedIndexFilter(req *http.Request) (util.MapStr, bool){ +func (h *APIHandler) getAllowedIndexFilter(req *http.Request) (util.MapStr, bool) { hasAllPrivilege, indexPrivilege := h.GetCurrentUserIndex(req) if !hasAllPrivilege && len(indexPrivilege) == 0 { return nil, false @@ -221,10 +219,10 @@ func (h *APIHandler) getAllowedIndexFilter(req *http.Request) (util.MapStr, bool for clusterID, indices := range indexPrivilege { var ( wildcardIndices []string - normalIndices []string + normalIndices []string ) for _, index := range indices { - if strings.Contains(index,"*") { + if strings.Contains(index, "*") { wildcardIndices = append(wildcardIndices, index) continue } @@ -234,8 +232,8 @@ func (h *APIHandler) getAllowedIndexFilter(req *http.Request) (util.MapStr, bool if len(wildcardIndices) > 0 { subShould = append(subShould, util.MapStr{ "query_string": util.MapStr{ - "query": strings.Join(wildcardIndices, " "), - "fields": []string{"metadata.index_name"}, + "query": strings.Join(wildcardIndices, " "), + "fields": []string{"metadata.index_name"}, "default_operator": "OR", }, }) @@ -260,7 +258,7 @@ func (h *APIHandler) getAllowedIndexFilter(req *http.Request) (util.MapStr, bool { "bool": util.MapStr{ "minimum_should_match": 1, - "should": subShould, + "should": subShould, }, }, }, @@ -270,14 +268,14 @@ func (h *APIHandler) getAllowedIndexFilter(req *http.Request) (util.MapStr, bool indexFilter := util.MapStr{ "bool": util.MapStr{ "minimum_should_match": 1, - "should": indexShould, + "should": indexShould, }, } return indexFilter, true } return nil, true } -func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { +func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { var indexIDs []interface{} h.DecodeJSON(req, &indexIDs) @@ -288,8 +286,8 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p indexIDs = indexIDs[0:1] // map indexIDs(cluster_id:index_name => cluster_uuid:indexName) var ( - indexIDM = map[string]string{} - newIndexIDs []interface{} + indexIDM = map[string]string{} + newIndexIDs []interface{} clusterIndexNames = map[string][]string{} ) indexID := indexIDs[0] @@ -318,12 +316,12 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p return } clusterIndexNames[firstClusterID] = append(clusterIndexNames[firstClusterID], 
firstIndexName) - }else{ + } else { h.WriteError(w, fmt.Sprintf("invalid index_id: %v", indexID), http.StatusInternalServerError) return } for clusterID, indexNames := range clusterIndexNames { - clusterUUID, err := adapter.GetClusterUUID(clusterID) + clusterUUID, err := adapter.GetClusterUUID(clusterID) if err != nil { log.Warnf("get cluster uuid error: %v", err) continue } @@ -382,7 +380,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p } if primary == true { indexInfo.Shards++ - }else{ + } else { indexInfo.Replicas++ } indexInfo.Timestamp = hitM["timestamp"] @@ -403,36 +401,36 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p } var metricLen = 15 // indexing rate - indexMetric:=newMetricItem("indexing", 1, OperationGroupKey) + indexMetric := newMetricItem("indexing", 1, OperationGroupKey) indexMetric.OnlyPrimary = true - indexMetric.AddAxi("indexing rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) + indexMetric.AddAxi("indexing rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) nodeMetricItems := []GroupMetricItem{} - nodeMetricItems=append(nodeMetricItems, GroupMetricItem{ - Key: "indexing", - Field: "payload.elasticsearch.shard_stats.indexing.index_total", - ID: util.GetUUID(), + nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ + Key: "indexing", + Field: "payload.elasticsearch.shard_stats.indexing.index_total", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: indexMetric, - FormatType: "num", - Units: "Indexing/s", + MetricItem: indexMetric, + FormatType: "num", + Units: "Indexing/s", }) - queryMetric:=newMetricItem("search", 2, OperationGroupKey) - queryMetric.AddAxi("query rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) - nodeMetricItems=append(nodeMetricItems, GroupMetricItem{ - Key: "search", - Field: "payload.elasticsearch.shard_stats.search.query_total", - ID: util.GetUUID(), + queryMetric := newMetricItem("search", 2, OperationGroupKey) + queryMetric.AddAxi("query rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) + nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ + Key: "search", + Field: "payload.elasticsearch.shard_stats.search.query_total", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: queryMetric, - FormatType: "num", - Units: "Search/s", + MetricItem: queryMetric, + FormatType: "num", + Units: "Search/s", }) - aggs:=map[string]interface{}{} - query :=map[string]interface{}{} - query["query"]=util.MapStr{ + aggs := map[string]interface{}{} + query := map[string]interface{}{} + query["query"] = util.MapStr{ "bool": util.MapStr{ - "must": []util.MapStr{ + "must": []util.MapStr{ { "term": util.MapStr{ "metadata.category": util.MapStr{ @@ -462,7 +460,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p { "range": util.MapStr{ "timestamp": util.MapStr{ - "gte": fmt.Sprintf("now-%ds", metricLen * bucketSize), + "gte": fmt.Sprintf("now-%ds", metricLen*bucketSize), }, }, }, @@ -471,18 +469,18 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p } sumAggs := util.MapStr{} - for _,metricItem:=range nodeMetricItems{ + for _, metricItem := range nodeMetricItems { leafAgg := util.MapStr{ - "max":util.MapStr{ + "max": util.MapStr{ "field": metricItem.Field, }, } - var sumBucketPath = "term_shard>"+ metricItem.ID + var sumBucketPath = "term_shard>" + metricItem.ID if metricItem.MetricItem.OnlyPrimary { filterSubAggs := util.MapStr{ metricItem.ID: leafAgg,
} - aggs["filter_pri"]=util.MapStr{ + aggs["filter_pri"] = util.MapStr{ "filter": util.MapStr{ "term": util.MapStr{ "payload.elasticsearch.shard_stats.routing.primary": util.MapStr{ @@ -492,8 +490,8 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p }, "aggs": filterSubAggs, } - sumBucketPath = "term_shard>filter_pri>"+ metricItem.ID - }else{ + sumBucketPath = "term_shard>filter_pri>" + metricItem.ID + } else { aggs[metricItem.ID] = leafAgg } @@ -502,18 +500,18 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p "buckets_path": sumBucketPath, }, } - if metricItem.IsDerivative{ - sumAggs[metricItem.ID+"_deriv"]=util.MapStr{ - "derivative":util.MapStr{ + if metricItem.IsDerivative { + sumAggs[metricItem.ID+"_deriv"] = util.MapStr{ + "derivative": util.MapStr{ "buckets_path": metricItem.ID, }, } } } - sumAggs["term_shard"]= util.MapStr{ + sumAggs["term_shard"] = util.MapStr{ "terms": util.MapStr{ "field": "metadata.labels.shard_id", - "size": 10000, + "size": 10000, }, "aggs": aggs, } @@ -523,8 +521,8 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p if err != nil { panic(err) } - query["size"]=0 - query["aggs"]= util.MapStr{ + query["size"] = 0 + query["aggs"] = util.MapStr{ "group_by_level": util.MapStr{ "terms": util.MapStr{ "field": "metadata.labels.index_id", @@ -532,11 +530,11 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p }, "aggs": util.MapStr{ "dates": util.MapStr{ - "date_histogram":util.MapStr{ - "field": "timestamp", + "date_histogram": util.MapStr{ + "field": "timestamp", intervalField: bucketSizeStr, }, - "aggs":sumAggs, + "aggs": sumAggs, }, }, }, @@ -549,9 +547,8 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p indexMetrics := map[string]util.MapStr{} for key, item := range metrics { for _, line := range item.Lines { - if _, ok := indexMetrics[line.Metric.Label]; !ok{ - indexMetrics[line.Metric.Label] = util.MapStr{ - } + if _, ok := indexMetrics[line.Metric.Label]; !ok { + indexMetrics[line.Metric.Label] = util.MapStr{} } indexMetrics[line.Metric.Label][key] = line.Data } @@ -601,11 +598,11 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h indexID := ps.MustGetParameter("index") parts := strings.Split(indexID, ":") if len(parts) > 1 && !h.IsIndexAllowed(req, clusterID, parts[1]) { - h.WriteError(w, http.StatusText(http.StatusForbidden), http.StatusForbidden) + h.WriteError(w, http.StatusText(http.StatusForbidden), http.StatusForbidden) return } if len(parts) < 2 { - h.WriteError(w, "invalid index id: "+ indexID, http.StatusInternalServerError) + h.WriteError(w, "invalid index id: "+indexID, http.StatusInternalServerError) return } @@ -635,7 +632,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h return } q1 := orm.Query{ - Size: 1000, + Size: 1000, WildcardIndex: true, } q1.Conds = orm.And( @@ -651,9 +648,9 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h summary := util.MapStr{} hit := response.Hits.Hits[0].Source var ( - shardsNum int + shardsNum int replicasNum int - indexInfo = util.MapStr{ + indexInfo = util.MapStr{ "index": parts[1], } ) @@ -683,7 +680,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h storeInBytes, _ := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "shard_stats", "store", "size_in_bytes"}, resultM) if docs, ok := 
util.GetMapValueByKeys([]string{"payload", "elasticsearch", "shard_stats", "docs", "count"}, resultM); ok { //summary["docs"] = docs - if v, ok := docs.(float64); ok && primary == true{ + if v, ok := docs.(float64); ok && primary == true { shardSum.DocsCount += int64(v) } } @@ -695,7 +692,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h } if primary == true { shardSum.Shards++ - }else{ + } else { shardSum.Replicas++ } } @@ -706,7 +703,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h indexInfo["store_size"] = util.FormatBytes(float64(shardSum.StoreInBytes), 1) indexInfo["shards"] = shardSum.Shards + shardSum.Replicas - summary["unassigned_shards"] = (replicasNum + 1) * shardsNum - shardSum.Shards - shardSum.Replicas + summary["unassigned_shards"] = (replicasNum+1)*shardsNum - shardSum.Shards - shardSum.Replicas } summary["index_info"] = indexInfo @@ -721,7 +718,7 @@ func (h *APIHandler) GetIndexShards(w http.ResponseWriter, req *http.Request, ps } indexName := ps.MustGetParameter("index") q1 := orm.Query{ - Size: 1000, + Size: 1000, WildcardIndex: true, } clusterUUID, err := adapter.GetClusterUUID(clusterID) @@ -742,7 +739,7 @@ func (h *APIHandler) GetIndexShards(w http.ResponseWriter, req *http.Request, ps err, result := orm.Search(&event.Event{}, &q1) if err != nil { log.Error(err) - h.WriteError(w,err.Error(), http.StatusInternalServerError ) + h.WriteError(w, err.Error(), http.StatusInternalServerError) return } var shards = []interface{}{} @@ -756,7 +753,7 @@ func (h *APIHandler) GetIndexShards(w http.ResponseWriter, req *http.Request, ps err, nodesResult := orm.Search(elastic.NodeConfig{}, q) if err != nil { log.Error(err) - h.WriteError(w,err.Error(), http.StatusInternalServerError ) + h.WriteError(w, err.Error(), http.StatusInternalServerError) return } nodeIDToName := util.MapStr{} @@ -803,7 +800,7 @@ func (h *APIHandler) GetIndexShards(w http.ResponseWriter, req *http.Request, ps primary, _ := shardM.GetValue("routing.primary") if primary == true { shardInfo["prirep"] = "p" - }else{ + } else { shardInfo["prirep"] = "r" } shardInfo["state"], _ = shardM.GetValue("routing.state") @@ -880,11 +877,11 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ var metricType string if metricKey == v1.IndexHealthMetricKey { metricType = v1.MetricTypeClusterHealth - }else{ + } else { //for agent mode metricType = v1.MetricTypeNodeStats } - bucketSize, min, max, err := h.GetMetricRangeAndBucketSize(req, clusterID, metricType,60) + bucketSize, min, max, err := h.GetMetricRangeAndBucketSize(req, clusterID, metricType, 60) if err != nil { log.Error(err) resBody["error"] = err @@ -892,7 +889,7 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ return } if bucketSize <= 60 { - min = min - int64(2 * bucketSize * 1000) + min = min - int64(2*bucketSize*1000) } timeout := h.GetParameterOrDefault(req, "timeout", "60s") du, err := time.ParseDuration(timeout) @@ -947,14 +944,14 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ return } metrics["shard_state"] = shardStateMetric - }else if metricKey == v1.IndexHealthMetricKey { - healthMetric, err := h.GetIndexHealthMetric(ctx, clusterID, indexName, min, max, bucketSize) - if err != nil { - log.Error(err) - h.WriteError(w, err, http.StatusInternalServerError) - return - } - metrics["index_health"] = healthMetric + } else if metricKey == v1.IndexHealthMetricKey { + healthMetric, err := 
h.GetIndexHealthMetric(ctx, clusterID, indexName, min, max, bucketSize) + if err != nil { + log.Error(err) + h.WriteError(w, err, http.StatusInternalServerError) + return + } + metrics["index_health"] = healthMetric } else { switch metricKey { case v1.IndexThroughputMetricKey: @@ -1037,7 +1034,7 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ minBucketSize, err := v1.GetMetricMinBucketSize(clusterID, metricType) if err != nil { log.Error(err) - }else{ + } else { metrics[metricKey].MinBucketSize = int64(minBucketSize) } } @@ -1047,8 +1044,8 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ h.WriteJSON(w, resBody, http.StatusOK) } -func (h *APIHandler) getIndexShardsMetric(ctx context.Context, id, indexName string, min, max int64, bucketSize int)(*common.MetricItem, error){ - bucketSizeStr:=fmt.Sprintf("%vs",bucketSize) +func (h *APIHandler) getIndexShardsMetric(ctx context.Context, id, indexName string, min, max int64, bucketSize int) (*common.MetricItem, error) { + bucketSizeStr := fmt.Sprintf("%vs", bucketSize) intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr) if err != nil { return nil, err @@ -1101,14 +1098,14 @@ func (h *APIHandler) getIndexShardsMetric(ctx context.Context, id, indexName str "aggs": util.MapStr{ "dates": util.MapStr{ "date_histogram": util.MapStr{ - "field": "timestamp", + "field": "timestamp", intervalField: bucketSizeStr, }, "aggs": util.MapStr{ "groups": util.MapStr{ "terms": util.MapStr{ "field": "payload.elasticsearch.shard_stats.routing.state", - "size": 10, + "size": 10, }, }, }, @@ -1122,8 +1119,8 @@ func (h *APIHandler) getIndexShardsMetric(ctx context.Context, id, indexName str return nil, err } - metricItem:=newMetricItem("shard_state", 0, "") - metricItem.AddLine("Shard State","Shard State","","group1","payload.elasticsearch.shard_stats.routing.state","max",bucketSizeStr,"","ratio","0.[00]","0.[00]",false,false) + metricItem := newMetricItem("shard_state", 0, "") + metricItem.AddLine("Shard State", "Shard State", "", "group1", "payload.elasticsearch.shard_stats.routing.state", "max", bucketSizeStr, "", "ratio", "0.[00]", "0.[00]", false, false) metricData := []interface{}{} if response.StatusCode == 200 { @@ -1140,7 +1137,7 @@ func (h *APIHandler) getIndexShardsMetric(ctx context.Context, id, indexName str } func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - resBody := map[string] interface{}{} + resBody := map[string]interface{}{} id := ps.ByName("id") indexName := ps.ByName("index") if !h.IsIndexAllowed(req, id, indexName) { @@ -1149,7 +1146,7 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps }, http.StatusForbidden) return } - q := &orm.Query{ Size: 1} + q := &orm.Query{Size: 1} q.AddSort("timestamp", orm.DESC) q.Conds = orm.And( orm.Eq("metadata.category", "elasticsearch"), @@ -1161,13 +1158,13 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps err, result := orm.Search(event.Event{}, q) if err != nil { resBody["error"] = err.Error() - h.WriteJSON(w,resBody, http.StatusInternalServerError ) + h.WriteJSON(w, resBody, http.StatusInternalServerError) } namesM := util.MapStr{} if len(result.Result) > 0 { if data, ok := result.Result[0].(map[string]interface{}); ok { if routingTable, exists := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "index_routing_table"}, data); exists { - if 
table, ok := routingTable.(map[string]interface{}); ok{ + if table, ok := routingTable.(map[string]interface{}); ok { if shardsM, ok := table["shards"].(map[string]interface{}); ok { for _, rows := range shardsM { if rowsArr, ok := rows.([]interface{}); ok { @@ -1189,12 +1186,12 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps } //node uuid - nodeIds := make([]interface{}, 0, len(namesM) ) + nodeIds := make([]interface{}, 0, len(namesM)) for name, _ := range namesM { nodeIds = append(nodeIds, name) } - q1 := &orm.Query{ Size: 100} + q1 := &orm.Query{Size: 100} q1.AddSort("timestamp", orm.DESC) q1.Conds = orm.And( orm.Eq("metadata.category", "elasticsearch"), @@ -1204,7 +1201,7 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps err, result = orm.Search(elastic.NodeConfig{}, q1) if err != nil { resBody["error"] = err.Error() - h.WriteJSON(w,resBody, http.StatusInternalServerError ) + h.WriteJSON(w, resBody, http.StatusInternalServerError) } nodes := []interface{}{} for _, hit := range result.Result { @@ -1224,11 +1221,11 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps if v, ok := nodeId.(string); ok { ninfo := util.MapStr{ - "id": v, - "name": nodeName, - "ip": ip, - "port": port, - "status": status, + "id": v, + "name": nodeName, + "ip": ip, + "port": port, + "status": status, "timestamp": hitM["timestamp"], } nodes = append(nodes, ninfo) @@ -1249,7 +1246,7 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr } var must = []util.MapStr{} - if !util.StringInArray(ids, "*"){ + if !util.StringInArray(ids, "*") { must = append(must, util.MapStr{ "terms": util.MapStr{ @@ -1260,9 +1257,8 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr if keyword != "" { must = append(must, util.MapStr{ - "wildcard":util.MapStr{ - "metadata.index_name": - util.MapStr{"value": fmt.Sprintf("*%s*", keyword)}, + "wildcard": util.MapStr{ + "metadata.index_name": util.MapStr{"value": fmt.Sprintf("*%s*", keyword)}, }, }) } @@ -1288,7 +1284,6 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr }, } - esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)) indexName := orm.GetIndexName(elastic.IndexConfig{}) resp, err := esClient.SearchWithRawQueryDSL(indexName, util.MustToJSONBytes(dsl)) @@ -1310,22 +1305,22 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr return } -//deleteIndexMetadata used to delete index metadata after index is deleted from cluster +// deleteIndexMetadata used to delete index metadata after index is deleted from cluster func (h APIHandler) deleteIndexMetadata(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)) indexName := orm.GetIndexName(elastic.IndexConfig{}) - must := []util.MapStr{ + must := []util.MapStr{ { "term": util.MapStr{ "metadata.labels.state": "delete", }, }, } - if indexFilter, hasIndexPri := h.getAllowedIndexFilter(req); hasIndexPri { + if indexFilter, hasIndexPri := h.getAllowedIndexFilter(req); hasIndexPri { if indexFilter != nil { must = append(must, indexFilter) } - }else{ + } else { //has no any index permission, just return h.WriteAckOKJSON(w) return diff --git a/modules/elastic/api/manage.go b/modules/elastic/api/manage.go index b33c74b9..12a3d379 100644 --- a/modules/elastic/api/manage.go +++ 
b/modules/elastic/api/manage.go @@ -540,7 +540,7 @@ func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http ClusterStorageMetricKey, ClusterIndicesMetricKey, ClusterNodeCountMetricKey: - metricType = v1.MetricTypeClusterStats + metricType = v1.MetricTypeClusterStats case ClusterHealthMetricKey: metricType = v1.MetricTypeClusterStats case ShardCountMetricKey: @@ -570,7 +570,7 @@ func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http defer cancel() if util.StringInArray([]string{v1.IndexThroughputMetricKey, v1.SearchThroughputMetricKey, v1.IndexLatencyMetricKey, v1.SearchLatencyMetricKey}, key) { metrics, err = h.GetClusterIndexMetrics(ctx, id, bucketSize, min, max, key) - }else{ + } else { metrics, err = h.GetClusterMetrics(ctx, id, bucketSize, min, max, key) } if err != nil { @@ -583,7 +583,7 @@ func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http minBucketSize, err := v1.GetMetricMinBucketSize(id, metricType) if err != nil { log.Error(err) - }else{ + } else { metrics[key].MinBucketSize = int64(minBucketSize) } } @@ -634,7 +634,7 @@ func (h *APIHandler) HandleNodeMetricsAction(w http.ResponseWriter, req *http.Re minBucketSize, err := v1.GetMetricMinBucketSize(id, v1.MetricTypeNodeStats) if err != nil { log.Error(err) - }else{ + } else { metrics[key].MinBucketSize = int64(minBucketSize) } } @@ -751,7 +751,7 @@ func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.R } } - }else{ + } else { metrics, err = h.getIndexMetrics(ctx, req, id, bucketSize, min, max, indexName, top, shardID, key) if err != nil { log.Error(err) @@ -764,7 +764,7 @@ func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.R minBucketSize, err := v1.GetMetricMinBucketSize(id, v1.MetricTypeNodeStats) if err != nil { log.Error(err) - }else{ + } else { metrics[key].MinBucketSize = int64(minBucketSize) } } @@ -822,7 +822,7 @@ func (h *APIHandler) HandleQueueMetricsAction(w http.ResponseWriter, req *http.R minBucketSize, err := v1.GetMetricMinBucketSize(id, v1.MetricTypeNodeStats) if err != nil { log.Error(err) - }else{ + } else { metrics[key].MinBucketSize = int64(minBucketSize) } } @@ -949,20 +949,20 @@ const ( ) const ( - ClusterStorageMetricKey = "cluster_storage" + ClusterStorageMetricKey = "cluster_storage" ClusterDocumentsMetricKey = "cluster_documents" - ClusterIndicesMetricKey = "cluster_indices" + ClusterIndicesMetricKey = "cluster_indices" ClusterNodeCountMetricKey = "node_count" - ClusterHealthMetricKey = "cluster_health" - ShardCountMetricKey = "shard_count" - CircuitBreakerMetricKey = "circuit_breaker" + ClusterHealthMetricKey = "cluster_health" + ShardCountMetricKey = "shard_count" + CircuitBreakerMetricKey = "circuit_breaker" ) func (h *APIHandler) GetClusterMetrics(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) (map[string]*common.MetricItem, error) { var ( - clusterMetricsResult = map[string]*common.MetricItem {} - err error + clusterMetricsResult = map[string]*common.MetricItem{} + err error ) switch metricKey { case ClusterDocumentsMetricKey, @@ -1277,7 +1277,7 @@ func (h *APIHandler) getCircuitBreakerMetric(ctx context.Context, id string, min "query": util.MapStr{ "bool": util.MapStr{ "minimum_should_match": 1, - "should": should, + "should": should, "must": []util.MapStr{ { "term": util.MapStr{ diff --git a/modules/elastic/api/metrics_util.go b/modules/elastic/api/metrics_util.go index 73441d69..2bb6cc5f 100644 --- 
a/modules/elastic/api/metrics_util.go +++ b/modules/elastic/api/metrics_util.go @@ -112,7 +112,7 @@ func generateGroupAggs(nodeMetricItems []GroupMetricItem) map[string]interface{} func (h *APIHandler) getMetrics(ctx context.Context, query map[string]interface{}, grpMetricItems []GroupMetricItem, bucketSize int) (map[string]*common.MetricItem, error) { bucketSizeStr := fmt.Sprintf("%vs", bucketSize) queryDSL := util.MustToJSONBytes(query) - response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(),nil, queryDSL) + response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL) if err != nil { return nil, err } @@ -205,12 +205,12 @@ func (h *APIHandler) getMetrics(ctx context.Context, query map[string]interface{ dataKey = dataKey + "_deriv" } line.Data = grpMetricData[dataKey][line.Metric.Label] - if v, ok := line.Data.([][]interface{}); ok && len(v)> 0 && bucketSize <= 60 { + if v, ok := line.Data.([][]interface{}); ok && len(v) > 0 && bucketSize <= 60 { // remove first metric dot temp := v[1:] // // remove first last dot if len(temp) > 0 { - temp = temp[0: len(temp)-1] + temp = temp[0 : len(temp)-1] } line.Data = temp } @@ -369,9 +369,9 @@ func (h *APIHandler) getSingleMetrics(ctx context.Context, metricItems []*common }, } queryDSL := util.MustToJSONBytes(query) - response, err := elastic.GetClient(clusterID).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL) + response, err := elastic.GetClient(clusterID).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL) if err != nil { - return nil, err + return nil, err } var minDate, maxDate int64 @@ -429,12 +429,12 @@ func (h *APIHandler) getSingleMetrics(ctx context.Context, metricItems []*common for _, line := range metricItem.Lines { line.TimeRange = common.TimeRange{Min: minDate, Max: maxDate} line.Data = metricData[line.Metric.GetDataKey()] - if v, ok := line.Data.([][]interface{}); ok && len(v)> 0 && bucketSize <= 60 { + if v, ok := line.Data.([][]interface{}); ok && len(v) > 0 && bucketSize <= 60 { // remove first metric dot temp := v[1:] // // remove first last dot if len(temp) > 0 { - temp = temp[0: len(temp)-1] + temp = temp[0 : len(temp)-1] } line.Data = temp } @@ -912,13 +912,13 @@ func parseGroupMetricData(buckets []elastic.BucketBase, isPercent bool) ([]inter if bkMap, ok := statusBk.(map[string]interface{}); ok { statusKey := bkMap["key"].(string) count := bkMap["doc_count"].(float64) - if isPercent{ + if isPercent { metricData = append(metricData, map[string]interface{}{ "x": dateTime, "y": count / totalCount * 100, "g": statusKey, }) - }else{ + } else { metricData = append(metricData, map[string]interface{}{ "x": dateTime, "y": count, @@ -950,12 +950,12 @@ func (h *APIHandler) getSingleIndexMetricsByNodeStats(ctx context.Context, metri "field": line.Metric.Field, }, } - var sumBucketPath = "term_node>"+ line.Metric.ID + var sumBucketPath = "term_node>" + line.Metric.ID aggs[line.Metric.ID] = leafAgg sumAggs[line.Metric.ID] = util.MapStr{ "sum_bucket": util.MapStr{ - "buckets_path": sumBucketPath, + "buckets_path": sumBucketPath, }, } if line.Metric.Field2 != "" { @@ -966,9 +966,9 @@ func (h *APIHandler) getSingleIndexMetricsByNodeStats(ctx context.Context, metri } aggs[line.Metric.ID+"_field2"] = leafAgg2 - sumAggs[line.Metric.ID + "_field2"] = util.MapStr{ + sumAggs[line.Metric.ID+"_field2"] = util.MapStr{ "sum_bucket": util.MapStr{ - "buckets_path": sumBucketPath+"_field2", 
+ "buckets_path": sumBucketPath + "_field2", }, } } @@ -991,10 +991,10 @@ func (h *APIHandler) getSingleIndexMetricsByNodeStats(ctx context.Context, metri } } - sumAggs["term_node"]= util.MapStr{ + sumAggs["term_node"] = util.MapStr{ "terms": util.MapStr{ "field": "metadata.labels.node_id", - "size": 1000, + "size": 1000, }, "aggs": aggs, } @@ -1015,7 +1015,7 @@ func (h *APIHandler) getSingleIndexMetricsByNodeStats(ctx context.Context, metri "aggs": sumAggs, }, } - return parseSingleIndexMetrics(ctx, clusterID, metricItems, query, bucketSize,metricData, metricItemsMap) + return parseSingleIndexMetrics(ctx, clusterID, metricItems, query, bucketSize, metricData, metricItemsMap) } func (h *APIHandler) getSingleIndexMetrics(ctx context.Context, metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int) (map[string]*common.MetricItem, error) { @@ -1035,11 +1035,11 @@ func (h *APIHandler) getSingleIndexMetrics(ctx context.Context, metricItems []*c "field": line.Metric.Field, }, } - var sumBucketPath = "term_shard>"+ line.Metric.ID + var sumBucketPath = "term_shard>" + line.Metric.ID aggs[line.Metric.ID] = leafAgg sumAggs[line.Metric.ID] = util.MapStr{ "sum_bucket": util.MapStr{ - "buckets_path": sumBucketPath, + "buckets_path": sumBucketPath, }, } if line.Metric.Field2 != "" { @@ -1050,9 +1050,9 @@ func (h *APIHandler) getSingleIndexMetrics(ctx context.Context, metricItems []*c } aggs[line.Metric.ID+"_field2"] = leafAgg2 - sumAggs[line.Metric.ID + "_field2"] = util.MapStr{ + sumAggs[line.Metric.ID+"_field2"] = util.MapStr{ "sum_bucket": util.MapStr{ - "buckets_path": sumBucketPath+"_field2", + "buckets_path": sumBucketPath + "_field2", }, } } @@ -1075,10 +1075,10 @@ func (h *APIHandler) getSingleIndexMetrics(ctx context.Context, metricItems []*c } } - sumAggs["term_shard"]= util.MapStr{ + sumAggs["term_shard"] = util.MapStr{ "terms": util.MapStr{ "field": "metadata.labels.shard_id", - "size": 100000, + "size": 100000, }, "aggs": aggs, } @@ -1092,7 +1092,7 @@ func (h *APIHandler) getSingleIndexMetrics(ctx context.Context, metricItems []*c if len(metricItems) > 0 && len(metricItems[0].Lines) > 0 && metricItems[0].Lines[0].Metric.OnlyPrimary { query["query"] = util.MapStr{ "bool": util.MapStr{ - "must": []util.MapStr{ + "must": []util.MapStr{ query["query"].(util.MapStr), {"term": util.MapStr{"payload.elasticsearch.shard_stats.routing.primary": true}}, }, @@ -1109,7 +1109,7 @@ func (h *APIHandler) getSingleIndexMetrics(ctx context.Context, metricItems []*c "aggs": sumAggs, }, } - return parseSingleIndexMetrics(ctx, clusterID, metricItems, query, bucketSize,metricData, metricItemsMap) + return parseSingleIndexMetrics(ctx, clusterID, metricItems, query, bucketSize, metricData, metricItemsMap) } func parseSingleIndexMetrics(ctx context.Context, clusterID string, metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int, metricData map[string][][]interface{}, metricItemsMap map[string]*common.MetricLine) (map[string]*common.MetricItem, error) { @@ -1174,12 +1174,12 @@ func parseSingleIndexMetrics(ctx context.Context, clusterID string, metricItems for _, line := range metricItem.Lines { line.TimeRange = common.TimeRange{Min: minDate, Max: maxDate} line.Data = metricData[line.Metric.GetDataKey()] - if v, ok := line.Data.([][]interface{}); ok && len(v)> 0 && bucketSize <= 60 { + if v, ok := line.Data.([][]interface{}); ok && len(v) > 0 && bucketSize <= 60 { // remove first metric dot temp := v[1:] // // remove first last dot if len(temp) > 0 { - temp = temp[0: 
len(temp)-1] + temp = temp[0 : len(temp)-1] } line.Data = temp } @@ -1190,4 +1190,4 @@ func parseSingleIndexMetrics(ctx context.Context, clusterID string, metricItems } return result, nil -} \ No newline at end of file +} diff --git a/modules/elastic/api/metrics_util_test.go b/modules/elastic/api/metrics_util_test.go index ee60c8d9..86a613d6 100644 --- a/modules/elastic/api/metrics_util_test.go +++ b/modules/elastic/api/metrics_util_test.go @@ -33,83 +33,81 @@ import ( ) func TestGetMetricParams(t *testing.T) { - handler:=APIHandler{} - req, err :=http.NewRequest("GET","https://infinilabs.com/api/?bucket_size=1m",nil) + handler := APIHandler{} + req, err := http.NewRequest("GET", "https://infinilabs.com/api/?bucket_size=1m", nil) if err != nil { t.Fatal(err) } - bucketSize, min, max, err:=handler.GetMetricRangeAndBucketSize(req,"", "",15) + bucketSize, min, max, err := handler.GetMetricRangeAndBucketSize(req, "", "", 15) fmt.Println(bucketSize) - fmt.Println(util.FormatUnixTimestamp(min/1000))//2022-01-27 15:28:57 - fmt.Println(util.FormatUnixTimestamp(max/1000))//2022-01-27 15:28:57 - fmt.Println(time.Now())//2022-01-27 15:28:57 + fmt.Println(util.FormatUnixTimestamp(min / 1000)) //2022-01-27 15:28:57 + fmt.Println(util.FormatUnixTimestamp(max / 1000)) //2022-01-27 15:28:57 + fmt.Println(time.Now()) //2022-01-27 15:28:57 fmt.Println(bucketSize, min, max, err) } func TestConvertBucketItemsToAggQueryParams(t *testing.T) { - bucketItem:=common.BucketItem{} - bucketItem.Key="key1" - bucketItem.Type=common.TermsBucket - bucketItem.Parameters=map[string]interface{}{} - bucketItem.Parameters["field"]="metadata.labels.cluster_id" - bucketItem.Parameters["size"]=2 - - - nestBucket:=common.BucketItem{} - nestBucket.Key="key2" - nestBucket.Type=common.DateHistogramBucket - nestBucket.Parameters=map[string]interface{}{} - nestBucket.Parameters["field"]="timestamp" - nestBucket.Parameters["calendar_interval"]="1d" - nestBucket.Parameters["time_zone"]="+08:00" - - leafBucket:=common.NewBucketItem(common.TermsBucket,util.MapStr{ - "size":5, - "field":"payload.elasticsearch.cluster_health.status", + bucketItem := common.BucketItem{} + bucketItem.Key = "key1" + bucketItem.Type = common.TermsBucket + bucketItem.Parameters = map[string]interface{}{} + bucketItem.Parameters["field"] = "metadata.labels.cluster_id" + bucketItem.Parameters["size"] = 2 + + nestBucket := common.BucketItem{} + nestBucket.Key = "key2" + nestBucket.Type = common.DateHistogramBucket + nestBucket.Parameters = map[string]interface{}{} + nestBucket.Parameters["field"] = "timestamp" + nestBucket.Parameters["calendar_interval"] = "1d" + nestBucket.Parameters["time_zone"] = "+08:00" + + leafBucket := common.NewBucketItem(common.TermsBucket, util.MapStr{ + "size": 5, + "field": "payload.elasticsearch.cluster_health.status", }) - leafBucket.Key="key3" + leafBucket.Key = "key3" - metricItems:=[]*common.MetricItem{} - var bucketSizeStr ="10s" - metricItem:=newMetricItem("cluster_summary", 2, "cluster") - metricItem.Key="key4" - metricItem.AddLine("Indexing","Total Indexing","Number of documents being indexed for primary and replica shards.","group1", - "payload.elasticsearch.index_stats.total.indexing.index_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true) - metricItem.AddLine("Search","Total Search","Number of search requests being executed across primary and replica shards. 
A single search can run against multiple shards!","group1", - "payload.elasticsearch.index_stats.total.search.query_total","max",bucketSizeStr,"query/s","num","0,0.[00]","0,0.[00]",false,true) - metricItems=append(metricItems,metricItem) + metricItems := []*common.MetricItem{} + var bucketSizeStr = "10s" + metricItem := newMetricItem("cluster_summary", 2, "cluster") + metricItem.Key = "key4" + metricItem.AddLine("Indexing", "Total Indexing", "Number of documents being indexed for primary and replica shards.", "group1", + "payload.elasticsearch.index_stats.total.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true) + metricItem.AddLine("Search", "Total Search", "Number of search requests being executed across primary and replica shards. A single search can run against multiple shards!", "group1", + "payload.elasticsearch.index_stats.total.search.query_total", "max", bucketSizeStr, "query/s", "num", "0,0.[00]", "0,0.[00]", false, true) + metricItems = append(metricItems, metricItem) nestBucket.AddNestBucket(leafBucket) - nestBucket.Metrics=metricItems + nestBucket.Metrics = metricItems - bucketItem.Buckets=[]*common.BucketItem{} - bucketItem.Buckets=append(bucketItem.Buckets,&nestBucket) + bucketItem.Buckets = []*common.BucketItem{} + bucketItem.Buckets = append(bucketItem.Buckets, &nestBucket) - - aggs:=ConvertBucketItemsToAggQuery([]*common.BucketItem{&bucketItem},nil) + aggs := ConvertBucketItemsToAggQuery([]*common.BucketItem{&bucketItem}, nil) fmt.Println(util.MustToJSON(aggs)) - response:="{ \"took\": 37, \"timed_out\": false, \"_shards\": { \"total\": 1, \"successful\": 1, \"skipped\": 0, \"failed\": 0 }, \"hits\": { \"total\": { \"value\": 10000, \"relation\": \"gte\" }, \"max_score\": null, \"hits\": [] }, \"aggregations\": { \"key1\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"c7pqhptj69a0sg3rn05g\", \"doc_count\": 80482, \"key2\": { \"buckets\": [ { \"key_as_string\": \"2022-01-28T00:00:00.000+08:00\", \"key\": 1643299200000, \"doc_count\": 14310, \"c7qi5hii4h935v9bs91g\": { \"value\": 15680 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 2985 } }, { \"key_as_string\": \"2022-01-29T00:00:00.000+08:00\", \"key\": 1643385600000, \"doc_count\": 66172, \"c7qi5hii4h935v9bs91g\": { \"value\": 106206 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 20204 }, \"c7qi5hii4h935v9bs91g_deriv\": { \"value\": 90526 }, \"c7qi5hii4h935v9bs920_deriv\": { \"value\": 17219 } } ] } }, { \"key\": \"c7qi42ai4h92sksk979g\", \"doc_count\": 660, \"key2\": { \"buckets\": [ { \"key_as_string\": \"2022-01-29T00:00:00.000+08:00\", \"key\": 1643385600000, \"doc_count\": 660, \"c7qi5hii4h935v9bs91g\": { \"value\": 106206 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 20204 } } ] } } ] } } }" - res:=SearchResponse{} - util.FromJSONBytes([]byte(response),&res) + response := "{ \"took\": 37, \"timed_out\": false, \"_shards\": { \"total\": 1, \"successful\": 1, \"skipped\": 0, \"failed\": 0 }, \"hits\": { \"total\": { \"value\": 10000, \"relation\": \"gte\" }, \"max_score\": null, \"hits\": [] }, \"aggregations\": { \"key1\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"c7pqhptj69a0sg3rn05g\", \"doc_count\": 
80482, \"key2\": { \"buckets\": [ { \"key_as_string\": \"2022-01-28T00:00:00.000+08:00\", \"key\": 1643299200000, \"doc_count\": 14310, \"c7qi5hii4h935v9bs91g\": { \"value\": 15680 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 2985 } }, { \"key_as_string\": \"2022-01-29T00:00:00.000+08:00\", \"key\": 1643385600000, \"doc_count\": 66172, \"c7qi5hii4h935v9bs91g\": { \"value\": 106206 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 20204 }, \"c7qi5hii4h935v9bs91g_deriv\": { \"value\": 90526 }, \"c7qi5hii4h935v9bs920_deriv\": { \"value\": 17219 } } ] } }, { \"key\": \"c7qi42ai4h92sksk979g\", \"doc_count\": 660, \"key2\": { \"buckets\": [ { \"key_as_string\": \"2022-01-29T00:00:00.000+08:00\", \"key\": 1643385600000, \"doc_count\": 660, \"c7qi5hii4h935v9bs91g\": { \"value\": 106206 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 20204 } } ] } } ] } } }" + res := SearchResponse{} + util.FromJSONBytes([]byte(response), &res) fmt.Println(response) - groupKey:="key1" - metricLabelKey:="key2" - metricValueKey:="c7qi5hii4h935v9bs920" - data:=ParseAggregationResult(int(10),res.Aggregations,groupKey,metricLabelKey,metricValueKey) + groupKey := "key1" + metricLabelKey := "key2" + metricValueKey := "c7qi5hii4h935v9bs920" + data := ParseAggregationResult(int(10), res.Aggregations, groupKey, metricLabelKey, metricValueKey) fmt.Println(data) } func TestConvertBucketItems(t *testing.T) { - response:="{ \"took\": 8, \"timed_out\": false, \"_shards\": { \"total\": 1, \"successful\": 1, \"skipped\": 0, \"failed\": 0 }, \"hits\": { \"total\": { \"value\": 81, \"relation\": \"eq\" }, \"max_score\": null, \"hits\": [] }, \"aggregations\": { \"c7v2gm3i7638vvo4pv80\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"c7uv7p3i76360kgdmpb0\", \"doc_count\": 81, \"c7v2gm3i7638vvo4pv8g\": { \"buckets\": [ { \"key_as_string\": \"2022-02-05T00:00:00.000+08:00\", \"key\": 1643990400000, \"doc_count\": 81, \"c7v2gm3i7638vvo4pv90\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"yellow\", \"doc_count\": 81 } ] } } ] } } ] } } }" - res:=SearchResponse{} - util.FromJSONBytes([]byte(response),&res) + response := "{ \"took\": 8, \"timed_out\": false, \"_shards\": { \"total\": 1, \"successful\": 1, \"skipped\": 0, \"failed\": 0 }, \"hits\": { \"total\": { \"value\": 81, \"relation\": \"eq\" }, \"max_score\": null, \"hits\": [] }, \"aggregations\": { \"c7v2gm3i7638vvo4pv80\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"c7uv7p3i76360kgdmpb0\", \"doc_count\": 81, \"c7v2gm3i7638vvo4pv8g\": { \"buckets\": [ { \"key_as_string\": \"2022-02-05T00:00:00.000+08:00\", \"key\": 1643990400000, \"doc_count\": 81, \"c7v2gm3i7638vvo4pv90\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"yellow\", \"doc_count\": 81 } ] } } ] } } ] } } }" + res := SearchResponse{} + util.FromJSONBytes([]byte(response), &res) - data:=ParseAggregationBucketResult(int(10),res.Aggregations,"c7v2gm3i7638vvo4pv80","c7v2gm3i7638vvo4pv8g","c7v2gm3i7638vvo4pv90", func() { + data := ParseAggregationBucketResult(int(10), res.Aggregations, "c7v2gm3i7638vvo4pv80", "c7v2gm3i7638vvo4pv8g", "c7v2gm3i7638vvo4pv90", func() { }) diff 
--git a/modules/elastic/api/node_metrics.go b/modules/elastic/api/node_metrics.go index 8f4ac425..0838b222 100644 --- a/modules/elastic/api/node_metrics.go +++ b/modules/elastic/api/node_metrics.go @@ -37,78 +37,78 @@ import ( ) const ( - NodeOSCPUMetricKey = "os_cpu" - NodeProcessCPUMetricKey = "cpu" - OSUsedMemoryMetricKey = "os_used_mem" - OSLoadAverage1mMetricKey = "os_load_average_1m" - OSUsedSwapMetricKey = "os_used_swap" - OpenFileMetricKey = "open_file" - OpenFilePercentMetricKey = "open_file_percent" - TotalDiskMetricKey = "disk" - IndexingRateMetricKey = "indexing_rate" - IndexingBytesMetricKey = "indexing_bytes" - IndexingLatencyMetricKey = "indexing_latency" - QueryRateMetricKey = "query_rate" - QueryLatencyMetricKey = "query_latency" - FetchRateMetricKey = "fetch_rate" - ScrollRateMetricKey = "scroll_rate" - RefreshRateMetricKey = "refresh_rate" - FlushRateMetricKey = "flush_rate" - MergesRateMetricKey = "merges_rate" - FetchLatencyMetricKey = "fetch_latency" - ScrollLatencyMetricKey = "scroll_latency" - MergeLatencyMetricKey = "merge_latency" - RefreshLatencyMetricKey = "refresh_latency" - FlushLatencyMetricKey = "flush_latency" - QueryCacheMetricKey = "query_cache" - RequestCacheMetricKey = "request_cache" - RequestCacheHitMetricKey = "request_cache_hit" - RequestCacheMissMetricKey = "request_cache_miss" - QueryCacheCountMetricKey = "query_cache_count" - QueryCacheMissMetricKey = "query_cache_miss" - QueryCacheHitMetricKey = "query_cache_hit" - FielddataCacheMetricKey = "fielddata_cache" - HttpConnectNumMetricKey = "http_connect_num" - HttpRateMetricKey = "http_rate" - SegmentCountMetricKey = "segment_count" - SegmentMemoryMetricKey = "segment_memory" + NodeOSCPUMetricKey = "os_cpu" + NodeProcessCPUMetricKey = "cpu" + OSUsedMemoryMetricKey = "os_used_mem" + OSLoadAverage1mMetricKey = "os_load_average_1m" + OSUsedSwapMetricKey = "os_used_swap" + OpenFileMetricKey = "open_file" + OpenFilePercentMetricKey = "open_file_percent" + TotalDiskMetricKey = "disk" + IndexingRateMetricKey = "indexing_rate" + IndexingBytesMetricKey = "indexing_bytes" + IndexingLatencyMetricKey = "indexing_latency" + QueryRateMetricKey = "query_rate" + QueryLatencyMetricKey = "query_latency" + FetchRateMetricKey = "fetch_rate" + ScrollRateMetricKey = "scroll_rate" + RefreshRateMetricKey = "refresh_rate" + FlushRateMetricKey = "flush_rate" + MergesRateMetricKey = "merges_rate" + FetchLatencyMetricKey = "fetch_latency" + ScrollLatencyMetricKey = "scroll_latency" + MergeLatencyMetricKey = "merge_latency" + RefreshLatencyMetricKey = "refresh_latency" + FlushLatencyMetricKey = "flush_latency" + QueryCacheMetricKey = "query_cache" + RequestCacheMetricKey = "request_cache" + RequestCacheHitMetricKey = "request_cache_hit" + RequestCacheMissMetricKey = "request_cache_miss" + QueryCacheCountMetricKey = "query_cache_count" + QueryCacheMissMetricKey = "query_cache_miss" + QueryCacheHitMetricKey = "query_cache_hit" + FielddataCacheMetricKey = "fielddata_cache" + HttpConnectNumMetricKey = "http_connect_num" + HttpRateMetricKey = "http_rate" + SegmentCountMetricKey = "segment_count" + SegmentMemoryMetricKey = "segment_memory" SegmentStoredFieldsMemoryMetricKey = "segment_stored_fields_memory" - SegmentTermsMemoryMetricKey = "segment_terms_memory" - SegmentDocValuesMemoryMetricKey = "segment_doc_values_memory" - SegmentIndexWriterMemoryMetricKey = "segment_index_writer_memory" - SegmentTermVectorsMemoryMetricKey = "segment_term_vectors_memory" - DocsCountMetricKey = "docs_count" - DocsDeletedMetricKey = "docs_deleted" - 
IndexStorageMetricKey = "index_storage" - JVMHeapUsedPercentMetricKey = "jvm_heap_used_percent" - JVMMemYoungUsedMetricKey = "jvm_mem_young_used" - JVMMemYoungPeakUsedMetricKey = "jvm_mem_young_peak_used" - JVMMemOldUsedMetricKey = "jvm_mem_old_used" - JVMMemOldPeakUsedMetricKey = "jvm_mem_old_peak_used" - JVMUsedHeapMetricKey = "jvm_used_heap" - JVMYoungGCRateMetricKey = "jvm_young_gc_rate" - JVMYoungGCLatencyMetricKey = "jvm_young_gc_latency" - JVMOldGCRateMetricKey = "jvm_old_gc_rate" - JVMOldGCLatencyMetricKey = "jvm_old_gc_latency" - TransportTXRateMetricKey = "transport_tx_rate" - TransportRXRateMetricKey = "transport_rx_rate" - TransportTXBytesMetricKey = "transport_tx_bytes" - TransportRXBytesMetricKey = "transport_rx_bytes" - TransportTCPOutboundMetricKey = "transport_outbound_connections" - TotalIOOperationsMetricKey = "total_io_operations" - TotalReadIOOperationsMetricKey = "total_read_io_operations" - TotalWriteIOOperationsMetricKey = "total_write_io_operations" - ScrollOpenContextsMetricKey = "scroll_open_contexts" - ParentBreakerMetricKey = "parent_breaker" - AccountingBreakerMetricKey = "accounting_breaker" - FielddataBreakerMetricKey = "fielddata_breaker" - RequestBreakerMetricKey = "request_breaker" - InFlightRequestsBreakerMetricKey = "in_flight_requests_breaker" - ModelInferenceBreakerMetricKey = "model_inference_breaker" + SegmentTermsMemoryMetricKey = "segment_terms_memory" + SegmentDocValuesMemoryMetricKey = "segment_doc_values_memory" + SegmentIndexWriterMemoryMetricKey = "segment_index_writer_memory" + SegmentTermVectorsMemoryMetricKey = "segment_term_vectors_memory" + DocsCountMetricKey = "docs_count" + DocsDeletedMetricKey = "docs_deleted" + IndexStorageMetricKey = "index_storage" + JVMHeapUsedPercentMetricKey = "jvm_heap_used_percent" + JVMMemYoungUsedMetricKey = "jvm_mem_young_used" + JVMMemYoungPeakUsedMetricKey = "jvm_mem_young_peak_used" + JVMMemOldUsedMetricKey = "jvm_mem_old_used" + JVMMemOldPeakUsedMetricKey = "jvm_mem_old_peak_used" + JVMUsedHeapMetricKey = "jvm_used_heap" + JVMYoungGCRateMetricKey = "jvm_young_gc_rate" + JVMYoungGCLatencyMetricKey = "jvm_young_gc_latency" + JVMOldGCRateMetricKey = "jvm_old_gc_rate" + JVMOldGCLatencyMetricKey = "jvm_old_gc_latency" + TransportTXRateMetricKey = "transport_tx_rate" + TransportRXRateMetricKey = "transport_rx_rate" + TransportTXBytesMetricKey = "transport_tx_bytes" + TransportRXBytesMetricKey = "transport_rx_bytes" + TransportTCPOutboundMetricKey = "transport_outbound_connections" + TotalIOOperationsMetricKey = "total_io_operations" + TotalReadIOOperationsMetricKey = "total_read_io_operations" + TotalWriteIOOperationsMetricKey = "total_write_io_operations" + ScrollOpenContextsMetricKey = "scroll_open_contexts" + ParentBreakerMetricKey = "parent_breaker" + AccountingBreakerMetricKey = "accounting_breaker" + FielddataBreakerMetricKey = "fielddata_breaker" + RequestBreakerMetricKey = "request_breaker" + InFlightRequestsBreakerMetricKey = "in_flight_requests_breaker" + ModelInferenceBreakerMetricKey = "model_inference_breaker" ) -func (h *APIHandler) getNodeMetrics(ctx context.Context, clusterID string, bucketSize int, min, max int64, nodeName string, top int, metricKey string) (map[string]*common.MetricItem, error){ - bucketSizeStr:=fmt.Sprintf("%vs",bucketSize) +func (h *APIHandler) getNodeMetrics(ctx context.Context, clusterID string, bucketSize int, min, max int64, nodeName string, top int, metricKey string) (map[string]*common.MetricItem, error) { + bucketSizeStr := fmt.Sprintf("%vs", bucketSize) 
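Aside: the interval string built above ("60" becomes "60s") is what drives the per-bucket aggregation downstream. A minimal, self-contained sketch of the idea in Go — the aggregation body, field name, and helper below are illustrative assumptions for this note, not the exact query this handler assembles:

package main

import "fmt"

// dateHistogramAgg sketches how a seconds-based bucket size turns into a
// date_histogram aggregation body (hypothetical helper, not project code).
func dateHistogramAgg(field string, bucketSize int) map[string]interface{} {
	interval := fmt.Sprintf("%vs", bucketSize) // e.g. 60 -> "60s"
	return map[string]interface{}{
		"dates": map[string]interface{}{
			"date_histogram": map[string]interface{}{
				"field":          field,
				"fixed_interval": interval, // assumption: "interval" on older Elasticsearch versions
			},
		},
	}
}

func main() {
	fmt.Println(dateHistogramAgg("timestamp", 60))
}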
clusterUUID, err := h.getClusterUUID(clusterID) if err != nil { return nil, err @@ -134,7 +134,7 @@ func (h *APIHandler) getNodeMetrics(ctx context.Context, clusterID string, bucke { "bool": util.MapStr{ "minimum_should_match": 1, - "should": should, + "should": should, }, }, { @@ -158,7 +158,7 @@ func (h *APIHandler) getNodeMetrics(ctx context.Context, clusterID string, bucke if nodeName != "" { nodeNames = strings.Split(nodeName, ",") top = len(nodeNames) - }else{ + } else { nodeNames, err = h.getTopNodeName(clusterID, top, 15) if err != nil { log.Error(err) @@ -181,12 +181,11 @@ func (h *APIHandler) getNodeMetrics(ctx context.Context, clusterID string, bucke }, }, }, - }) } - query:=map[string]interface{}{} - query["query"]=util.MapStr{ + query := map[string]interface{}{} + query["query"] = util.MapStr{ "bool": util.MapStr{ "must": must, "filter": []util.MapStr{ @@ -205,133 +204,133 @@ func (h *APIHandler) getNodeMetrics(ctx context.Context, clusterID string, bucke switch metricKey { case NodeProcessCPUMetricKey: cpuMetric := newMetricItem(NodeProcessCPUMetricKey, 1, SystemGroupKey) - cpuMetric.AddAxi("cpu","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true) - nodeMetricItems=append(nodeMetricItems, GroupMetricItem{ - Key: "cpu", - Field: "payload.elasticsearch.node_stats.process.cpu.percent", - ID: util.GetUUID(), - IsDerivative: false, - MetricItem: cpuMetric, - FormatType: "ratio", - Units: "%", + cpuMetric.AddAxi("cpu", "group1", common.PositionLeft, "ratio", "0.[0]", "0.[0]", 5, true) + nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ + Key: "cpu", + Field: "payload.elasticsearch.node_stats.process.cpu.percent", + ID: util.GetUUID(), + IsDerivative: false, + MetricItem: cpuMetric, + FormatType: "ratio", + Units: "%", }) case NodeOSCPUMetricKey: osCpuMetric := newMetricItem(NodeOSCPUMetricKey, 2, SystemGroupKey) - osCpuMetric.AddAxi("OS CPU Percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true) - nodeMetricItems=append(nodeMetricItems, GroupMetricItem{ - Key: "os_cpu", - Field: "payload.elasticsearch.node_stats.os.cpu.percent", - ID: util.GetUUID(), + osCpuMetric.AddAxi("OS CPU Percent", "group1", common.PositionLeft, "ratio", "0.[0]", "0.[0]", 5, true) + nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ + Key: "os_cpu", + Field: "payload.elasticsearch.node_stats.os.cpu.percent", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: osCpuMetric, - FormatType: "ratio", - Units: "%", + MetricItem: osCpuMetric, + FormatType: "ratio", + Units: "%", }) case OSUsedMemoryMetricKey: osMemMetric := newMetricItem(OSUsedMemoryMetricKey, 2, SystemGroupKey) - osMemMetric.AddAxi("OS Mem Used Percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true) - nodeMetricItems=append(nodeMetricItems, GroupMetricItem{ - Key: "os_used_mem", - Field: "payload.elasticsearch.node_stats.os.mem.used_percent", - ID: util.GetUUID(), + osMemMetric.AddAxi("OS Mem Used Percent", "group1", common.PositionLeft, "ratio", "0.[0]", "0.[0]", 5, true) + nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ + Key: "os_used_mem", + Field: "payload.elasticsearch.node_stats.os.mem.used_percent", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: osMemMetric, - FormatType: "ratio", - Units: "%", + MetricItem: osMemMetric, + FormatType: "ratio", + Units: "%", }) case OSLoadAverage1mMetricKey: osLoadMetric := newMetricItem(OSLoadAverage1mMetricKey, 2, SystemGroupKey) - osLoadMetric.AddAxi("OS Load 1m Average","group1",common.PositionLeft,"","0.[0]","0.[0]",5,true) 
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "os_load_average_1m",
- Field: "payload.elasticsearch.node_stats.os.cpu.load_average.1m",
- ID: util.GetUUID(),
+ osLoadMetric.AddAxi("OS Load 1m Average", "group1", common.PositionLeft, "", "0.[0]", "0.[0]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "os_load_average_1m",
+ Field: "payload.elasticsearch.node_stats.os.cpu.load_average.1m",
+ ID: util.GetUUID(),
IsDerivative: false,
- MetricItem: osLoadMetric,
- FormatType: "num",
- Units: "",
+ MetricItem: osLoadMetric,
+ FormatType: "num",
+ Units: "",
})
case OSUsedSwapMetricKey:
//swap usage
osSwapMetric := newMetricItem(OSUsedSwapMetricKey, 3, SystemGroupKey)
- osSwapMetric.AddAxi("OS Swap Used Percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "os_used_swap",
- Field: "payload.elasticsearch.node_stats.os.swap.used_in_bytes",
- ID: util.GetUUID(),
+ osSwapMetric.AddAxi("OS Swap Used Percent", "group1", common.PositionLeft, "ratio", "0.[0]", "0.[0]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "os_used_swap",
+ Field: "payload.elasticsearch.node_stats.os.swap.used_in_bytes",
+ ID: util.GetUUID(),
IsDerivative: false,
- Field2: "payload.elasticsearch.node_stats.os.swap.total_in_bytes",
+ Field2: "payload.elasticsearch.node_stats.os.swap.total_in_bytes",
Calc: func(value, value2 float64) float64 {
- return util.ToFixed((value / value2)*100, 2)
+ return util.ToFixed((value/value2)*100, 2)
},
MetricItem: osSwapMetric,
FormatType: "ratio",
- Units: "%",
+ Units: "%",
})
case OpenFileMetricKey:
openFileMetric := newMetricItem(OpenFileMetricKey, 2, SystemGroupKey)
- openFileMetric.AddAxi("Open File Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "open_file",
- Field: "payload.elasticsearch.node_stats.process.open_file_descriptors",
- ID: util.GetUUID(),
+ openFileMetric.AddAxi("Open File Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "open_file",
+ Field: "payload.elasticsearch.node_stats.process.open_file_descriptors",
+ ID: util.GetUUID(),
IsDerivative: false,
- MetricItem: openFileMetric,
- FormatType: "num",
- Units: "",
+ MetricItem: openFileMetric,
+ FormatType: "num",
+ Units: "",
})
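Aside: the swap case above (and the open-file-percent and disk cases that follow) share one pattern: Field supplies the numerator sample, Field2 the denominator, and Calc folds them into a percentage. A stripped-down sketch of that contract, using simplified stand-in types rather than the real GroupMetricItem, and a toFixed helper whose rounding behavior is an assumption:

package main

import (
	"fmt"
	"math"
)

// twoFieldMetric is a hypothetical stand-in for the Field/Field2/Calc trio.
type twoFieldMetric struct {
	Field, Field2 string
	Calc          func(value, value2 float64) float64
}

// toFixed mimics a util.ToFixed-style rounding helper (assumed behavior).
func toFixed(v float64, precision int) float64 {
	p := math.Pow10(precision)
	return math.Round(v*p) / p
}

func main() {
	usedSwap := twoFieldMetric{
		Field:  "payload.elasticsearch.node_stats.os.swap.used_in_bytes",
		Field2: "payload.elasticsearch.node_stats.os.swap.total_in_bytes",
		Calc: func(value, value2 float64) float64 {
			return toFixed((value/value2)*100, 2)
		},
	}
	fmt.Println(usedSwap.Calc(512, 2048)) // 25: swap is 25% used
}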
case OpenFilePercentMetricKey:
openFilePercentMetric := newMetricItem(OpenFilePercentMetricKey, 2, SystemGroupKey)
- openFilePercentMetric.AddAxi("Open File Percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "open_file_percent",
- Field: "payload.elasticsearch.node_stats.process.open_file_descriptors",
- ID: util.GetUUID(),
+ openFilePercentMetric.AddAxi("Open File Percent", "group1", common.PositionLeft, "ratio", "0.[0]", "0.[0]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "open_file_percent",
+ Field: "payload.elasticsearch.node_stats.process.open_file_descriptors",
+ ID: util.GetUUID(),
IsDerivative: false,
- Field2: "payload.elasticsearch.node_stats.process.max_file_descriptors",
+ Field2: "payload.elasticsearch.node_stats.process.max_file_descriptors",
Calc: func(value, value2 float64) float64 {
if value < 0 {
return value
}
- return util.ToFixed((value / value2)*100, 2)
+ return util.ToFixed((value/value2)*100, 2)
},
MetricItem: openFilePercentMetric,
FormatType: "ratio",
- Units: "%",
+ Units: "%",
})
case TotalDiskMetricKey:
diskMetric := newMetricItem(TotalDiskMetricKey, 2, SystemGroupKey)
- diskMetric.AddAxi("disk available percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
+ diskMetric.AddAxi("disk available percent", "group1", common.PositionLeft, "ratio", "0.[0]", "0.[0]", 5, true)

- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "disk",
- Field: "payload.elasticsearch.node_stats.fs.total.total_in_bytes",
- ID: util.GetUUID(),
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "disk",
+ Field: "payload.elasticsearch.node_stats.fs.total.total_in_bytes",
+ ID: util.GetUUID(),
IsDerivative: false,
- MetricItem: diskMetric,
- FormatType: "ratio",
- Units: "%",
- Field2: "payload.elasticsearch.node_stats.fs.total.available_in_bytes",
+ MetricItem: diskMetric,
+ FormatType: "ratio",
+ Units: "%",
+ Field2: "payload.elasticsearch.node_stats.fs.total.available_in_bytes",
Calc: func(value, value2 float64) float64 {
- return util.ToFixed((value2 / value)*100, 2)
+ return util.ToFixed((value2/value)*100, 2)
},
})
case IndexingRateMetricKey: // indexing rate
- indexMetric:=newMetricItem(IndexingRateMetricKey, 1, OperationGroupKey)
- indexMetric.AddAxi("indexing rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "indexing_rate",
- Field: "payload.elasticsearch.node_stats.indices.indexing.index_total",
- ID: util.GetUUID(),
+ indexMetric := newMetricItem(IndexingRateMetricKey, 1, OperationGroupKey)
+ indexMetric.AddAxi("indexing rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "indexing_rate",
+ Field: "payload.elasticsearch.node_stats.indices.indexing.index_total",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: indexMetric,
- FormatType: "num",
- Units: "doc/s",
+ MetricItem: indexMetric,
+ FormatType: "num",
+ Units: "doc/s",
})
case IndexingBytesMetricKey:
indexingBytesMetric := newMetricItem(IndexingBytesMetricKey, 2, OperationGroupKey)
- indexingBytesMetric.AddAxi("Indexing bytes","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+ indexingBytesMetric.AddAxi("Indexing bytes", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
Key: "indexing_bytes",
Field: "payload.elasticsearch.node_stats.indices.store.size_in_bytes",
@@ -343,702 +342,702 @@ func (h *APIHandler) getNodeMetrics(ctx context.Context, clusterID string, bucke
})
case IndexingLatencyMetricKey: // indexing latency
- indexLatencyMetric:=newMetricItem(IndexingLatencyMetricKey, 1, LatencyGroupKey)
- indexLatencyMetric.AddAxi("indexing latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "indexing_latency",
- Field: "payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis",
+ indexLatencyMetric := newMetricItem(IndexingLatencyMetricKey, 1, LatencyGroupKey)
+ indexLatencyMetric.AddAxi("indexing latency", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "indexing_latency",
+ Field: "payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis",
Field2: "payload.elasticsearch.node_stats.indices.indexing.index_total",
Calc: func(value, value2 float64) float64 {
- return value/value2
+ return value / value2
},
- ID: util.GetUUID(),
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: indexLatencyMetric,
- FormatType: "num",
- Units: "ms",
+ MetricItem: indexLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
})
case QueryRateMetricKey:
- queryMetric:=newMetricItem(QueryRateMetricKey, 2, OperationGroupKey)
- queryMetric.AddAxi("query rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "query_rate",
- Field: "payload.elasticsearch.node_stats.indices.search.query_total",
- ID: util.GetUUID(),
+ queryMetric := newMetricItem(QueryRateMetricKey, 2, OperationGroupKey)
+ queryMetric.AddAxi("query rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "query_rate",
+ Field: "payload.elasticsearch.node_stats.indices.search.query_total",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: queryMetric,
- FormatType: "num",
- Units: "requests/s",
+ MetricItem: queryMetric,
+ FormatType: "num",
+ Units: "requests/s",
})
case QueryLatencyMetricKey: // query latency
- queryLatencyMetric:=newMetricItem(QueryLatencyMetricKey, 2, LatencyGroupKey)
- queryLatencyMetric.AddAxi("query latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "query_latency",
- Field: "payload.elasticsearch.node_stats.indices.search.query_time_in_millis",
+ queryLatencyMetric := newMetricItem(QueryLatencyMetricKey, 2, LatencyGroupKey)
+ queryLatencyMetric.AddAxi("query latency", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "query_latency",
+ Field: "payload.elasticsearch.node_stats.indices.search.query_time_in_millis",
Field2: "payload.elasticsearch.node_stats.indices.search.query_total",
Calc: func(value, value2 float64) float64 {
- return value/value2
+ return value / value2
},
- ID: util.GetUUID(),
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: queryLatencyMetric,
- FormatType: "num",
- Units: "ms",
+ MetricItem: queryLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
})
case FetchRateMetricKey:
- fetchMetric:=newMetricItem(FetchRateMetricKey, 3, OperationGroupKey)
- fetchMetric.AddAxi("fetch rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "fetch_rate",
- Field: "payload.elasticsearch.node_stats.indices.search.fetch_total",
- ID: util.GetUUID(),
+ fetchMetric := newMetricItem(FetchRateMetricKey, 3, OperationGroupKey)
+ fetchMetric.AddAxi("fetch rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "fetch_rate",
+ Field: "payload.elasticsearch.node_stats.indices.search.fetch_total",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: fetchMetric,
- FormatType: "num",
- Units: "requests/s",
+ MetricItem: fetchMetric,
+ FormatType: "num",
+ Units: "requests/s",
})
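Aside: every *_latency case in this switch divides a cumulative time counter by the matching cumulative operation counter; with IsDerivative set, the division effectively yields average time per operation within each bucket. A small sketch of that arithmetic — a hypothetical helper, simplified from the Calc closures above and below:

package main

import "fmt"

// avgLatency sketches the Calc used by the latency metrics: bucket delta of
// *_time_in_millis divided by bucket delta of the operation count.
func avgLatency(deltaTimeMillis, deltaOps float64) float64 {
	if deltaOps == 0 {
		return 0 // assumption: the real Calc relies on buckets never being empty here
	}
	return deltaTimeMillis / deltaOps
}

func main() {
	// 1200 ms spent on 400 queries in one bucket -> 3 ms average query latency.
	fmt.Println(avgLatency(1200, 400))
}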
case ScrollRateMetricKey:
- scrollMetric:=newMetricItem(ScrollRateMetricKey, 4, OperationGroupKey)
- scrollMetric.AddAxi("scroll rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "scroll_rate",
- Field: "payload.elasticsearch.node_stats.indices.search.scroll_total",
- ID: util.GetUUID(),
+ scrollMetric := newMetricItem(ScrollRateMetricKey, 4, OperationGroupKey)
+ scrollMetric.AddAxi("scroll rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "scroll_rate",
+ Field: "payload.elasticsearch.node_stats.indices.search.scroll_total",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: scrollMetric,
- FormatType: "num",
- Units: "requests/s",
+ MetricItem: scrollMetric,
+ FormatType: "num",
+ Units: "requests/s",
})
case RefreshRateMetricKey:
- refreshMetric:=newMetricItem(RefreshRateMetricKey, 5, OperationGroupKey)
- refreshMetric.AddAxi("refresh rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "refresh_rate",
- Field: "payload.elasticsearch.node_stats.indices.refresh.total",
- ID: util.GetUUID(),
+ refreshMetric := newMetricItem(RefreshRateMetricKey, 5, OperationGroupKey)
+ refreshMetric.AddAxi("refresh rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "refresh_rate",
+ Field: "payload.elasticsearch.node_stats.indices.refresh.total",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: refreshMetric,
- FormatType: "num",
- Units: "requests/s",
+ MetricItem: refreshMetric,
+ FormatType: "num",
+ Units: "requests/s",
})
case FlushRateMetricKey:
- flushMetric:=newMetricItem(FlushRateMetricKey, 6, OperationGroupKey)
- flushMetric.AddAxi("flush rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "flush_rate",
- Field: "payload.elasticsearch.node_stats.indices.flush.total",
- ID: util.GetUUID(),
+ flushMetric := newMetricItem(FlushRateMetricKey, 6, OperationGroupKey)
+ flushMetric.AddAxi("flush rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "flush_rate",
+ Field: "payload.elasticsearch.node_stats.indices.flush.total",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: flushMetric,
- FormatType: "num",
- Units: "requests/s",
+ MetricItem: flushMetric,
+ FormatType: "num",
+ Units: "requests/s",
})
case MergesRateMetricKey:
- mergeMetric:=newMetricItem(MergesRateMetricKey, 7, OperationGroupKey)
- mergeMetric.AddAxi("merges rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "merges_rate",
- Field: "payload.elasticsearch.node_stats.indices.merges.total",
- ID: util.GetUUID(),
+ mergeMetric := newMetricItem(MergesRateMetricKey, 7, OperationGroupKey)
+ mergeMetric.AddAxi("merges rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "merges_rate",
+ Field: "payload.elasticsearch.node_stats.indices.merges.total",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: mergeMetric,
- FormatType: "num",
- Units: "requests/s",
+ MetricItem: mergeMetric,
+ FormatType: "num",
+ Units: "requests/s",
})
case FetchLatencyMetricKey: // fetch latency
- fetchLatencyMetric:=newMetricItem(FetchLatencyMetricKey, 3, LatencyGroupKey)
- fetchLatencyMetric.AddAxi("fetch latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "fetch_latency",
- Field: "payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis",
+ fetchLatencyMetric := newMetricItem(FetchLatencyMetricKey, 3, LatencyGroupKey)
+ fetchLatencyMetric.AddAxi("fetch latency", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "fetch_latency",
+ Field: "payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis",
Field2: "payload.elasticsearch.node_stats.indices.search.fetch_total",
Calc: func(value, value2 float64) float64 {
- return value/value2
+ return value / value2
},
- ID: util.GetUUID(),
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: fetchLatencyMetric,
- FormatType: "num",
- Units: "ms",
+ MetricItem: fetchLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
})
case ScrollLatencyMetricKey: // scroll latency
- scrollLatencyMetric:=newMetricItem(ScrollLatencyMetricKey, 4, LatencyGroupKey)
- scrollLatencyMetric.AddAxi("scroll latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "scroll_latency",
- Field: "payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis",
+ scrollLatencyMetric := newMetricItem(ScrollLatencyMetricKey, 4, LatencyGroupKey)
+ scrollLatencyMetric.AddAxi("scroll latency", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "scroll_latency",
+ Field: "payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis",
Field2: "payload.elasticsearch.node_stats.indices.search.scroll_total",
Calc: func(value, value2 float64) float64 {
- return value/value2
+ return value / value2
},
- ID: util.GetUUID(),
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: scrollLatencyMetric,
- FormatType: "num",
- Units: "ms",
+ MetricItem: scrollLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
})
case MergeLatencyMetricKey: // merge latency
- mergeLatencyMetric:=newMetricItem(MergeLatencyMetricKey, 7, LatencyGroupKey)
- mergeLatencyMetric.AddAxi("merge latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "merge_latency",
- Field: "payload.elasticsearch.node_stats.indices.merges.total_time_in_millis",
+ mergeLatencyMetric := newMetricItem(MergeLatencyMetricKey, 7, LatencyGroupKey)
+ mergeLatencyMetric.AddAxi("merge latency", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "merge_latency",
+ Field: "payload.elasticsearch.node_stats.indices.merges.total_time_in_millis",
Field2: "payload.elasticsearch.node_stats.indices.merges.total",
Calc: func(value, value2 float64) float64 {
- return value/value2
+ return value / value2
},
- ID: util.GetUUID(),
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: mergeLatencyMetric,
- FormatType: "num",
- Units: "ms",
+ MetricItem: mergeLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
})
case RefreshLatencyMetricKey: // refresh latency
- refreshLatencyMetric:=newMetricItem(RefreshLatencyMetricKey, 5, LatencyGroupKey)
- refreshLatencyMetric.AddAxi("refresh latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "refresh_latency",
- Field: "payload.elasticsearch.node_stats.indices.refresh.total_time_in_millis",
+ refreshLatencyMetric := newMetricItem(RefreshLatencyMetricKey, 5, LatencyGroupKey)
+ refreshLatencyMetric.AddAxi("refresh latency", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "refresh_latency",
+ Field: "payload.elasticsearch.node_stats.indices.refresh.total_time_in_millis",
Field2: "payload.elasticsearch.node_stats.indices.refresh.total",
Calc: func(value, value2 float64) float64 {
- return value/value2
+ return value / value2
},
- ID: util.GetUUID(),
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: refreshLatencyMetric,
- FormatType: "num",
- Units: "ms",
+ MetricItem: refreshLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
})
case FlushLatencyMetricKey: // flush latency
- flushLatencyMetric:=newMetricItem(FlushLatencyMetricKey, 6, LatencyGroupKey)
- flushLatencyMetric.AddAxi("flush latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "flush_latency",
- Field: "payload.elasticsearch.node_stats.indices.flush.total_time_in_millis",
+ flushLatencyMetric := newMetricItem(FlushLatencyMetricKey, 6, LatencyGroupKey)
+ flushLatencyMetric.AddAxi("flush latency", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "flush_latency",
+ Field: "payload.elasticsearch.node_stats.indices.flush.total_time_in_millis",
Field2: "payload.elasticsearch.node_stats.indices.flush.total",
Calc: func(value, value2 float64) float64 {
- return value/value2
+ return value / value2
},
- ID: util.GetUUID(),
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: flushLatencyMetric,
- FormatType: "num",
- Units: "ms",
+ MetricItem: flushLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
})
case QueryCacheMetricKey: // Query Cache memory usage
- queryCacheMetric:=newMetricItem(QueryCacheMetricKey, 1, CacheGroupKey)
- queryCacheMetric.AddAxi("query cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "query_cache",
- Field: "payload.elasticsearch.node_stats.indices.query_cache.memory_size_in_bytes",
- ID: util.GetUUID(),
+ queryCacheMetric := newMetricItem(QueryCacheMetricKey, 1, CacheGroupKey)
+ queryCacheMetric.AddAxi("query cache", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "query_cache",
+ Field: "payload.elasticsearch.node_stats.indices.query_cache.memory_size_in_bytes",
+ ID: util.GetUUID(),
IsDerivative: false,
- MetricItem: queryCacheMetric,
- FormatType: "bytes",
- Units: "",
+ MetricItem: queryCacheMetric,
+ FormatType: "bytes",
+ Units: "",
})
case RequestCacheMetricKey: // Request Cache memory usage
- requestCacheMetric:=newMetricItem(RequestCacheMetricKey, 2, CacheGroupKey)
- requestCacheMetric.AddAxi("request cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "request_cache",
- Field: "payload.elasticsearch.node_stats.indices.request_cache.memory_size_in_bytes",
- ID: util.GetUUID(),
+ requestCacheMetric := newMetricItem(RequestCacheMetricKey, 2, CacheGroupKey)
+ requestCacheMetric.AddAxi("request cache", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "request_cache",
+ Field: "payload.elasticsearch.node_stats.indices.request_cache.memory_size_in_bytes",
+ ID: util.GetUUID(),
IsDerivative: false,
- MetricItem: requestCacheMetric,
- FormatType: "bytes",
- Units: "",
+ MetricItem: requestCacheMetric,
+ FormatType: "bytes",
+ Units: "",
})
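Aside: the cache hit/miss metrics that follow read cumulative counters, which is why they set IsDerivative: true — the charted value is the per-second difference between consecutive samples, not the raw counter. A sketch of that post-processing under stated assumptions (the real pipeline computes this server-side via a derivative-style aggregation, not this helper):

package main

import "fmt"

// toRates differences cumulative counter samples and normalizes by the
// bucket size in seconds, turning running totals into per-second rates.
func toRates(samples []float64, bucketSizeSeconds float64) []float64 {
	rates := make([]float64, 0, len(samples))
	for i := 1; i < len(samples); i++ {
		rates = append(rates, (samples[i]-samples[i-1])/bucketSizeSeconds)
	}
	return rates
}

func main() {
	hitCount := []float64{1000, 1600, 2500} // cumulative request_cache.hit_count
	fmt.Println(toRates(hitCount, 60))      // [10 15] hits/s per bucket
}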
case RequestCacheHitMetricKey: // Request Cache Hit
- requestCacheHitMetric:=newMetricItem(RequestCacheHitMetricKey, 6, CacheGroupKey)
- requestCacheHitMetric.AddAxi("request cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "request_cache_hit",
- Field: "payload.elasticsearch.node_stats.indices.request_cache.hit_count",
- ID: util.GetUUID(),
+ requestCacheHitMetric := newMetricItem(RequestCacheHitMetricKey, 6, CacheGroupKey)
+ requestCacheHitMetric.AddAxi("request cache hit", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "request_cache_hit",
+ Field: "payload.elasticsearch.node_stats.indices.request_cache.hit_count",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: requestCacheHitMetric,
- FormatType: "num",
- Units: "hits",
+ MetricItem: requestCacheHitMetric,
+ FormatType: "num",
+ Units: "hits",
})
case RequestCacheMissMetricKey: // Request Cache Miss
- requestCacheMissMetric:=newMetricItem(RequestCacheMissMetricKey, 8, CacheGroupKey)
- requestCacheMissMetric.AddAxi("request cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "request_cache_miss",
- Field: "payload.elasticsearch.node_stats.indices.request_cache.miss_count",
- ID: util.GetUUID(),
+ requestCacheMissMetric := newMetricItem(RequestCacheMissMetricKey, 8, CacheGroupKey)
+ requestCacheMissMetric.AddAxi("request cache miss", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "request_cache_miss",
+ Field: "payload.elasticsearch.node_stats.indices.request_cache.miss_count",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: requestCacheMissMetric,
- FormatType: "num",
- Units: "misses",
+ MetricItem: requestCacheMissMetric,
+ FormatType: "num",
+ Units: "misses",
})
case QueryCacheCountMetricKey: // Query Cache Count
- queryCacheCountMetric:=newMetricItem(QueryCacheCountMetricKey, 4, CacheGroupKey)
- queryCacheCountMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "query_cache_count",
- Field: "payload.elasticsearch.node_stats.indices.query_cache.cache_count",
- ID: util.GetUUID(),
+ queryCacheCountMetric := newMetricItem(QueryCacheCountMetricKey, 4, CacheGroupKey)
+ queryCacheCountMetric.AddAxi("query cache count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "query_cache_count",
+ Field: "payload.elasticsearch.node_stats.indices.query_cache.cache_count",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: queryCacheCountMetric,
- FormatType: "num",
- Units: "",
+ MetricItem: queryCacheCountMetric,
+ FormatType: "num",
+ Units: "",
})
case QueryCacheHitMetricKey:
- queryCacheHitMetric:=newMetricItem(QueryCacheHitMetricKey, 5, CacheGroupKey)
- queryCacheHitMetric.AddAxi("query cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "query_cache_hit",
- Field: "payload.elasticsearch.node_stats.indices.query_cache.hit_count",
- ID: util.GetUUID(),
+ queryCacheHitMetric := newMetricItem(QueryCacheHitMetricKey, 5, CacheGroupKey)
+ queryCacheHitMetric.AddAxi("query cache hit", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "query_cache_hit",
+ Field: "payload.elasticsearch.node_stats.indices.query_cache.hit_count",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: queryCacheHitMetric,
- FormatType: "num",
- Units: "hits",
+ MetricItem: queryCacheHitMetric,
+ FormatType: "num",
+ Units: "hits",
})
case QueryCacheMissMetricKey: // Query Cache Miss
- queryCacheMissMetric:=newMetricItem(QueryCacheMissMetricKey, 7, CacheGroupKey)
- queryCacheMissMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "query_cache_miss",
- Field: "payload.elasticsearch.node_stats.indices.query_cache.miss_count",
- ID: util.GetUUID(),
+ queryCacheMissMetric := newMetricItem(QueryCacheMissMetricKey, 7, CacheGroupKey)
+ queryCacheMissMetric.AddAxi("query cache miss", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "query_cache_miss",
+ Field: "payload.elasticsearch.node_stats.indices.query_cache.miss_count",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: queryCacheMissMetric,
- FormatType: "num",
- Units: "misses",
+ MetricItem: queryCacheMissMetric,
+ FormatType: "num",
+ Units: "misses",
})
case FielddataCacheMetricKey: // Fielddata memory usage
- fieldDataCacheMetric:=newMetricItem(FielddataCacheMetricKey, 3, CacheGroupKey)
- fieldDataCacheMetric.AddAxi("FieldData Cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "fielddata_cache",
- Field: "payload.elasticsearch.node_stats.indices.fielddata.memory_size_in_bytes",
- ID: util.GetUUID(),
+ fieldDataCacheMetric := newMetricItem(FielddataCacheMetricKey, 3, CacheGroupKey)
+ fieldDataCacheMetric.AddAxi("FieldData Cache", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "fielddata_cache",
+ Field: "payload.elasticsearch.node_stats.indices.fielddata.memory_size_in_bytes",
+ ID: util.GetUUID(),
IsDerivative: false,
- MetricItem: fieldDataCacheMetric,
- FormatType: "bytes",
- Units: "",
+ MetricItem: fieldDataCacheMetric,
+ FormatType: "bytes",
+ Units: "",
})
case HttpConnectNumMetricKey: // http open connections
- httpActiveMetric:=newMetricItem(HttpConnectNumMetricKey, 12, HttpGroupKey)
- httpActiveMetric.AddAxi("http connect number","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "http_connect_num",
- Field: "payload.elasticsearch.node_stats.http.current_open",
- ID: util.GetUUID(),
+ httpActiveMetric := newMetricItem(HttpConnectNumMetricKey, 12, HttpGroupKey)
+ httpActiveMetric.AddAxi("http connect number", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "http_connect_num",
+ Field: "payload.elasticsearch.node_stats.http.current_open",
+ ID: util.GetUUID(),
IsDerivative: false,
- MetricItem: httpActiveMetric,
- FormatType: "num",
- Units: "conns",
+ MetricItem: httpActiveMetric,
+ FormatType: "num",
+ Units: "conns",
})
case HttpRateMetricKey: // http open connections rate
- httpRateMetric:=newMetricItem(HttpRateMetricKey, 12, HttpGroupKey)
- httpRateMetric.AddAxi("http rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "http_rate",
- Field: "payload.elasticsearch.node_stats.http.total_opened",
- ID: util.GetUUID(),
+ httpRateMetric := newMetricItem(HttpRateMetricKey, 12, HttpGroupKey)
+ httpRateMetric.AddAxi("http rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "http_rate",
+ Field: "payload.elasticsearch.node_stats.http.total_opened",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: httpRateMetric,
- FormatType: "num",
- Units: "conns/s",
+ MetricItem: httpRateMetric,
+ FormatType: "num",
+ Units: "conns/s",
})
case SegmentCountMetricKey: // segment count
- segmentCountMetric:=newMetricItem(SegmentCountMetricKey, 15, StorageGroupKey)
- segmentCountMetric.AddAxi("segment count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_count",
- Field: "payload.elasticsearch.node_stats.indices.segments.count",
- ID: util.GetUUID(),
+ segmentCountMetric := newMetricItem(SegmentCountMetricKey, 15, StorageGroupKey)
+ segmentCountMetric.AddAxi("segment count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "segment_count",
+ Field: "payload.elasticsearch.node_stats.indices.segments.count",
+ ID: util.GetUUID(),
IsDerivative: false,
- MetricItem: segmentCountMetric,
- FormatType: "num",
- Units: "",
+ MetricItem: segmentCountMetric,
+ FormatType: "num",
+ Units: "",
})
case SegmentMemoryMetricKey: // segment memory
- segmentMemoryMetric:=newMetricItem(SegmentMemoryMetricKey, 16, MemoryGroupKey)
- segmentMemoryMetric.AddAxi("segment memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_memory",
- Field: "payload.elasticsearch.node_stats.indices.segments.memory_in_bytes",
- ID: util.GetUUID(),
+ segmentMemoryMetric := newMetricItem(SegmentMemoryMetricKey, 16, MemoryGroupKey)
+ segmentMemoryMetric.AddAxi("segment memory", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "segment_memory",
+ Field: "payload.elasticsearch.node_stats.indices.segments.memory_in_bytes",
+ ID: util.GetUUID(),
IsDerivative: false,
- MetricItem: segmentMemoryMetric,
- FormatType: "bytes",
- Units: "",
+ MetricItem: segmentMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
})
case SegmentStoredFieldsMemoryMetricKey: // segment stored fields memory
- segmentStoredFieldsMemoryMetric:=newMetricItem(SegmentStoredFieldsMemoryMetricKey, 16, MemoryGroupKey)
- segmentStoredFieldsMemoryMetric.AddAxi("segment stored fields memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_stored_fields_memory",
- Field: "payload.elasticsearch.node_stats.indices.segments.stored_fields_memory_in_bytes",
- ID: util.GetUUID(),
+ segmentStoredFieldsMemoryMetric := newMetricItem(SegmentStoredFieldsMemoryMetricKey, 16, MemoryGroupKey)
+ segmentStoredFieldsMemoryMetric.AddAxi("segment stored fields memory", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "segment_stored_fields_memory",
+ Field: "payload.elasticsearch.node_stats.indices.segments.stored_fields_memory_in_bytes",
+ ID: util.GetUUID(),
IsDerivative: false,
- MetricItem: segmentStoredFieldsMemoryMetric,
- FormatType: "bytes",
- Units: "",
+ MetricItem: segmentStoredFieldsMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
})
case SegmentTermsMemoryMetricKey: // segment terms memory
- segmentTermsMemoryMetric:=newMetricItem(SegmentTermsMemoryMetricKey, 16, MemoryGroupKey)
- segmentTermsMemoryMetric.AddAxi("segment terms memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_terms_memory",
- Field: "payload.elasticsearch.node_stats.indices.segments.terms_memory_in_bytes",
- ID: util.GetUUID(),
+ segmentTermsMemoryMetric := newMetricItem(SegmentTermsMemoryMetricKey, 16, MemoryGroupKey)
+ segmentTermsMemoryMetric.AddAxi("segment terms memory", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "segment_terms_memory",
+ Field: "payload.elasticsearch.node_stats.indices.segments.terms_memory_in_bytes",
+ ID: util.GetUUID(),
IsDerivative: false,
- MetricItem: segmentTermsMemoryMetric,
- FormatType: "bytes",
- Units: "",
+ MetricItem: segmentTermsMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
})
case SegmentDocValuesMemoryMetricKey: // segment doc values memory
- segmentDocValuesMemoryMetric:=newMetricItem(SegmentDocValuesMemoryMetricKey, 16, MemoryGroupKey)
- segmentDocValuesMemoryMetric.AddAxi("segment doc values memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_doc_values_memory",
- Field: "payload.elasticsearch.node_stats.indices.segments.doc_values_memory_in_bytes",
- ID: util.GetUUID(),
+ segmentDocValuesMemoryMetric := newMetricItem(SegmentDocValuesMemoryMetricKey, 16, MemoryGroupKey)
+ segmentDocValuesMemoryMetric.AddAxi("segment doc values memory", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "segment_doc_values_memory",
+ Field: "payload.elasticsearch.node_stats.indices.segments.doc_values_memory_in_bytes",
+ ID: util.GetUUID(),
IsDerivative: false,
- MetricItem: segmentDocValuesMemoryMetric,
- FormatType: "bytes",
- Units: "",
+ MetricItem: segmentDocValuesMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
})
case SegmentIndexWriterMemoryMetricKey: // segment index writer memory
- segmentIndexWriterMemoryMetric:=newMetricItem(SegmentIndexWriterMemoryMetricKey, 16, MemoryGroupKey)
- segmentIndexWriterMemoryMetric.AddAxi("segment doc values memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_index_writer_memory",
- Field: "payload.elasticsearch.node_stats.indices.segments.index_writer_memory_in_bytes",
- ID: util.GetUUID(),
+ segmentIndexWriterMemoryMetric := newMetricItem(SegmentIndexWriterMemoryMetricKey, 16, MemoryGroupKey)
+ segmentIndexWriterMemoryMetric.AddAxi("segment index writer memory", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "segment_index_writer_memory",
+ Field: "payload.elasticsearch.node_stats.indices.segments.index_writer_memory_in_bytes",
+ ID: util.GetUUID(),
IsDerivative: false,
- MetricItem: segmentIndexWriterMemoryMetric,
- FormatType: "bytes",
- Units: "",
+ MetricItem: segmentIndexWriterMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
})
case SegmentTermVectorsMemoryMetricKey: // segment term vectors memory
- segmentTermVectorsMemoryMetric:=newMetricItem(SegmentTermVectorsMemoryMetricKey, 16, MemoryGroupKey)
- segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_term_vectors_memory",
- Field: "payload.elasticsearch.node_stats.indices.segments.term_vectors_memory_in_bytes",
- ID: util.GetUUID(),
+ segmentTermVectorsMemoryMetric := newMetricItem(SegmentTermVectorsMemoryMetricKey, 16, MemoryGroupKey)
+ segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "segment_term_vectors_memory",
+ Field: "payload.elasticsearch.node_stats.indices.segments.term_vectors_memory_in_bytes",
+ ID: util.GetUUID(),
IsDerivative: false,
- MetricItem: segmentTermVectorsMemoryMetric,
- FormatType: "bytes",
- Units: "",
+ MetricItem: segmentTermVectorsMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
})
case DocsCountMetricKey: // docs count
- docsCountMetric:=newMetricItem(DocsCountMetricKey, 17, DocumentGroupKey)
- docsCountMetric.AddAxi("docs count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "docs_count",
- Field: "payload.elasticsearch.node_stats.indices.docs.count",
- ID: util.GetUUID(),
+ docsCountMetric := newMetricItem(DocsCountMetricKey, 17, DocumentGroupKey)
+ docsCountMetric.AddAxi("docs count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "docs_count",
+ Field: "payload.elasticsearch.node_stats.indices.docs.count",
+ ID: util.GetUUID(),
IsDerivative: false,
- MetricItem: docsCountMetric,
- FormatType: "num",
- Units: "",
+ MetricItem: docsCountMetric,
+ FormatType: "num",
+ Units: "",
})
case DocsDeletedMetricKey: // docs deleted count
- docsDeletedMetric:=newMetricItem(DocsDeletedMetricKey, 17, DocumentGroupKey)
- docsDeletedMetric.AddAxi("docs deleted","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "docs_deleted",
- Field: "payload.elasticsearch.node_stats.indices.docs.deleted",
- ID: util.GetUUID(),
+ docsDeletedMetric := newMetricItem(DocsDeletedMetricKey, 17, DocumentGroupKey)
+ docsDeletedMetric.AddAxi("docs deleted", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "docs_deleted",
+ Field: "payload.elasticsearch.node_stats.indices.docs.deleted",
+ ID: util.GetUUID(),
IsDerivative: false,
- MetricItem: docsDeletedMetric,
- FormatType: "num",
- Units: "",
+ MetricItem: docsDeletedMetric,
+ FormatType: "num",
+ Units: "",
})
case IndexStorageMetricKey: // index store size
- indexStoreMetric:=newMetricItem(IndexStorageMetricKey, 18, StorageGroupKey)
- indexStoreMetric.AddAxi("indices storage","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "index_storage",
- Field: "payload.elasticsearch.node_stats.indices.store.size_in_bytes",
- ID: util.GetUUID(),
+ indexStoreMetric := newMetricItem(IndexStorageMetricKey, 18, StorageGroupKey)
+ indexStoreMetric.AddAxi("indices storage", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems,
GroupMetricItem{ + Key: "index_storage", + Field: "payload.elasticsearch.node_stats.indices.store.size_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: indexStoreMetric, - FormatType: "bytes", - Units: "", + MetricItem: indexStoreMetric, + FormatType: "bytes", + Units: "", }) case JVMHeapUsedPercentMetricKey: // jvm used heap - jvmUsedPercentMetric:=newMetricItem(JVMHeapUsedPercentMetricKey, 1, JVMGroupKey) - jvmUsedPercentMetric.AddAxi("JVM heap used percent","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) - nodeMetricItems=append(nodeMetricItems, GroupMetricItem{ - Key: "jvm_heap_used_percent", - Field: "payload.elasticsearch.node_stats.jvm.mem.heap_used_percent", - ID: util.GetUUID(), + jvmUsedPercentMetric := newMetricItem(JVMHeapUsedPercentMetricKey, 1, JVMGroupKey) + jvmUsedPercentMetric.AddAxi("JVM heap used percent", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) + nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ + Key: "jvm_heap_used_percent", + Field: "payload.elasticsearch.node_stats.jvm.mem.heap_used_percent", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: jvmUsedPercentMetric, - FormatType: "num", - Units: "%", + MetricItem: jvmUsedPercentMetric, + FormatType: "num", + Units: "%", }) case JVMMemYoungUsedMetricKey: //JVM mem Young pools used - youngPoolsUsedMetric:=newMetricItem(JVMMemYoungUsedMetricKey, 2, JVMGroupKey) - youngPoolsUsedMetric.AddAxi("Mem Pools Young Used","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true) - nodeMetricItems=append(nodeMetricItems, GroupMetricItem{ - Key: "jvm_mem_young_used", - Field: "payload.elasticsearch.node_stats.jvm.mem.pools.young.used_in_bytes", - ID: util.GetUUID(), + youngPoolsUsedMetric := newMetricItem(JVMMemYoungUsedMetricKey, 2, JVMGroupKey) + youngPoolsUsedMetric.AddAxi("Mem Pools Young Used", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true) + nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ + Key: "jvm_mem_young_used", + Field: "payload.elasticsearch.node_stats.jvm.mem.pools.young.used_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: youngPoolsUsedMetric, - FormatType: "bytes", - Units: "", + MetricItem: youngPoolsUsedMetric, + FormatType: "bytes", + Units: "", }) case JVMMemYoungPeakUsedMetricKey: //JVM mem Young pools peak used - youngPoolsUsedPeakMetric:=newMetricItem(JVMMemYoungPeakUsedMetricKey, 2, JVMGroupKey) - youngPoolsUsedPeakMetric.AddAxi("Mem Pools Young Peak Used","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true) - nodeMetricItems=append(nodeMetricItems, GroupMetricItem{ - Key: "jvm_mem_young_peak_used", - Field: "payload.elasticsearch.node_stats.jvm.mem.pools.young.peak_used_in_bytes", - ID: util.GetUUID(), + youngPoolsUsedPeakMetric := newMetricItem(JVMMemYoungPeakUsedMetricKey, 2, JVMGroupKey) + youngPoolsUsedPeakMetric.AddAxi("Mem Pools Young Peak Used", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true) + nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ + Key: "jvm_mem_young_peak_used", + Field: "payload.elasticsearch.node_stats.jvm.mem.pools.young.peak_used_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: youngPoolsUsedPeakMetric, - FormatType: "bytes", - Units: "", + MetricItem: youngPoolsUsedPeakMetric, + FormatType: "bytes", + Units: "", }) case JVMMemOldUsedMetricKey: //JVM mem old pools used - oldPoolsUsedMetric:=newMetricItem(JVMMemOldUsedMetricKey, 3, JVMGroupKey) - oldPoolsUsedMetric.AddAxi("Mem Pools Old 
Used","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true) - nodeMetricItems=append(nodeMetricItems, GroupMetricItem{ - Key: "jvm_mem_old_used", - Field: "payload.elasticsearch.node_stats.jvm.mem.pools.old.used_in_bytes", - ID: util.GetUUID(), + oldPoolsUsedMetric := newMetricItem(JVMMemOldUsedMetricKey, 3, JVMGroupKey) + oldPoolsUsedMetric.AddAxi("Mem Pools Old Used", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true) + nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ + Key: "jvm_mem_old_used", + Field: "payload.elasticsearch.node_stats.jvm.mem.pools.old.used_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: oldPoolsUsedMetric, - FormatType: "bytes", - Units: "", + MetricItem: oldPoolsUsedMetric, + FormatType: "bytes", + Units: "", }) case JVMMemOldPeakUsedMetricKey: //JVM mem old pools peak used - oldPoolsUsedPeakMetric:=newMetricItem(JVMMemOldPeakUsedMetricKey, 3, JVMGroupKey) - oldPoolsUsedPeakMetric.AddAxi("Mem Pools Old Peak Used","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true) - nodeMetricItems=append(nodeMetricItems, GroupMetricItem{ - Key: "jvm_mem_old_peak_used", - Field: "payload.elasticsearch.node_stats.jvm.mem.pools.old.peak_used_in_bytes", - ID: util.GetUUID(), + oldPoolsUsedPeakMetric := newMetricItem(JVMMemOldPeakUsedMetricKey, 3, JVMGroupKey) + oldPoolsUsedPeakMetric.AddAxi("Mem Pools Old Peak Used", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true) + nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ + Key: "jvm_mem_old_peak_used", + Field: "payload.elasticsearch.node_stats.jvm.mem.pools.old.peak_used_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: oldPoolsUsedPeakMetric, - FormatType: "bytes", - Units: "", + MetricItem: oldPoolsUsedPeakMetric, + FormatType: "bytes", + Units: "", }) case JVMUsedHeapMetricKey: //JVM used heap - heapUsedMetric:=newMetricItem(JVMUsedHeapMetricKey, 1, JVMGroupKey) - heapUsedMetric.AddAxi("JVM Used Heap","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true) - nodeMetricItems=append(nodeMetricItems, GroupMetricItem{ - Key: "jvm_used_heap", - Field: "payload.elasticsearch.node_stats.jvm.mem.heap_used_in_bytes", - ID: util.GetUUID(), + heapUsedMetric := newMetricItem(JVMUsedHeapMetricKey, 1, JVMGroupKey) + heapUsedMetric.AddAxi("JVM Used Heap", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true) + nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ + Key: "jvm_used_heap", + Field: "payload.elasticsearch.node_stats.jvm.mem.heap_used_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: heapUsedMetric, - FormatType: "bytes", - Units: "", + MetricItem: heapUsedMetric, + FormatType: "bytes", + Units: "", }) case JVMYoungGCRateMetricKey: //JVM Young GC Rate - gcYoungRateMetric:=newMetricItem(JVMYoungGCRateMetricKey, 2, JVMGroupKey) - gcYoungRateMetric.AddAxi("JVM Young GC Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) - nodeMetricItems=append(nodeMetricItems, GroupMetricItem{ - Key: "jvm_young_gc_rate", - Field: "payload.elasticsearch.node_stats.jvm.gc.collectors.young.collection_count", - ID: util.GetUUID(), + gcYoungRateMetric := newMetricItem(JVMYoungGCRateMetricKey, 2, JVMGroupKey) + gcYoungRateMetric.AddAxi("JVM Young GC Rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) + nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ + Key: "jvm_young_gc_rate", + Field: "payload.elasticsearch.node_stats.jvm.gc.collectors.young.collection_count", + ID: 
util.GetUUID(),
IsDerivative: true,
- MetricItem: gcYoungRateMetric,
- FormatType: "num",
- Units: "times/s",
+ MetricItem: gcYoungRateMetric,
+ FormatType: "num",
+ Units: "times/s",
})
case JVMYoungGCLatencyMetricKey: //JVM Young GC Latency
- gcYoungLatencyMetric:=newMetricItem(JVMYoungGCLatencyMetricKey, 2, JVMGroupKey)
- gcYoungLatencyMetric.AddAxi("JVM Young GC Time","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_young_gc_latency",
- Field: "payload.elasticsearch.node_stats.jvm.gc.collectors.young.collection_time_in_millis",
- ID: util.GetUUID(),
+ gcYoungLatencyMetric := newMetricItem(JVMYoungGCLatencyMetricKey, 2, JVMGroupKey)
+ gcYoungLatencyMetric.AddAxi("JVM Young GC Time", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "jvm_young_gc_latency",
+ Field: "payload.elasticsearch.node_stats.jvm.gc.collectors.young.collection_time_in_millis",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: gcYoungLatencyMetric,
- FormatType: "num",
- Units: "ms",
+ MetricItem: gcYoungLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
})
case JVMOldGCRateMetricKey: //JVM old GC Rate
- gcOldRateMetric:=newMetricItem(JVMOldGCRateMetricKey, 3, JVMGroupKey)
- gcOldRateMetric.AddAxi("JVM Old GC Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_old_gc_rate",
- Field: "payload.elasticsearch.node_stats.jvm.gc.collectors.old.collection_count",
- ID: util.GetUUID(),
+ gcOldRateMetric := newMetricItem(JVMOldGCRateMetricKey, 3, JVMGroupKey)
+ gcOldRateMetric.AddAxi("JVM Old GC Rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "jvm_old_gc_rate",
+ Field: "payload.elasticsearch.node_stats.jvm.gc.collectors.old.collection_count",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: gcOldRateMetric,
- FormatType: "num",
- Units: "times/s",
+ MetricItem: gcOldRateMetric,
+ FormatType: "num",
+ Units: "times/s",
})
case JVMOldGCLatencyMetricKey: //JVM old GC Latency
- gcOldLatencyMetric:=newMetricItem(JVMOldGCLatencyMetricKey, 3, JVMGroupKey)
- gcOldLatencyMetric.AddAxi("JVM Old GC Time","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_old_gc_latency",
- Field: "payload.elasticsearch.node_stats.jvm.gc.collectors.old.collection_time_in_millis",
- ID: util.GetUUID(),
+ gcOldLatencyMetric := newMetricItem(JVMOldGCLatencyMetricKey, 3, JVMGroupKey)
+ gcOldLatencyMetric.AddAxi("JVM Old GC Time", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "jvm_old_gc_latency",
+ Field: "payload.elasticsearch.node_stats.jvm.gc.collectors.old.collection_time_in_millis",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: gcOldLatencyMetric,
- FormatType: "num",
- Units: "ms",
+ MetricItem: gcOldLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
})
case TransportTXRateMetricKey: //Transport send rate
- transTxRateMetric:=newMetricItem(TransportTXRateMetricKey, 19, TransportGroupKey)
- transTxRateMetric.AddAxi("Transport Send Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "transport_tx_rate",
- Field: "payload.elasticsearch.node_stats.transport.tx_count",
- ID: util.GetUUID(),
+ transTxRateMetric := newMetricItem(TransportTXRateMetricKey, 19, TransportGroupKey)
+ transTxRateMetric.AddAxi("Transport Send Rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "transport_tx_rate",
+ Field: "payload.elasticsearch.node_stats.transport.tx_count",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: transTxRateMetric,
- FormatType: "num",
- Units: "times/s",
+ MetricItem: transTxRateMetric,
+ FormatType: "num",
+ Units: "times/s",
})
case TransportRXRateMetricKey: //Transport receive rate
- transRxRateMetric:=newMetricItem(TransportRXRateMetricKey, 19, TransportGroupKey)
- transRxRateMetric.AddAxi("Transport Receive Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "transport_rx_rate",
- Field: "payload.elasticsearch.node_stats.transport.rx_count",
- ID: util.GetUUID(),
+ transRxRateMetric := newMetricItem(TransportRXRateMetricKey, 19, TransportGroupKey)
+ transRxRateMetric.AddAxi("Transport Receive Rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "transport_rx_rate",
+ Field: "payload.elasticsearch.node_stats.transport.rx_count",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: transRxRateMetric,
- FormatType: "num",
- Units: "times/s",
+ MetricItem: transRxRateMetric,
+ FormatType: "num",
+ Units: "times/s",
})
case TransportTXBytesMetricKey: //Transport send bytes
- transTxBytesMetric:=newMetricItem(TransportTXBytesMetricKey, 19, TransportGroupKey)
- transTxBytesMetric.AddAxi("Transport Send Bytes","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "transport_tx_bytes",
- Field: "payload.elasticsearch.node_stats.transport.tx_size_in_bytes",
- ID: util.GetUUID(),
+ transTxBytesMetric := newMetricItem(TransportTXBytesMetricKey, 19, TransportGroupKey)
+ transTxBytesMetric.AddAxi("Transport Send Bytes", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "transport_tx_bytes",
+ Field: "payload.elasticsearch.node_stats.transport.tx_size_in_bytes",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: transTxBytesMetric,
- FormatType: "bytes",
- Units: "s",
+ MetricItem: transTxBytesMetric,
+ FormatType: "bytes",
+ Units: "s",
})
case TransportRXBytesMetricKey: //Transport receive bytes
- transRxBytesMetric:=newMetricItem(TransportRXBytesMetricKey, 19, TransportGroupKey)
- transRxBytesMetric.AddAxi("Transport Receive Bytes","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "transport_rx_bytes",
- Field: "payload.elasticsearch.node_stats.transport.rx_size_in_bytes",
- ID: util.GetUUID(),
+ transRxBytesMetric := newMetricItem(TransportRXBytesMetricKey, 19, TransportGroupKey)
+ transRxBytesMetric.AddAxi("Transport Receive Bytes", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "transport_rx_bytes",
+ Field: "payload.elasticsearch.node_stats.transport.rx_size_in_bytes",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: transRxBytesMetric,
- FormatType: "bytes",
- Units: "s",
+ MetricItem: transRxBytesMetric,
+ FormatType: "bytes",
+ Units: "s",
})
case TransportTCPOutboundMetricKey: //Transport tcp connections
- tcpNumMetric:=newMetricItem(TransportTCPOutboundMetricKey, 20, TransportGroupKey)
- tcpNumMetric.AddAxi("Transport Outbound Connections","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "transport_outbound_connections",
- Field: "payload.elasticsearch.node_stats.transport.total_outbound_connections",
- ID: util.GetUUID(),
+ tcpNumMetric := newMetricItem(TransportTCPOutboundMetricKey, 20, TransportGroupKey)
+ tcpNumMetric.AddAxi("Transport Outbound Connections", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "transport_outbound_connections",
+ Field: "payload.elasticsearch.node_stats.transport.total_outbound_connections",
+ ID: util.GetUUID(),
IsDerivative: false,
- MetricItem: tcpNumMetric,
- FormatType: "num",
- Units: "",
+ MetricItem: tcpNumMetric,
+ FormatType: "num",
+ Units: "",
})
case TotalIOOperationsMetricKey: //IO total
- totalOperationsMetric:=newMetricItem(TotalIOOperationsMetricKey, 1, IOGroupKey)
- totalOperationsMetric.AddAxi("Total I/O Operations Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "total_io_operations",
- Field: "payload.elasticsearch.node_stats.fs.io_stats.total.operations",
- ID: util.GetUUID(),
+ totalOperationsMetric := newMetricItem(TotalIOOperationsMetricKey, 1, IOGroupKey)
+ totalOperationsMetric.AddAxi("Total I/O Operations Rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "total_io_operations",
+ Field: "payload.elasticsearch.node_stats.fs.io_stats.total.operations",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: totalOperationsMetric,
- FormatType: "num",
- Units: "times/s",
+ MetricItem: totalOperationsMetric,
+ FormatType: "num",
+ Units: "times/s",
})
case TotalReadIOOperationsMetricKey:
- readOperationsMetric:=newMetricItem(TotalReadIOOperationsMetricKey, 2, IOGroupKey)
- readOperationsMetric.AddAxi("Total Read I/O Operations Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "total_read_io_operations",
- Field: "payload.elasticsearch.node_stats.fs.io_stats.total.read_operations",
- ID: util.GetUUID(),
+ readOperationsMetric := newMetricItem(TotalReadIOOperationsMetricKey, 2, IOGroupKey)
+ readOperationsMetric.AddAxi("Total Read I/O Operations Rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "total_read_io_operations",
+ Field: "payload.elasticsearch.node_stats.fs.io_stats.total.read_operations",
+ ID: util.GetUUID(),
IsDerivative: true,
- MetricItem: readOperationsMetric,
- FormatType: "num",
- Units: "times/s",
+ MetricItem: readOperationsMetric,
+ FormatType: "num",
+ Units: "times/s",
})
case TotalWriteIOOperationsMetricKey:
- writeOperationsMetric
:= newMetricItem(TotalWriteIOOperationsMetricKey, 3, IOGroupKey) + writeOperationsMetric.AddAxi("Total Write I/O Operations Rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) + nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ + Key: "total_write_io_operations", + Field: "payload.elasticsearch.node_stats.fs.io_stats.total.write_operations", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: writeOperationsMetric, - FormatType: "num", - Units: "times/s", + MetricItem: writeOperationsMetric, + FormatType: "num", + Units: "times/s", }) case ScrollOpenContextsMetricKey: //scroll context - openContextMetric:=newMetricItem(ScrollOpenContextsMetricKey, 7, OperationGroupKey) - openContextMetric.AddAxi("Scroll Open Contexts","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) - nodeMetricItems=append(nodeMetricItems, GroupMetricItem{ - Key: "scroll_open_contexts", - Field: "payload.elasticsearch.node_stats.indices.search.open_contexts", - ID: util.GetUUID(), + openContextMetric := newMetricItem(ScrollOpenContextsMetricKey, 7, OperationGroupKey) + openContextMetric.AddAxi("Scroll Open Contexts", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) + nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ + Key: "scroll_open_contexts", + Field: "payload.elasticsearch.node_stats.indices.search.open_contexts", + ID: util.GetUUID(), MetricItem: openContextMetric, FormatType: "num", - Units: "", + Units: "", }) case ParentBreakerMetricKey: // Circuit Breaker parentBreakerMetric := newMetricItem(ParentBreakerMetricKey, 1, CircuitBreakerGroupKey) - parentBreakerMetric.AddAxi("Parent Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + parentBreakerMetric.AddAxi("Parent Breaker", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ Key: "parent_breaker", Field: "payload.elasticsearch.node_stats.breakers.parent.tripped", @@ -1050,7 +1049,7 @@ func (h *APIHandler) getNodeMetrics(ctx context.Context, clusterID string, bucke }) case AccountingBreakerMetricKey: accountingBreakerMetric := newMetricItem(AccountingBreakerMetricKey, 2, CircuitBreakerGroupKey) - accountingBreakerMetric.AddAxi("Accounting Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + accountingBreakerMetric.AddAxi("Accounting Breaker", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ Key: "accounting_breaker", Field: "payload.elasticsearch.node_stats.breakers.accounting.tripped", @@ -1062,7 +1061,7 @@ func (h *APIHandler) getNodeMetrics(ctx context.Context, clusterID string, bucke }) case FielddataBreakerMetricKey: fielddataBreakerMetric := newMetricItem(FielddataBreakerMetricKey, 3, CircuitBreakerGroupKey) - fielddataBreakerMetric.AddAxi("Fielddata Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + fielddataBreakerMetric.AddAxi("Fielddata Breaker", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ Key: "fielddata_breaker", Field: "payload.elasticsearch.node_stats.breakers.fielddata.tripped", @@ -1074,7 +1073,7 @@ func (h *APIHandler) getNodeMetrics(ctx context.Context, clusterID string, bucke }) case RequestBreakerMetricKey: requestBreakerMetric := newMetricItem(RequestBreakerMetricKey, 4, CircuitBreakerGroupKey) - requestBreakerMetric.AddAxi("Request 
Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + requestBreakerMetric.AddAxi("Request Breaker", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ Key: "request_breaker", Field: "payload.elasticsearch.node_stats.breakers.request.tripped", @@ -1086,7 +1085,7 @@ func (h *APIHandler) getNodeMetrics(ctx context.Context, clusterID string, bucke }) case InFlightRequestsBreakerMetricKey: inFlightRequestBreakerMetric := newMetricItem(InFlightRequestsBreakerMetricKey, 5, CircuitBreakerGroupKey) - inFlightRequestBreakerMetric.AddAxi("In Flight Requests Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + inFlightRequestBreakerMetric.AddAxi("In Flight Requests Breaker", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ Key: "in_flight_requests_breaker", Field: "payload.elasticsearch.node_stats.breakers.in_flight_requests.tripped", @@ -1099,7 +1098,7 @@ func (h *APIHandler) getNodeMetrics(ctx context.Context, clusterID string, bucke case ModelInferenceBreakerMetricKey: //Elasticsearch 8.6+ Model Inference Breaker modelInferenceBreakerMetric := newMetricItem(ModelInferenceBreakerMetricKey, 6, CircuitBreakerGroupKey) - modelInferenceBreakerMetric.AddAxi("Model Inference Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) + modelInferenceBreakerMetric.AddAxi("Model Inference Breaker", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true) nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ Key: "model_inference_breaker", Field: "payload.elasticsearch.node_stats.breakers.model_inference.tripped", @@ -1128,7 +1127,7 @@ func (h *APIHandler) getNodeMetrics(ctx context.Context, clusterID string, bucke "aggs": util.MapStr{ "dates": util.MapStr{ "date_histogram": util.MapStr{ - "field": "timestamp", + "field": "timestamp", intervalField: bucketSizeStr, }, "aggs": aggs, @@ -1140,16 +1139,16 @@ func (h *APIHandler) getNodeMetrics(ctx context.Context, clusterID string, bucke } -func (h *APIHandler) getTopNodeName(clusterID string, top int, lastMinutes int) ([]string, error){ +func (h *APIHandler) getTopNodeName(clusterID string, top int, lastMinutes int) ([]string, error) { ver := h.Client().GetVersion() cr, _ := util.VersionCompare(ver.Number, "6.1") if (ver.Distribution == "" || ver.Distribution == elastic.Elasticsearch) && cr == -1 { return nil, nil } var ( - now = time.Now() - max = now.UnixNano()/1e6 - min = now.Add(-time.Duration(lastMinutes) * time.Minute).UnixNano()/1e6 + now = time.Now() + max = now.UnixNano() / 1e6 + min = now.Add(-time.Duration(lastMinutes)*time.Minute).UnixNano() / 1e6 bucketSizeStr = "60s" ) intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr) @@ -1217,7 +1216,7 @@ func (h *APIHandler) getTopNodeName(clusterID string, top int, lastMinutes int) }, "dates": util.MapStr{ "date_histogram": util.MapStr{ - "field": "timestamp", + "field": "timestamp", intervalField: bucketSizeStr, }, "aggs": util.MapStr{ @@ -1256,7 +1255,7 @@ func (h *APIHandler) getTopNodeName(clusterID string, top int, lastMinutes int) }, "dates": util.MapStr{ "date_histogram": util.MapStr{ - "field": "timestamp", + "field": "timestamp", intervalField: bucketSizeStr, }, "aggs": util.MapStr{ @@ -1276,20 +1275,20 @@ func (h *APIHandler) getTopNodeName(clusterID string, top int, lastMinutes int) }, }, } - 
response,err:=elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(),util.MustToJSONBytes(query)) - if err!=nil{ + response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query)) + if err != nil { log.Error(err) return nil, err } - var maxQpsKVS = map[string] float64{} + var maxQpsKVS = map[string]float64{} for _, agg := range response.Aggregations { for _, bk := range agg.Buckets { key := bk["key"].(string) if maxQps, ok := bk["max_qps"].(map[string]interface{}); ok { val := maxQps["value"].(float64) - if _, ok = maxQpsKVS[key] ; ok { + if _, ok = maxQpsKVS[key]; ok { maxQpsKVS[key] = maxQpsKVS[key] + val - }else{ + } else { maxQpsKVS[key] = val } } @@ -1310,8 +1309,8 @@ func (h *APIHandler) getTopNodeName(clusterID string, top int, lastMinutes int) length = len(qpsValues) } nodeNames := []string{} - for i := 0; i 1 { - query["sort"] = []util.MapStr{ + query["sort"] = []util.MapStr{ { reqBody.Sort[0]: util.MapStr{ "order": reqBody.Sort[1], @@ -190,7 +185,7 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(orm.GetIndexName(elastic.NodeConfig{}), dsl) if err != nil { resBody["error"] = err.Error() - h.WriteJSON(w,resBody, http.StatusInternalServerError ) + h.WriteJSON(w, resBody, http.StatusInternalServerError) return } w.Write(util.MustToJSONBytes(response)) @@ -299,7 +294,7 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps source["shard_info"] = shardInfo } if tempClusterID, ok := util.GetMapValueByKeys([]string{"metadata", "labels", "cluster_id"}, result); ok { - if clusterID, ok := tempClusterID.(string); ok { + if clusterID, ok := tempClusterID.(string); ok { if meta := elastic.GetMetadata(clusterID); meta != nil && meta.ClusterState != nil { source["is_master_node"] = meta.ClusterState.MasterNode == nodeID } @@ -317,28 +312,28 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps return } // indexing rate - indexMetric:=newMetricItem("indexing", 1, OperationGroupKey) - indexMetric.AddAxi("indexing rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) + indexMetric := newMetricItem("indexing", 1, OperationGroupKey) + indexMetric.AddAxi("indexing rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) nodeMetricItems := []GroupMetricItem{} - nodeMetricItems=append(nodeMetricItems, GroupMetricItem{ - Key: "indexing", - Field: "payload.elasticsearch.node_stats.indices.indexing.index_total", - ID: util.GetUUID(), + nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ + Key: "indexing", + Field: "payload.elasticsearch.node_stats.indices.indexing.index_total", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: indexMetric, - FormatType: "num", - Units: "Indexing/s", + MetricItem: indexMetric, + FormatType: "num", + Units: "Indexing/s", }) - queryMetric:=newMetricItem("search", 2, OperationGroupKey) - queryMetric.AddAxi("query rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) - nodeMetricItems=append(nodeMetricItems, GroupMetricItem{ - Key: "search", - Field: "payload.elasticsearch.node_stats.indices.search.query_total", - ID: util.GetUUID(), + queryMetric := newMetricItem("search", 2, OperationGroupKey) + queryMetric.AddAxi("query rate", "group1", 
common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) + nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ + Key: "search", + Field: "payload.elasticsearch.node_stats.indices.search.query_total", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: queryMetric, - FormatType: "num", - Units: "Search/s", + MetricItem: queryMetric, + FormatType: "num", + Units: "Search/s", }) bucketSize := GetMinBucketSize() @@ -346,11 +341,11 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps bucketSize = 60 } var metricLen = 15 - aggs:=map[string]interface{}{} - query=map[string]interface{}{} - query["query"]=util.MapStr{ + aggs := map[string]interface{}{} + query = map[string]interface{}{} + query["query"] = util.MapStr{ "bool": util.MapStr{ - "must": []util.MapStr{ + "must": []util.MapStr{ { "term": util.MapStr{ "metadata.category": util.MapStr{ @@ -375,7 +370,7 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps { "range": util.MapStr{ "timestamp": util.MapStr{ - "gte": fmt.Sprintf("now-%ds", metricLen * bucketSize), + "gte": fmt.Sprintf("now-%ds", metricLen*bucketSize), }, }, }, @@ -383,15 +378,15 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps }, } - for _,metricItem:=range nodeMetricItems{ - aggs[metricItem.ID]=util.MapStr{ - "max":util.MapStr{ + for _, metricItem := range nodeMetricItems { + aggs[metricItem.ID] = util.MapStr{ + "max": util.MapStr{ "field": metricItem.Field, }, } - if metricItem.IsDerivative{ - aggs[metricItem.ID+"_deriv"]=util.MapStr{ - "derivative":util.MapStr{ + if metricItem.IsDerivative { + aggs[metricItem.ID+"_deriv"] = util.MapStr{ + "derivative": util.MapStr{ "buckets_path": metricItem.ID, }, } @@ -403,8 +398,8 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps if err != nil { panic(err) } - query["size"]=0 - query["aggs"]= util.MapStr{ + query["size"] = 0 + query["aggs"] = util.MapStr{ "group_by_level": util.MapStr{ "terms": util.MapStr{ "field": "metadata.labels.node_id", @@ -412,11 +407,11 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps }, "aggs": util.MapStr{ "dates": util.MapStr{ - "date_histogram":util.MapStr{ - "field": "timestamp", + "date_histogram": util.MapStr{ + "field": "timestamp", intervalField: bucketSizeStr, }, - "aggs":aggs, + "aggs": aggs, }, }, }, @@ -430,9 +425,8 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps indexMetrics := map[string]util.MapStr{} for key, item := range metrics { for _, line := range item.Lines { - if _, ok := indexMetrics[line.Metric.Label]; !ok{ - indexMetrics[line.Metric.Label] = util.MapStr{ - } + if _, ok := indexMetrics[line.Metric.Label]; !ok { + indexMetrics[line.Metric.Label] = util.MapStr{} } indexMetrics[line.Metric.Label][key] = line.Data } @@ -493,7 +487,7 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht // return //} q1 := orm.Query{ - Size: 1, + Size: 1, WildcardIndex: true, } q1.Conds = orm.And( @@ -518,7 +512,7 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht tt, _ := time.Parse(time.RFC3339, ts) if time.Now().Sub(tt).Seconds() > 30 { kvs["status"] = "unavailable" - }else{ + } else { kvs["status"] = "available" } } @@ -536,9 +530,9 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht jvm, ok := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "node_stats", "jvm"}, vresult) if ok { - if jvmVal, ok := 
jvm.(map[string]interface{});ok { + if jvmVal, ok := jvm.(map[string]interface{}); ok { kvs["jvm"] = util.MapStr{ - "mem": jvmVal["mem"], + "mem": jvmVal["mem"], "uptime": jvmVal["uptime_in_millis"], } } @@ -559,7 +553,7 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht } } } - if len( response.Hits.Hits) > 0 { + if len(response.Hits.Hits) > 0 { hit := response.Hits.Hits[0] innerMetaData, _ := util.GetMapValueByKeys([]string{"metadata", "labels"}, hit.Source) if mp, ok := innerMetaData.(map[string]interface{}); ok { @@ -593,15 +587,15 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque } should := []util.MapStr{ { - "term":util.MapStr{ - "metadata.labels.cluster_id":util.MapStr{ + "term": util.MapStr{ + "metadata.labels.cluster_id": util.MapStr{ "value": clusterID, }, }, }, { - "term":util.MapStr{ - "metadata.labels.cluster_uuid":util.MapStr{ + "term": util.MapStr{ + "metadata.labels.cluster_uuid": util.MapStr{ "value": clusterUUID, }, }, @@ -632,19 +626,19 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque }, } resBody := map[string]interface{}{} - bucketSize, min, max, err := h.GetMetricRangeAndBucketSize(req,clusterID, v1.MetricTypeNodeStats,60) + bucketSize, min, max, err := h.GetMetricRangeAndBucketSize(req, clusterID, v1.MetricTypeNodeStats, 60) if err != nil { log.Error(err) resBody["error"] = err h.WriteJSON(w, resBody, http.StatusInternalServerError) return } - query:=map[string]interface{}{} - query["query"]=util.MapStr{ + query := map[string]interface{}{} + query["query"] = util.MapStr{ "bool": util.MapStr{ - "must": must, + "must": must, "minimum_should_match": 1, - "should": should, + "should": should, "filter": []util.MapStr{ { "range": util.MapStr{ @@ -658,8 +652,8 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque }, } - bucketSizeStr:=fmt.Sprintf("%vs",bucketSize) - metricItems:=[]*common.MetricItem{} + bucketSizeStr := fmt.Sprintf("%vs", bucketSize) + metricItems := []*common.MetricItem{} metricKey := h.GetParameter(req, "key") timeout := h.GetParameterOrDefault(req, "timeout", "60s") du, err := time.ParseDuration(timeout) @@ -679,13 +673,13 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque return } metrics["node_health"] = healthMetric - }else if metricKey == ShardStateMetricKey { + } else if metricKey == ShardStateMetricKey { query = util.MapStr{ "size": 0, "query": util.MapStr{ "bool": util.MapStr{ "minimum_should_match": 1, - "should": should, + "should": should, "must": []util.MapStr{ { "term": util.MapStr{ @@ -729,74 +723,74 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque return } metrics["shard_state"] = shardStateMetric - }else{ + } else { switch metricKey { case NodeProcessCPUMetricKey: - metricItem:=newMetricItem("cpu", 1, SystemGroupKey) - metricItem.AddAxi("cpu","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true) - metricItem.AddLine("Process CPU","Process CPU","process cpu used percent of node.","group1","payload.elasticsearch.node_stats.process.cpu.percent","max",bucketSizeStr,"%","num","0,0.[00]","0,0.[00]",false,false) - metricItem.AddLine("OS CPU","OS CPU","process cpu used percent of node.","group1","payload.elasticsearch.node_stats.os.cpu.percent","max",bucketSizeStr,"%","num","0,0.[00]","0,0.[00]",false,false) - metricItems=append(metricItems,metricItem) + metricItem := newMetricItem("cpu", 1, SystemGroupKey) + metricItem.AddAxi("cpu", "group1", 
common.PositionLeft, "ratio", "0.[0]", "0.[0]", 5, true) + metricItem.AddLine("Process CPU", "Process CPU", "process cpu used percent of node.", "group1", "payload.elasticsearch.node_stats.process.cpu.percent", "max", bucketSizeStr, "%", "num", "0,0.[00]", "0,0.[00]", false, false) + metricItem.AddLine("OS CPU", "OS CPU", "process cpu used percent of node.", "group1", "payload.elasticsearch.node_stats.os.cpu.percent", "max", bucketSizeStr, "%", "num", "0,0.[00]", "0,0.[00]", false, false) + metricItems = append(metricItems, metricItem) case NodeCPUJVMMetricKey: metricItem := newMetricItem("jvm", 2, SystemGroupKey) - metricItem.AddAxi("JVM Heap","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) - metricItem.AddLine("Max Heap","Max Heap","JVM max Heap of node.","group1","payload.elasticsearch.node_stats.jvm.mem.heap_max_in_bytes","max",bucketSizeStr,"","bytes","0,0.[00]","0,0.[00]",false,false) - metricItem.AddLine("Used Heap","Used Heap","JVM used Heap of node.","group1","payload.elasticsearch.node_stats.jvm.mem.heap_used_in_bytes","max",bucketSizeStr,"","bytes","0,0.[00]","0,0.[00]",false,false) - metricItems=append(metricItems,metricItem) + metricItem.AddAxi("JVM Heap", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) + metricItem.AddLine("Max Heap", "Max Heap", "JVM max Heap of node.", "group1", "payload.elasticsearch.node_stats.jvm.mem.heap_max_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false) + metricItem.AddLine("Used Heap", "Used Heap", "JVM used Heap of node.", "group1", "payload.elasticsearch.node_stats.jvm.mem.heap_used_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false) + metricItems = append(metricItems, metricItem) case v1.IndexThroughputMetricKey: metricItem := newMetricItem("index_throughput", 3, OperationGroupKey) - metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) - metricItem.AddLine("Indexing Rate","Total Shards","Number of documents being indexed for node.","group1","payload.elasticsearch.node_stats.indices.indexing.index_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true) - metricItems=append(metricItems,metricItem) + metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) + metricItem.AddLine("Indexing Rate", "Total Shards", "Number of documents being indexed for node.", "group1", "payload.elasticsearch.node_stats.indices.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true) + metricItems = append(metricItems, metricItem) case v1.SearchThroughputMetricKey: metricItem := newMetricItem("search_throughput", 4, OperationGroupKey) - metricItem.AddAxi("searching","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,false) - metricItem.AddLine("Search Rate","Total Shards", + metricItem.AddAxi("searching", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false) + metricItem.AddLine("Search Rate", "Total Shards", "Number of search requests being executed.", - "group1","payload.elasticsearch.node_stats.indices.search.query_total","max",bucketSizeStr,"query/s","num","0,0.[00]","0,0.[00]",false,true) - metricItems=append(metricItems,metricItem) + "group1", "payload.elasticsearch.node_stats.indices.search.query_total", "max", bucketSizeStr, "query/s", "num", "0,0.[00]", "0,0.[00]", false, true) + metricItems = append(metricItems, metricItem) case v1.IndexLatencyMetricKey: metricItem := newMetricItem("index_latency", 5, 
LatencyGroupKey) - metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) + metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) - metricItem.AddLine("Indexing","Indexing Latency","Average latency for indexing documents.","group1","payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true) + metricItem.AddLine("Indexing", "Indexing Latency", "Average latency for indexing documents.", "group1", "payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true) metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.index_total" metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 { - return value/value2 + return value / value2 } - metricItem.AddLine("Indexing","Delete Latency","Average latency for delete documents.","group1","payload.elasticsearch.node_stats.indices.indexing.delete_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true) + metricItem.AddLine("Indexing", "Delete Latency", "Average latency for delete documents.", "group1", "payload.elasticsearch.node_stats.indices.indexing.delete_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true) metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.delete_total" metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 { - return value/value2 + return value / value2 } - metricItems=append(metricItems,metricItem) + metricItems = append(metricItems, metricItem) case v1.SearchLatencyMetricKey: metricItem := newMetricItem("search_latency", 6, LatencyGroupKey) - metricItem.AddAxi("searching","group2",common.PositionLeft,"num","0,0","0,0.[00]",5,false) + metricItem.AddAxi("searching", "group2", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false) - metricItem.AddLine("Searching","Query Latency","Average latency for searching query.","group2","payload.elasticsearch.node_stats.indices.search.query_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true) + metricItem.AddLine("Searching", "Query Latency", "Average latency for searching query.", "group2", "payload.elasticsearch.node_stats.indices.search.query_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true) metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.query_total" metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 { - return value/value2 + return value / value2 } - metricItem.AddLine("Searching","Fetch Latency","Average latency for searching fetch.","group2","payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true) + metricItem.AddLine("Searching", "Fetch Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true) metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.fetch_total" metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 { - return value/value2 + return value / value2 } - metricItem.AddLine("Searching","Scroll Latency","Average latency for searching 
fetch.","group2","payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true) + metricItem.AddLine("Searching", "Scroll Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true) metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.scroll_total" metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 { - return value/value2 + return value / value2 } - metricItems=append(metricItems,metricItem) + metricItems = append(metricItems, metricItem) case ParentBreakerMetricKey: metricItem := newMetricItem("parent_breaker", 8, SystemGroupKey) - metricItem.AddLine("Parent Breaker Tripped","Parent Breaker Tripped","Rate of the circuit breaker has been triggered and prevented an out of memory error.","group1","payload.elasticsearch.node_stats.breakers.parent.tripped","max",bucketSizeStr,"times/s","num","0,0.[00]","0,0.[00]",false,true) - metricItems=append(metricItems,metricItem) + metricItem.AddLine("Parent Breaker Tripped", "Parent Breaker Tripped", "Rate of the circuit breaker has been triggered and prevented an out of memory error.", "group1", "payload.elasticsearch.node_stats.breakers.parent.tripped", "max", bucketSizeStr, "times/s", "num", "0,0.[00]", "0,0.[00]", false, true) + metricItems = append(metricItems, metricItem) } - metrics, err = h.getSingleMetrics(ctx, metricItems,query, bucketSize) + metrics, err = h.getSingleMetrics(ctx, metricItems, query, bucketSize) if err != nil { log.Error(err) h.WriteError(w, err, http.StatusInternalServerError) @@ -808,7 +802,7 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque minBucketSize, err := v1.GetMetricMinBucketSize(clusterID, v1.MetricTypeNodeStats) if err != nil { log.Error(err) - }else{ + } else { metrics[metricKey].MinBucketSize = int64(minBucketSize) } } @@ -818,8 +812,8 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque h.WriteJSON(w, resBody, http.StatusOK) } -func getNodeShardStateMetric(ctx context.Context, query util.MapStr, bucketSize int)(*common.MetricItem, error){ - bucketSizeStr:=fmt.Sprintf("%vs",bucketSize) +func getNodeShardStateMetric(ctx context.Context, query util.MapStr, bucketSize int) (*common.MetricItem, error) { + bucketSizeStr := fmt.Sprintf("%vs", bucketSize) intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr) if err != nil { return nil, err @@ -828,14 +822,14 @@ func getNodeShardStateMetric(ctx context.Context, query util.MapStr, bucketSize query["aggs"] = util.MapStr{ "dates": util.MapStr{ "date_histogram": util.MapStr{ - "field": "timestamp", + "field": "timestamp", intervalField: bucketSizeStr, }, "aggs": util.MapStr{ "groups": util.MapStr{ "terms": util.MapStr{ "field": "payload.elasticsearch.shard_stats.routing.state", - "size": 10, + "size": 10, }, }, }, @@ -848,8 +842,8 @@ func getNodeShardStateMetric(ctx context.Context, query util.MapStr, bucketSize return nil, err } - metricItem:=newMetricItem("shard_state", 0, "") - metricItem.AddLine("Shard State","Shard State","","group1","payload.elasticsearch.shard_stats.routing.state","count",bucketSizeStr,"","ratio","0.[00]","0.[00]",false,false) + metricItem := newMetricItem("shard_state", 0, "") + metricItem.AddLine("Shard State", "Shard State", "", "group1", 
"payload.elasticsearch.shard_stats.routing.state", "count", bucketSizeStr, "", "ratio", "0.[00]", "0.[00]", false, false) metricData := []interface{}{} if response.StatusCode == 200 { @@ -864,8 +858,8 @@ func getNodeShardStateMetric(ctx context.Context, query util.MapStr, bucketSize return metricItem, nil } -func getNodeHealthMetric(ctx context.Context, query util.MapStr, bucketSize int)(*common.MetricItem, error){ - bucketSizeStr:=fmt.Sprintf("%vs",bucketSize) +func getNodeHealthMetric(ctx context.Context, query util.MapStr, bucketSize int) (*common.MetricItem, error) { + bucketSizeStr := fmt.Sprintf("%vs", bucketSize) intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr) if err != nil { return nil, err @@ -873,7 +867,7 @@ func getNodeHealthMetric(ctx context.Context, query util.MapStr, bucketSize int) query["aggs"] = util.MapStr{ "dates": util.MapStr{ "date_histogram": util.MapStr{ - "field": "timestamp", + "field": "timestamp", intervalField: bucketSizeStr, }, "aggs": util.MapStr{ @@ -886,14 +880,14 @@ func getNodeHealthMetric(ctx context.Context, query util.MapStr, bucketSize int) }, } queryDSL := util.MustToJSONBytes(query) - response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL) + response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL) if err != nil { log.Error(err) return nil, err } - metricItem:=newMetricItem("node_health", 0, "") - metricItem.AddLine("Node health","Node Health","","group1","payload.elasticsearch.node_stats.jvm.uptime_in_millis","min",bucketSizeStr,"%","ratio","0.[00]","0.[00]",false,false) + metricItem := newMetricItem("node_health", 0, "") + metricItem.AddLine("Node health", "Node Health", "", "group1", "payload.elasticsearch.node_stats.jvm.uptime_in_millis", "min", bucketSizeStr, "%", "ratio", "0.[00]", "0.[00]", false, false) metricData := []interface{}{} if response.StatusCode == 200 { @@ -923,7 +917,7 @@ func getNodeHealthMetric(ctx context.Context, query util.MapStr, bucketSize int) return metricItem, nil } -func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{}, error){ +func getNodeOnlineStatusOfRecentDay(nodeIDs []string) (map[string][]interface{}, error) { q := orm.Query{ WildcardIndex: true, } @@ -932,64 +926,64 @@ func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{}, "group_by_node_id": util.MapStr{ "terms": util.MapStr{ "field": "metadata.labels.node_id", - "size": 100, + "size": 100, }, "aggs": util.MapStr{ "uptime_histogram": util.MapStr{ - "date_range": util.MapStr{ + "date_range": util.MapStr{ "field": "timestamp", "format": "yyyy-MM-dd", "time_zone": "+08:00", "ranges": []util.MapStr{ { "from": "now-13d/d", - "to": "now-12d/d", + "to": "now-12d/d", }, { "from": "now-12d/d", - "to": "now-11d/d", + "to": "now-11d/d", }, { "from": "now-11d/d", - "to": "now-10d/d", + "to": "now-10d/d", }, { "from": "now-10d/d", - "to": "now-9d/d", + "to": "now-9d/d", }, { "from": "now-9d/d", - "to": "now-8d/d", + "to": "now-8d/d", }, { "from": "now-8d/d", - "to": "now-7d/d", + "to": "now-7d/d", }, { "from": "now-7d/d", - "to": "now-6d/d", + "to": "now-6d/d", }, { "from": "now-6d/d", - "to": "now-5d/d", + "to": "now-5d/d", }, { "from": "now-5d/d", - "to": "now-4d/d", + "to": "now-4d/d", }, { "from": "now-4d/d", - "to": "now-3d/d", - },{ + "to": "now-3d/d", + }, { 
"from": "now-3d/d", - "to": "now-2d/d", + "to": "now-2d/d", }, { "from": "now-2d/d", - "to": "now-1d/d", + "to": "now-1d/d", }, { "from": "now-1d/d", - "to": "now/d", + "to": "now/d", }, { "from": "now/d", - "to": "now", + "to": "now", }, }, }, @@ -1018,7 +1012,7 @@ func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{}, { "range": util.MapStr{ "timestamp": util.MapStr{ - "gte":"now-15d", + "gte": "now-15d", "lte": "now", }, }, @@ -1056,13 +1050,13 @@ func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{}, recentStatus[nodeKey] = []interface{}{} if histogramAgg, ok := bk["uptime_histogram"].(map[string]interface{}); ok { if bks, ok := histogramAgg["buckets"].([]interface{}); ok { - for _, bkItem := range bks { + for _, bkItem := range bks { if bkVal, ok := bkItem.(map[string]interface{}); ok { if minUptime, ok := util.GetMapValueByKeys([]string{"min_uptime", "value"}, bkVal); ok { //mark node status as offline when uptime less than 10m if v, ok := minUptime.(float64); ok && v >= 600000 { recentStatus[nodeKey] = append(recentStatus[nodeKey], []interface{}{bkVal["key"], "online"}) - }else{ + } else { recentStatus[nodeKey] = append(recentStatus[nodeKey], []interface{}{bkVal["key"], "offline"}) } } @@ -1080,10 +1074,10 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps max = h.GetParameterOrDefault(req, "max", "now") ) - resBody := map[string] interface{}{} + resBody := map[string]interface{}{} id := ps.ByName("id") nodeUUID := ps.ByName("node_id") - q := &orm.Query{ Size: 1} + q := &orm.Query{Size: 1} q.AddSort("timestamp", orm.DESC) q.Conds = orm.And( orm.Eq("metadata.category", "elasticsearch"), @@ -1095,16 +1089,16 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps err, result := orm.Search(event.Event{}, q) if err != nil { resBody["error"] = err.Error() - h.WriteJSON(w,resBody, http.StatusInternalServerError ) + h.WriteJSON(w, resBody, http.StatusInternalServerError) } namesM := util.MapStr{} if len(result.Result) > 0 { if data, ok := result.Result[0].(map[string]interface{}); ok { if routingTable, exists := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "node_routing_table"}, data); exists { - if rows, ok := routingTable.([]interface{}); ok{ + if rows, ok := routingTable.([]interface{}); ok { for _, row := range rows { if v, ok := row.(map[string]interface{}); ok { - if indexName, ok := v["index"].(string); ok{ + if indexName, ok := v["index"].(string); ok { namesM[indexName] = true } } @@ -1114,12 +1108,12 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps } } - indexNames := make([]interface{}, 0, len(namesM) ) + indexNames := make([]interface{}, 0, len(namesM)) for name, _ := range namesM { indexNames = append(indexNames, name) } - q1 := &orm.Query{ Size: 100} + q1 := &orm.Query{Size: 100} q1.AddSort("timestamp", orm.DESC) q1.Conds = orm.And( orm.Eq("metadata.category", "elasticsearch"), @@ -1130,28 +1124,29 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps err, result = orm.Search(elastic.IndexConfig{}, q1) if err != nil { resBody["error"] = err.Error() - h.WriteJSON(w,resBody, http.StatusInternalServerError ) + h.WriteJSON(w, resBody, http.StatusInternalServerError) } indices, err := h.getLatestIndices(req, min, max, id, &result) if err != nil { resBody["error"] = err.Error() - h.WriteJSON(w,resBody, http.StatusInternalServerError ) + h.WriteJSON(w, resBody, http.StatusInternalServerError) } 
h.WriteJSON(w, indices, http.StatusOK) } type ShardsSummary struct { - Index string `json:"index"` - Shards int `json:"shards"` - Replicas int `json:"replicas"` - DocsCount int64 `json:"docs_count"` - DocsDeleted int64 `json:"docs_deleted"` - StoreInBytes int64 `json:"store_in_bytes"` - PriStoreInBytes int64 `json:"pri_store_in_bytes"` - Timestamp interface{} `json:"timestamp"` + Index string `json:"index"` + Shards int `json:"shards"` + Replicas int `json:"replicas"` + DocsCount int64 `json:"docs_count"` + DocsDeleted int64 `json:"docs_deleted"` + StoreInBytes int64 `json:"store_in_bytes"` + PriStoreInBytes int64 `json:"pri_store_in_bytes"` + Timestamp interface{} `json:"timestamp"` } + func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string, clusterID string, result *orm.Result) ([]interface{}, error) { //filter indices allowedIndices, hasAllPrivilege := h.GetAllowedIndices(req, clusterID) @@ -1165,7 +1160,7 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string, query := util.MapStr{ "size": 10000, - "_source": []string{"metadata.labels.index_name", "payload.elasticsearch.shard_stats.docs","payload.elasticsearch.shard_stats.store", "payload.elasticsearch.shard_stats.routing", "timestamp"}, + "_source": []string{"metadata.labels.index_name", "payload.elasticsearch.shard_stats.docs", "payload.elasticsearch.shard_stats.store", "payload.elasticsearch.shard_stats.routing", "timestamp"}, "collapse": util.MapStr{ "field": "metadata.labels.shard_id", }, @@ -1240,7 +1235,7 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string, } if primary == true { indexInfo.Shards++ - }else{ + } else { indexInfo.Replicas++ } indexInfo.Timestamp = hitM["timestamp"] @@ -1249,7 +1244,7 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string, } indices := []interface{}{} var indexPattern *radix.Pattern - if !hasAllPrivilege{ + if !hasAllPrivilege { indexPattern = radix.Compile(allowedIndices...) 
} @@ -1273,21 +1268,21 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string, } if indexInfos[v] != nil { indices = append(indices, util.MapStr{ - "index": v, - "status": state, - "health": health, - "timestamp": indexInfos[v].Timestamp, - "docs_count": indexInfos[v].DocsCount, - "shards": indexInfos[v].Shards, - "replicas": replicasNum, - "unassigned_shards": (replicasNum + 1) * shardsNum - indexInfos[v].Shards - replicasNum, - "store_size": util.FormatBytes(float64(indexInfos[v].StoreInBytes), 1), + "index": v, + "status": state, + "health": health, + "timestamp": indexInfos[v].Timestamp, + "docs_count": indexInfos[v].DocsCount, + "shards": indexInfos[v].Shards, + "replicas": replicasNum, + "unassigned_shards": (replicasNum+1)*shardsNum - indexInfos[v].Shards - replicasNum, + "store_size": util.FormatBytes(float64(indexInfos[v].StoreInBytes), 1), }) } else { indices = append(indices, util.MapStr{ "index": v, "status": state, - "health": health, + "health": health, "timestamp": hitM["timestamp"], }) } @@ -1297,7 +1292,6 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string, return indices, nil } - func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { clusterID := ps.MustGetParameter("id") if GetMonitorState(clusterID) == elastic.ModeAgentless { @@ -1306,7 +1300,7 @@ func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps } nodeID := ps.MustGetParameter("node_id") q1 := orm.Query{ - Size: 1000, + Size: 1000, WildcardIndex: true, CollapseField: "metadata.labels.shard_id", } @@ -1327,7 +1321,7 @@ func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps err, result := orm.Search(&event.Event{}, &q1) if err != nil { log.Error(err) - h.WriteError(w, err.Error(), http.StatusInternalServerError ) + h.WriteError(w, err.Error(), http.StatusInternalServerError) return } var shards = []interface{}{} @@ -1360,7 +1354,7 @@ func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps primary, _ := shardM.GetValue("routing.primary") if primary == true { shardInfo["prirep"] = "p" - }else{ + } else { shardInfo["prirep"] = "r" } shardInfo["state"], _ = shardM.GetValue("routing.state") @@ -1380,7 +1374,7 @@ func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps h.WriteJSON(w, shards, http.StatusOK) } -//deleteNodeMetadata used to clean node metadata after node is offline and not active within 7 days +// deleteNodeMetadata used to clean node metadata after node is offline and not active within 7 days func (h APIHandler) deleteNodeMetadata(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)) indexName := orm.GetIndexName(elastic.NodeConfig{}) @@ -1409,4 +1403,4 @@ func (h APIHandler) deleteNodeMetadata(w http.ResponseWriter, req *http.Request, h.WriteError(w, err, http.StatusInternalServerError) } h.WriteAckOKJSON(w) -} \ No newline at end of file +} diff --git a/modules/elastic/api/proxy.go b/modules/elastic/api/proxy.go index a8b2c29f..b5f0b061 100644 --- a/modules/elastic/api/proxy.go +++ b/modules/elastic/api/proxy.go @@ -278,5 +278,3 @@ func rewriteTableNamesOfSqlRequest(req *http.Request, distribution string) (stri } return strings.Join(unescapedTableNames, ","), nil } - - diff --git a/modules/elastic/api/search.go b/modules/elastic/api/search.go index 5863248e..b9bc975b 100644 --- 
a/modules/elastic/api/search.go +++ b/modules/elastic/api/search.go @@ -38,11 +38,10 @@ import ( "time" ) -func (h *APIHandler) HandleCreateSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){ - resBody := map[string] interface{}{ - } +func (h *APIHandler) HandleCreateSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { + resBody := map[string]interface{}{} targetClusterID := ps.ByName("id") - exists,client,err:=h.GetClusterClient(targetClusterID) + exists, client, err := h.GetClusterClient(targetClusterID) if err != nil { log.Error(err) @@ -51,8 +50,8 @@ func (h *APIHandler) HandleCreateSearchTemplateAction(w http.ResponseWriter, req return } - if !exists{ - resBody["error"] = fmt.Sprintf("cluster [%s] not found",targetClusterID) + if !exists { + resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID) log.Error(resBody["error"]) h.WriteJSON(w, resBody, http.StatusNotFound) return @@ -69,7 +68,7 @@ func (h *APIHandler) HandleCreateSearchTemplateAction(w http.ResponseWriter, req } var body = map[string]interface{}{ "script": map[string]interface{}{ - "lang": "mustache", + "lang": "mustache", "source": template.Source, }, } @@ -89,7 +88,7 @@ func (h *APIHandler) HandleCreateSearchTemplateAction(w http.ResponseWriter, req template.Created = time.Now() template.Updated = template.Created template.ClusterID = targetClusterID - index:=orm.GetIndexName(elastic.SearchTemplate{}) + index := orm.GetIndexName(elastic.SearchTemplate{}) insertRes, err := esClient.Index(index, "", id, template, "wait_for") if err != nil { log.Error(err) @@ -102,14 +101,13 @@ func (h *APIHandler) HandleCreateSearchTemplateAction(w http.ResponseWriter, req resBody["_id"] = id resBody["result"] = insertRes.Result - h.WriteJSON(w, resBody,http.StatusOK) + h.WriteJSON(w, resBody, http.StatusOK) } -func (h *APIHandler) HandleUpdateSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){ - resBody := map[string] interface{}{ - } +func (h *APIHandler) HandleUpdateSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { + resBody := map[string]interface{}{} targetClusterID := ps.ByName("id") - exists,client,err:=h.GetClusterClient(targetClusterID) + exists, client, err := h.GetClusterClient(targetClusterID) if err != nil { log.Error(err) @@ -118,8 +116,8 @@ func (h *APIHandler) HandleUpdateSearchTemplateAction(w http.ResponseWriter, req return } - if !exists{ - resBody["error"] = fmt.Sprintf("cluster [%s] not found",targetClusterID) + if !exists { + resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID) log.Error(resBody["error"]) h.WriteJSON(w, resBody, http.StatusNotFound) return @@ -136,8 +134,8 @@ func (h *APIHandler) HandleUpdateSearchTemplateAction(w http.ResponseWriter, req } templateID := ps.ByName("template_id") esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)) - index:=orm.GetIndexName(elastic.SearchTemplate{}) - getRes, err := esClient.Get(index, "",templateID) + index := orm.GetIndexName(elastic.SearchTemplate{}) + getRes, err := esClient.Get(index, "", templateID) if err != nil { log.Error(err) resBody["error"] = err.Error() @@ -197,9 +195,9 @@ func (h *APIHandler) HandleUpdateSearchTemplateAction(w http.ResponseWriter, req ht := &elastic.SearchTemplateHistory{ TemplateID: templateID, - Action: "update", - Content: originTemplate, - Created: time.Now(), + Action: "update", + Content: originTemplate, + Created: 
time.Now(), } esClient.Index(orm.GetIndexName(ht), "", util.GetUUID(), ht, "") @@ -207,14 +205,13 @@ func (h *APIHandler) HandleUpdateSearchTemplateAction(w http.ResponseWriter, req resBody["_id"] = templateID resBody["result"] = insertRes.Result - h.WriteJSON(w, resBody,http.StatusOK) + h.WriteJSON(w, resBody, http.StatusOK) } -func (h *APIHandler) HandleDeleteSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){ - resBody := map[string] interface{}{ - } +func (h *APIHandler) HandleDeleteSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { + resBody := map[string]interface{}{} targetClusterID := ps.ByName("id") - exists,client,err:=h.GetClusterClient(targetClusterID) + exists, client, err := h.GetClusterClient(targetClusterID) if err != nil { log.Error(err) resBody["error"] = err.Error() @@ -222,8 +219,8 @@ func (h *APIHandler) HandleDeleteSearchTemplateAction(w http.ResponseWriter, req return } - if !exists{ - resBody["error"] = fmt.Sprintf("cluster [%s] not found",targetClusterID) + if !exists { + resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID) log.Error(resBody["error"]) h.WriteJSON(w, resBody, http.StatusNotFound) return @@ -231,7 +228,7 @@ func (h *APIHandler) HandleDeleteSearchTemplateAction(w http.ResponseWriter, req templateID := ps.ByName("template_id") - index:=orm.GetIndexName(elastic.SearchTemplate{}) + index := orm.GetIndexName(elastic.SearchTemplate{}) esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)) res, err := esClient.Get(index, "", templateID) if err != nil { @@ -258,9 +255,9 @@ func (h *APIHandler) HandleDeleteSearchTemplateAction(w http.ResponseWriter, req ht := &elastic.SearchTemplateHistory{ TemplateID: templateID, - Action: "delete", - Content: res.Source, - Created: time.Now(), + Action: "delete", + Content: res.Source, + Created: time.Now(), } _, err = esClient.Index(orm.GetIndexName(ht), "", util.GetUUID(), ht, "wait_for") if err != nil { @@ -273,21 +270,20 @@ func (h *APIHandler) HandleDeleteSearchTemplateAction(w http.ResponseWriter, req } -func (h *APIHandler) HandleSearchSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){ - resBody := map[string] interface{}{ - } +func (h *APIHandler) HandleSearchSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { + resBody := map[string]interface{}{} var ( - name = h.GetParameterOrDefault(req, "name", "") - strFrom = h.GetParameterOrDefault(req, "from", "0") - strSize = h.GetParameterOrDefault(req, "size", "20") - queryDSL = `{"query":{"bool":{"must":[%s]}},"from": %d, "size": %d}` + name = h.GetParameterOrDefault(req, "name", "") + strFrom = h.GetParameterOrDefault(req, "from", "0") + strSize = h.GetParameterOrDefault(req, "size", "20") + queryDSL = `{"query":{"bool":{"must":[%s]}},"from": %d, "size": %d}` mustBuilder = &strings.Builder{} ) from, _ := strconv.Atoi(strFrom) size, _ := strconv.Atoi(strSize) targetClusterID := ps.ByName("id") mustBuilder.WriteString(fmt.Sprintf(`{"match":{"cluster_id": "%s"}}`, targetClusterID)) - if name != ""{ + if name != "" { mustBuilder.WriteString(fmt.Sprintf(`,{"match":{"name": "%s"}}`, name)) } @@ -305,8 +301,8 @@ func (h *APIHandler) HandleSearchSearchTemplateAction(w http.ResponseWriter, req h.WriteJSON(w, res, http.StatusOK) } -func (h *APIHandler) HandleGetSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){ - resBody := map[string] interface{}{} 
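
HandleSearchSearchTemplateAction and HandleSearchSearchTemplateHistoryAction both assemble their query DSL by writing match clauses into a strings.Builder and fmt.Sprintf-ing the result into a JSON skeleton. A self-contained sketch of that pattern (the cluster ID and template name here are made-up sample values):

package main

import (
	"fmt"
	"strings"
)

func main() {
	const queryDSL = `{"query":{"bool":{"must":[%s]}},"from": %d, "size": %d}`
	mustBuilder := &strings.Builder{}
	// the cluster filter is always present
	mustBuilder.WriteString(fmt.Sprintf(`{"match":{"cluster_id": "%s"}}`, "sample-cluster-id"))
	// optional filters are appended with a leading comma, as in the handlers
	name := "sample-template"
	if name != "" {
		mustBuilder.WriteString(fmt.Sprintf(`,{"match":{"name": "%s"}}`, name))
	}
	fmt.Printf(queryDSL+"\n", mustBuilder.String(), 0, 20)
}

Since the handlers interpolate user-supplied values directly into the JSON string, a name containing a double quote would produce invalid DSL; building the body with util.MapStr and serializing it, as the metric queries elsewhere in this patch do, would sidestep that.
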
+func (h *APIHandler) HandleGetSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { + resBody := map[string]interface{}{} id := ps.ByName("template_id") indexName := orm.GetIndexName(elastic.SearchTemplate{}) @@ -314,31 +310,30 @@ func (h *APIHandler) HandleGetSearchTemplateAction(w http.ResponseWriter, req *h if err != nil { log.Error(err) resBody["error"] = err.Error() - if getResponse!=nil{ + if getResponse != nil { h.WriteJSON(w, resBody, getResponse.StatusCode) - }else{ + } else { h.WriteJSON(w, resBody, http.StatusInternalServerError) } return } - h.WriteJSON(w,getResponse,200) + h.WriteJSON(w, getResponse, 200) } -func (h *APIHandler) HandleSearchSearchTemplateHistoryAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){ - resBody := map[string] interface{}{ - } +func (h *APIHandler) HandleSearchSearchTemplateHistoryAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { + resBody := map[string]interface{}{} var ( - templateID = h.GetParameterOrDefault(req, "template_id", "") - strFrom = h.GetParameterOrDefault(req, "from", "0") - strSize = h.GetParameterOrDefault(req, "size", "20") - queryDSL = `{"query":{"bool":{"must":[%s]}},"from": %d, "size": %d}` + templateID = h.GetParameterOrDefault(req, "template_id", "") + strFrom = h.GetParameterOrDefault(req, "from", "0") + strSize = h.GetParameterOrDefault(req, "size", "20") + queryDSL = `{"query":{"bool":{"must":[%s]}},"from": %d, "size": %d}` mustBuilder = &strings.Builder{} ) from, _ := strconv.Atoi(strFrom) size, _ := strconv.Atoi(strSize) targetClusterID := ps.ByName("id") mustBuilder.WriteString(fmt.Sprintf(`{"match":{"content.cluster_id": "%s"}}`, targetClusterID)) - if templateID != ""{ + if templateID != "" { mustBuilder.WriteString(fmt.Sprintf(`,{"match":{"template_id": "%s"}}`, templateID)) } @@ -356,11 +351,10 @@ func (h *APIHandler) HandleSearchSearchTemplateHistoryAction(w http.ResponseWrit h.WriteJSON(w, res, http.StatusOK) } -func (h *APIHandler) HandleRenderTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){ - resBody := map[string] interface{}{ - } +func (h *APIHandler) HandleRenderTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { + resBody := map[string]interface{}{} targetClusterID := ps.ByName("id") - exists,client,err:=h.GetClusterClient(targetClusterID) + exists, client, err := h.GetClusterClient(targetClusterID) if err != nil { log.Error(err) resBody["error"] = err.Error() @@ -368,8 +362,8 @@ func (h *APIHandler) HandleRenderTemplateAction(w http.ResponseWriter, req *http return } - if !exists{ - resBody["error"] = fmt.Sprintf("cluster [%s] not found",targetClusterID) + if !exists { + resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID) log.Error(resBody["error"]) h.WriteJSON(w, resBody, http.StatusNotFound) return @@ -394,11 +388,10 @@ func (h *APIHandler) HandleRenderTemplateAction(w http.ResponseWriter, req *http h.WriteJSON(w, string(res), http.StatusOK) } -func (h *APIHandler) HandleSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){ - resBody := map[string] interface{}{ - } +func (h *APIHandler) HandleSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { + resBody := map[string]interface{}{} targetClusterID := ps.ByName("id") - exists,client,err:=h.GetClusterClient(targetClusterID) + exists, client, err := h.GetClusterClient(targetClusterID) if err != nil { log.Error(err) resBody["error"] = 
 			err.Error()
@@ -406,8 +399,8 @@ func (h *APIHandler) HandleSearchTemplateAction(w http.ResponseWriter, req *http
 		return
 	}
 
-	if !exists{
-		resBody["error"] = fmt.Sprintf("cluster [%s] not found",targetClusterID)
+	if !exists {
+		resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID)
 		log.Error(resBody["error"])
 		h.WriteJSON(w, resBody, http.StatusNotFound)
 		return
@@ -430,4 +423,4 @@ func (h *APIHandler) HandleSearchTemplateAction(w http.ResponseWriter, req *http
 	}
 	h.WriteJSON(w, string(res), http.StatusOK)
-}
\ No newline at end of file
+}
diff --git a/modules/elastic/api/setting.go b/modules/elastic/api/setting.go
index a747fe02..941b84f4 100644
--- a/modules/elastic/api/setting.go
+++ b/modules/elastic/api/setting.go
@@ -36,8 +36,7 @@ import (
 )
 
 func (h *APIHandler) HandleSettingAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
-	resBody := map[string]interface{}{
-	}
+	resBody := map[string]interface{}{}
 	targetClusterID := ps.ByName("id")
 	esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
@@ -58,12 +57,11 @@ func (h *APIHandler) HandleSettingAction(w http.ResponseWriter, req *http.Reques
 	searchRes, err := esClient.SearchWithRawQueryDSL(indexName, []byte(queryDSL))
 	if len(searchRes.Hits.Hits) > 0 {
 		_, err = esClient.Index(indexName, "", searchRes.Hits.Hits[0].ID, reqParams, "wait_for")
-	}else{
+	} else {
 		reqParams.ID = util.GetUUID()
 		_, err = esClient.Index(indexName, "", reqParams.ID, reqParams, "wait_for")
 	}
-
 	if err != nil {
 		log.Error(err)
 		resBody["error"] = err
@@ -71,12 +69,11 @@ func (h *APIHandler) HandleSettingAction(w http.ResponseWriter, req *http.Reques
 		return
 	}
 	resBody["acknowledged"] = true
-	h.WriteJSON(w, resBody ,http.StatusOK)
+	h.WriteJSON(w, resBody, http.StatusOK)
 }
 
 func (h *APIHandler) HandleGetSettingAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
-	resBody := map[string]interface{}{
-	}
+	resBody := map[string]interface{}{}
 	targetClusterID := ps.ByName("id")
 	esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
@@ -94,8 +91,8 @@ func (h *APIHandler) HandleGetSettingAction(w http.ResponseWriter, req *http.Req
 	var value interface{}
 	if len(searchRes.Hits.Hits) > 0 {
 		value = searchRes.Hits.Hits[0].Source["value"]
-	}else{
+	} else {
 		value = ""
 	}
-	h.WriteJSON(w, value ,http.StatusOK)
+	h.WriteJSON(w, value, http.StatusOK)
 }
diff --git a/modules/elastic/api/shard.go b/modules/elastic/api/shard.go
index 109aa152..54784fc0 100644
--- a/modules/elastic/api/shard.go
+++ b/modules/elastic/api/shard.go
@@ -28,12 +28,12 @@ package api
 
 import (
+	log "github.com/cihub/seelog"
+	httprouter "infini.sh/framework/core/api/router"
 	"infini.sh/framework/core/event"
 	"infini.sh/framework/core/orm"
 	"infini.sh/framework/modules/elastic/adapter"
 	"net/http"
-	log "github.com/cihub/seelog"
-	httprouter "infini.sh/framework/core/api/router"
 )
 
 func (h *APIHandler) GetShardInfo(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
diff --git a/modules/elastic/api/template.go b/modules/elastic/api/template.go
index f1b70f92..1ed601ea 100644
--- a/modules/elastic/api/template.go
+++ b/modules/elastic/api/template.go
@@ -36,7 +36,7 @@ import (
 	"src/github.com/buger/jsonparser"
 )
 
-func (h *APIHandler) HandleGetTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
+func (h *APIHandler) HandleGetTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
 	clusterID := ps.MustGetParameter("id")
 	esClient := elastic.GetClient(clusterID)
 	templates, err := esClient.GetTemplate("")
@@ -48,7 +48,7 @@ func (h *APIHandler) HandleGetTemplateAction(w http.ResponseWriter, req *http.Re
 	h.WriteJSON(w, templates, http.StatusOK)
 }
 
-func (h *APIHandler) HandleSaveTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
+func (h *APIHandler) HandleSaveTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
 	clusterID := ps.MustGetParameter("id")
 	templateName := ps.MustGetParameter("template_name")
 	esClient := elastic.GetClient(clusterID)
@@ -66,10 +66,10 @@ func (h *APIHandler) HandleSaveTemplateAction(w http.ResponseWriter, req *http.R
 	}
 	resErr, _, _, _ := jsonparser.Get(esResBody, "error")
 	if resErr != nil {
-		errStr := string(resErr)
+		errStr := string(resErr)
 		log.Errorf("put template error: %s", errStr)
 		h.WriteError(w, errStr, http.StatusInternalServerError)
 		return
 	}
 	h.WriteAckOKJSON(w)
-}
\ No newline at end of file
+}
diff --git a/modules/elastic/api/threadpool_metrics.go b/modules/elastic/api/threadpool_metrics.go
index a7bae610..01561db1 100644
--- a/modules/elastic/api/threadpool_metrics.go
+++ b/modules/elastic/api/threadpool_metrics.go
@@ -35,57 +35,57 @@ import (
 )
 
 const (
-	ThreadPoolGetGroupKey = "thread_pool_get"
-	ThreadPoolSearchGroupKey = "thread_pool_search"
-	ThreadPoolFlushGroupKey = "thread_pool_flush"
-	ThreadPoolRefreshGroupKey = "thread_pool_refresh"
+	ThreadPoolGetGroupKey        = "thread_pool_get"
+	ThreadPoolSearchGroupKey     = "thread_pool_search"
+	ThreadPoolFlushGroupKey      = "thread_pool_flush"
+	ThreadPoolRefreshGroupKey    = "thread_pool_refresh"
 	ThreadPoolWriteGroupKey      = "thread_pool_write"
 	ThreadPoolForceMergeGroupKey = "thread_pool_force_merge"
-	ThreadPoolIndexGroupKey = "thread_pool_index"
-	ThreadPoolBulkGroupKey = "thread_pool_bulk"
+	ThreadPoolIndexGroupKey      = "thread_pool_index"
+	ThreadPoolBulkGroupKey       = "thread_pool_bulk"
 )
 
 const (
-	SearchThreadsMetricKey = "search_threads"
-	IndexThreadsMetricKey = "index_threads"
-	BulkThreadsMetricKey = "bulk_threads"
-	FlushThreadsMetricKey = "flush_threads"
-	RefreshThreadsMetricKey = "refresh_threads"
-	WriteThreadsMetricKey = "write_threads"
-	ForceMergeThreadsMetricKey = "force_merge_threads"
-	SearchQueueMetricKey = "search_queue"
-	IndexQueueMetricKey = "index_queue"
-	BulkQueueMetricKey = "bulk_queue"
-	FlushQueueMetricKey = "flush_queue"
-	RefreshQueueMetricKey = "refresh_queue"
-	WriteQueueMetricKey = "write_queue"
-	SearchActiveMetricKey = "search_active"
-	IndexActiveMetricKey = "index_active"
-	BulkActiveMetricKey = "bulk_active"
-	FlushActiveMetricKey = "flush_active"
-	WriteActiveMetricKey = "write_active"
-	ForceMergeActiveMetricKey = "force_merge_active"
-	SearchRejectedMetricKey = "search_rejected"
-	IndexRejectedMetricKey = "index_rejected"
-	BulkRejectedMetricKey = "bulk_rejected"
-	FlushRejectedMetricKey = "flush_rejected"
-	WriteRejectedMetricKey = "write_rejected"
+	SearchThreadsMetricKey      = "search_threads"
+	IndexThreadsMetricKey       = "index_threads"
+	BulkThreadsMetricKey        = "bulk_threads"
+	FlushThreadsMetricKey       = "flush_threads"
+	RefreshThreadsMetricKey     = "refresh_threads"
+	WriteThreadsMetricKey       = "write_threads"
+	ForceMergeThreadsMetricKey  = "force_merge_threads"
+	SearchQueueMetricKey        = "search_queue"
+	IndexQueueMetricKey         = "index_queue"
+	BulkQueueMetricKey          = "bulk_queue"
+	FlushQueueMetricKey         = "flush_queue"
+	RefreshQueueMetricKey       = "refresh_queue"
+	WriteQueueMetricKey         = "write_queue"
+	SearchActiveMetricKey       = "search_active"
+	IndexActiveMetricKey        = "index_active"
+	BulkActiveMetricKey         = "bulk_active"
+	FlushActiveMetricKey        = "flush_active"
+	WriteActiveMetricKey        = "write_active"
+	ForceMergeActiveMetricKey   = "force_merge_active"
+	SearchRejectedMetricKey     = "search_rejected"
+	IndexRejectedMetricKey      = "index_rejected"
+	BulkRejectedMetricKey       = "bulk_rejected"
+	FlushRejectedMetricKey      = "flush_rejected"
+	WriteRejectedMetricKey      = "write_rejected"
 	ForceMergeRejectedMetricKey = "force_merge_rejected"
-	GetThreadsMetricKey = "get_threads"
-	GetQueueMetricKey = "get_queue"
-	GetActiveMetricKey = "get_active"
-	GetRejectedMetricKey = "get_rejected"
-	RefreshActiveMetricKey = "refresh_active"
-	RefreshRejectedMetricKey = "refresh_rejected"
-	ForceMergeQueueMetricKey = "force_merge_queue"
+	GetThreadsMetricKey         = "get_threads"
+	GetQueueMetricKey           = "get_queue"
+	GetActiveMetricKey          = "get_active"
+	GetRejectedMetricKey        = "get_rejected"
+	RefreshActiveMetricKey      = "refresh_active"
+	RefreshRejectedMetricKey    = "refresh_rejected"
+	ForceMergeQueueMetricKey    = "force_merge_queue"
 )
 
-func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string, bucketSize int, min, max int64, nodeName string, top int, metricKey string) (map[string]*common.MetricItem, error){
+func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string, bucketSize int, min, max int64, nodeName string, top int, metricKey string) (map[string]*common.MetricItem, error) {
 	clusterUUID, err := h.getClusterUUID(clusterID)
 	if err != nil {
 		return nil, err
 	}
-	bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
+	bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
 	var must = []util.MapStr{
 		{
 			"term": util.MapStr{
@@ -108,7 +108,7 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
 	if nodeName != "" {
 		nodeNames = strings.Split(nodeName, ",")
 		top = len(nodeNames)
-	}else{
+	} else {
 		nodeNames, err = h.getTopNodeName(clusterID, top, 15)
 		if err != nil {
 			log.Error(err)
@@ -131,10 +131,9 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
 				},
 			},
 		},
-
 		})
 	}
-	should := []util.MapStr{
+	should := []util.MapStr{
 		{
 			"term": util.MapStr{
 				"metadata.labels.cluster_id": util.MapStr{
@@ -143,20 +142,20 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
 			},
 		},
 		{
-			"term":util.MapStr{
-				"metadata.labels.cluster_uuid":util.MapStr{
+			"term": util.MapStr{
+				"metadata.labels.cluster_uuid": util.MapStr{
 					"value": clusterUUID,
 				},
 			},
 		},
 	}
-	query:=map[string]interface{}{}
-	query["query"]=util.MapStr{
+	query := map[string]interface{}{}
+	query["query"] = util.MapStr{
 		"bool": util.MapStr{
-			"must": must,
+			"must":                 must,
 			"minimum_should_match": 1,
-			"should": should,
+			"should":               should,
 			"filter": []util.MapStr{
 				{
 					"range": util.MapStr{
@@ -173,159 +172,159 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
 	switch metricKey {
 	case SearchThreadsMetricKey:
 		searchThreadsMetric := newMetricItem(SearchThreadsMetricKey, 1, ThreadPoolSearchGroupKey)
-		searchThreadsMetric.AddAxi("Search Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		searchThreadsMetric.AddAxi("Search Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		queueMetricItems = append(queueMetricItems, GroupMetricItem{
-			Key: "search_threads",
-			Field: "payload.elasticsearch.node_stats.thread_pool.search.threads",
-			ID: util.GetUUID(),
+			Key:          "search_threads",
+			Field:        "payload.elasticsearch.node_stats.thread_pool.search.threads",
+			ID:           util.GetUUID(),
 			IsDerivative: false,
-			MetricItem: searchThreadsMetric,
-			FormatType: "num",
-			Units: "",
+			MetricItem:   searchThreadsMetric,
+			FormatType:   "num",
+			Units:        "",
 		})
 	case SearchQueueMetricKey:
 		searchQueueMetric := newMetricItem(SearchQueueMetricKey, 1, ThreadPoolSearchGroupKey)
-		searchQueueMetric.AddAxi("Search Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		searchQueueMetric.AddAxi("Search Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		queueMetricItems = append(queueMetricItems, GroupMetricItem{
-			Key: "search_queue",
-			Field: "payload.elasticsearch.node_stats.thread_pool.search.queue",
-			ID: util.GetUUID(),
+			Key:          "search_queue",
+			Field:        "payload.elasticsearch.node_stats.thread_pool.search.queue",
+			ID:           util.GetUUID(),
 			IsDerivative: false,
-			MetricItem: searchQueueMetric,
-			FormatType: "num",
-			Units: "",
+			MetricItem:   searchQueueMetric,
+			FormatType:   "num",
+			Units:        "",
 		})
 	case SearchActiveMetricKey:
 		searchActiveMetric := newMetricItem(SearchActiveMetricKey, 1, ThreadPoolSearchGroupKey)
-		searchActiveMetric.AddAxi("Search Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		searchActiveMetric.AddAxi("Search Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		queueMetricItems = append(queueMetricItems, GroupMetricItem{
-			Key: "search_active",
-			Field: "payload.elasticsearch.node_stats.thread_pool.search.active",
-			ID: util.GetUUID(),
+			Key:          "search_active",
+			Field:        "payload.elasticsearch.node_stats.thread_pool.search.active",
+			ID:           util.GetUUID(),
 			IsDerivative: false,
-			MetricItem: searchActiveMetric,
-			FormatType: "num",
-			Units: "",
+			MetricItem:   searchActiveMetric,
+			FormatType:   "num",
+			Units:        "",
 		})
 	case SearchRejectedMetricKey:
 		searchRejectedMetric := newMetricItem(SearchRejectedMetricKey, 1, ThreadPoolSearchGroupKey)
-		searchRejectedMetric.AddAxi("Search Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		searchRejectedMetric.AddAxi("Search Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		queueMetricItems = append(queueMetricItems, GroupMetricItem{
-			Key: "search_rejected",
-			Field: "payload.elasticsearch.node_stats.thread_pool.search.rejected",
-			ID: util.GetUUID(),
+			Key:          "search_rejected",
+			Field:        "payload.elasticsearch.node_stats.thread_pool.search.rejected",
+			ID:           util.GetUUID(),
 			IsDerivative: true,
-			MetricItem: searchRejectedMetric,
-			FormatType: "num",
-			Units: "rejected/s",
+			MetricItem:   searchRejectedMetric,
+			FormatType:   "num",
+			Units:        "rejected/s",
 		})
 	case GetThreadsMetricKey:
 		getThreadsMetric := newMetricItem(GetThreadsMetricKey, 1, ThreadPoolGetGroupKey)
-		getThreadsMetric.AddAxi("Get Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		getThreadsMetric.AddAxi("Get Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		queueMetricItems = append(queueMetricItems, GroupMetricItem{
-			Key: "get_threads",
-			Field: "payload.elasticsearch.node_stats.thread_pool.get.threads",
-			ID: util.GetUUID(),
+			Key:          "get_threads",
+			Field:        "payload.elasticsearch.node_stats.thread_pool.get.threads",
+			ID:           util.GetUUID(),
 			IsDerivative: false,
-			MetricItem: getThreadsMetric,
-			FormatType: "num",
-			Units: "",
+			MetricItem:   getThreadsMetric,
+			FormatType:   "num",
+			Units:        "",
 		})
 	case GetQueueMetricKey:
 		getQueueMetric := newMetricItem(GetQueueMetricKey, 1, ThreadPoolGetGroupKey)
-		getQueueMetric.AddAxi("Get Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		getQueueMetric.AddAxi("Get Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		queueMetricItems = append(queueMetricItems, GroupMetricItem{
-			Key: "get_queue",
-			Field: "payload.elasticsearch.node_stats.thread_pool.get.queue",
-			ID: util.GetUUID(),
+			Key:          "get_queue",
+			Field:        "payload.elasticsearch.node_stats.thread_pool.get.queue",
+			ID:           util.GetUUID(),
 			IsDerivative: false,
-			MetricItem: getQueueMetric,
-			FormatType: "num",
-			Units: "",
+			MetricItem:   getQueueMetric,
+			FormatType:   "num",
+			Units:        "",
 		})
 	case GetActiveMetricKey:
 		getActiveMetric := newMetricItem(GetActiveMetricKey, 1, ThreadPoolGetGroupKey)
-		getActiveMetric.AddAxi("Get Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		getActiveMetric.AddAxi("Get Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		queueMetricItems = append(queueMetricItems, GroupMetricItem{
-			Key: "get_active",
-			Field: "payload.elasticsearch.node_stats.thread_pool.get.active",
-			ID: util.GetUUID(),
+			Key:          "get_active",
+			Field:        "payload.elasticsearch.node_stats.thread_pool.get.active",
+			ID:           util.GetUUID(),
 			IsDerivative: false,
-			MetricItem: getActiveMetric,
-			FormatType: "num",
-			Units: "",
+			MetricItem:   getActiveMetric,
+			FormatType:   "num",
+			Units:        "",
 		})
 	case GetRejectedMetricKey:
 		getRejectedMetric := newMetricItem(GetRejectedMetricKey, 1, ThreadPoolGetGroupKey)
-		getRejectedMetric.AddAxi("Get Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		getRejectedMetric.AddAxi("Get Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		queueMetricItems = append(queueMetricItems, GroupMetricItem{
-			Key: "get_rejected",
-			Field: "payload.elasticsearch.node_stats.thread_pool.get.rejected",
-			ID: util.GetUUID(),
+			Key:          "get_rejected",
+			Field:        "payload.elasticsearch.node_stats.thread_pool.get.rejected",
+			ID:           util.GetUUID(),
 			IsDerivative: true,
-			MetricItem: getRejectedMetric,
-			FormatType: "num",
-			Units: "rejected/s",
+			MetricItem:   getRejectedMetric,
+			FormatType:   "num",
+			Units:        "rejected/s",
 		})
 	case FlushThreadsMetricKey:
 		flushThreadsMetric := newMetricItem(FlushThreadsMetricKey, 1, ThreadPoolFlushGroupKey)
-		flushThreadsMetric.AddAxi("Flush Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		flushThreadsMetric.AddAxi("Flush Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		queueMetricItems = append(queueMetricItems, GroupMetricItem{
-			Key: "flush_threads",
-			Field: "payload.elasticsearch.node_stats.thread_pool.flush.threads",
-			ID: util.GetUUID(),
+			Key:          "flush_threads",
+			Field:        "payload.elasticsearch.node_stats.thread_pool.flush.threads",
+			ID:           util.GetUUID(),
 			IsDerivative: false,
-			MetricItem: flushThreadsMetric,
-			FormatType: "num",
-			Units: "",
+			MetricItem:   flushThreadsMetric,
+			FormatType:   "num",
+			Units:        "",
 		})
 	case FlushQueueMetricKey:
 		flushQueueMetric := newMetricItem(FlushQueueMetricKey, 1, ThreadPoolFlushGroupKey)
-		flushQueueMetric.AddAxi("Get Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		flushQueueMetric.AddAxi("Get Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		queueMetricItems = append(queueMetricItems, GroupMetricItem{
-			Key: "flush_queue",
-			Field: "payload.elasticsearch.node_stats.thread_pool.flush.queue",
-			ID: util.GetUUID(),
+			Key:          "flush_queue",
+			Field:        "payload.elasticsearch.node_stats.thread_pool.flush.queue",
+			ID:           util.GetUUID(),
 			IsDerivative: false,
-			MetricItem: flushQueueMetric,
-			FormatType: "num",
-			Units: "",
+			MetricItem:   flushQueueMetric,
+			FormatType:   "num",
+			Units:        "",
 		})
 	case FlushActiveMetricKey:
 		flushActiveMetric := newMetricItem(FlushActiveMetricKey, 1, ThreadPoolFlushGroupKey)
-		flushActiveMetric.AddAxi("Flush Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		flushActiveMetric.AddAxi("Flush Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		queueMetricItems = append(queueMetricItems, GroupMetricItem{
-			Key: "flush_active",
-			Field: "payload.elasticsearch.node_stats.thread_pool.flush.active",
-			ID: util.GetUUID(),
+			Key:          "flush_active",
+			Field:        "payload.elasticsearch.node_stats.thread_pool.flush.active",
+			ID:           util.GetUUID(),
 			IsDerivative: false,
-			MetricItem: flushActiveMetric,
-			FormatType: "num",
-			Units: "",
+			MetricItem:   flushActiveMetric,
+			FormatType:   "num",
+			Units:        "",
 		})
 	case FlushRejectedMetricKey:
 		flushRejectedMetric := newMetricItem(FlushRejectedMetricKey, 1, ThreadPoolFlushGroupKey)
-		flushRejectedMetric.AddAxi("Flush Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		flushRejectedMetric.AddAxi("Flush Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		queueMetricItems = append(queueMetricItems, GroupMetricItem{
-			Key: "flush_rejected",
-			Field: "payload.elasticsearch.node_stats.thread_pool.flush.rejected",
-			ID: util.GetUUID(),
+			Key:          "flush_rejected",
+			Field:        "payload.elasticsearch.node_stats.thread_pool.flush.rejected",
+			ID:           util.GetUUID(),
 			IsDerivative: true,
-			MetricItem: flushRejectedMetric,
-			FormatType: "num",
-			Units: "rejected/s",
+			MetricItem:   flushRejectedMetric,
+			FormatType:   "num",
+			Units:        "rejected/s",
 		})
 	case IndexThreadsMetricKey:
 		indexThreadsMetric := newMetricItem(IndexThreadsMetricKey, 1, ThreadPoolIndexGroupKey)
@@ -485,137 +484,136 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
 		})
 	case RefreshThreadsMetricKey:
 		refreshThreadsMetric := newMetricItem(RefreshThreadsMetricKey, 1, ThreadPoolRefreshGroupKey)
-		refreshThreadsMetric.AddAxi("Refresh Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		refreshThreadsMetric.AddAxi("Refresh Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		queueMetricItems = append(queueMetricItems, GroupMetricItem{
-			Key: "refresh_threads",
-			Field: "payload.elasticsearch.node_stats.thread_pool.refresh.threads",
-			ID: util.GetUUID(),
+			Key:          "refresh_threads",
+			Field:        "payload.elasticsearch.node_stats.thread_pool.refresh.threads",
+			ID:           util.GetUUID(),
 			IsDerivative: false,
-			MetricItem: refreshThreadsMetric,
-			FormatType: "num",
-			Units: "",
+			MetricItem:   refreshThreadsMetric,
+			FormatType:   "num",
+			Units:        "",
 		})
 	case RefreshQueueMetricKey:
 		refreshQueueMetric := newMetricItem(RefreshQueueMetricKey, 1, ThreadPoolRefreshGroupKey)
-		refreshQueueMetric.AddAxi("Refresh Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		refreshQueueMetric.AddAxi("Refresh Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		queueMetricItems = append(queueMetricItems, GroupMetricItem{
-			Key: "refresh_queue",
-			Field: "payload.elasticsearch.node_stats.thread_pool.refresh.queue",
-			ID: util.GetUUID(),
+			Key:          "refresh_queue",
+			Field:        "payload.elasticsearch.node_stats.thread_pool.refresh.queue",
+			ID:           util.GetUUID(),
 			IsDerivative: false,
-			MetricItem: refreshQueueMetric,
-			FormatType: "num",
-			Units: "",
+			MetricItem:   refreshQueueMetric,
+			FormatType:   "num",
+			Units:        "",
 		})
 	case RefreshActiveMetricKey:
 		refreshActiveMetric := newMetricItem(RefreshActiveMetricKey, 1, ThreadPoolRefreshGroupKey)
-		refreshActiveMetric.AddAxi("Refresh Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		refreshActiveMetric.AddAxi("Refresh Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		queueMetricItems = append(queueMetricItems, GroupMetricItem{
-			Key: "refresh_active",
-			Field: "payload.elasticsearch.node_stats.thread_pool.refresh.active",
-			ID: util.GetUUID(),
+			Key:          "refresh_active",
+			Field:        "payload.elasticsearch.node_stats.thread_pool.refresh.active",
+			ID:           util.GetUUID(),
 			IsDerivative: false,
-			MetricItem: refreshActiveMetric,
-			FormatType: "num",
-			Units: "",
+			MetricItem:   refreshActiveMetric,
+			FormatType:   "num",
+			Units:        "",
 		})
 	case RefreshRejectedMetricKey:
 		refreshRejectedMetric := newMetricItem(RefreshRejectedMetricKey, 1, ThreadPoolRefreshGroupKey)
-		refreshRejectedMetric.AddAxi("Refresh Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		refreshRejectedMetric.AddAxi("Refresh Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		queueMetricItems = append(queueMetricItems, GroupMetricItem{
-			Key: "refresh_rejected",
-			Field: "payload.elasticsearch.node_stats.thread_pool.refresh.rejected",
-			ID: util.GetUUID(),
+			Key:          "refresh_rejected",
+			Field:        "payload.elasticsearch.node_stats.thread_pool.refresh.rejected",
+			ID:           util.GetUUID(),
 			IsDerivative: true,
-			MetricItem: refreshRejectedMetric,
-			FormatType: "num",
-			Units: "rejected/s",
+			MetricItem:   refreshRejectedMetric,
+			FormatType:   "num",
+			Units:        "rejected/s",
 		})
 	case ForceMergeThreadsMetricKey:
 		forceMergeThreadsMetric := newMetricItem(ForceMergeThreadsMetricKey, 1, ThreadPoolForceMergeGroupKey)
-		forceMergeThreadsMetric.AddAxi("Force Merge Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		forceMergeThreadsMetric.AddAxi("Force Merge Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		queueMetricItems = append(queueMetricItems, GroupMetricItem{
-			Key: "force_merge_threads",
-			Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.threads",
-			ID: util.GetUUID(),
+			Key:          "force_merge_threads",
+			Field:        "payload.elasticsearch.node_stats.thread_pool.force_merge.threads",
+			ID:           util.GetUUID(),
 			IsDerivative: false,
-			MetricItem: forceMergeThreadsMetric,
-			FormatType: "num",
-			Units: "",
+			MetricItem:   forceMergeThreadsMetric,
+			FormatType:   "num",
+			Units:        "",
 		})
 	case ForceMergeQueueMetricKey:
 		forceMergeQueueMetric := newMetricItem(ForceMergeQueueMetricKey, 1, ThreadPoolForceMergeGroupKey)
-		forceMergeQueueMetric.AddAxi("Force Merge Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		forceMergeQueueMetric.AddAxi("Force Merge Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		queueMetricItems = append(queueMetricItems, GroupMetricItem{
-			Key: "force_merge_queue",
-			Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.queue",
-			ID: util.GetUUID(),
+			Key:          "force_merge_queue",
+			Field:        "payload.elasticsearch.node_stats.thread_pool.force_merge.queue",
+			ID:           util.GetUUID(),
 			IsDerivative: false,
-			MetricItem: forceMergeQueueMetric,
-			FormatType: "num",
-			Units: "",
+			MetricItem:   forceMergeQueueMetric,
+			FormatType:   "num",
+			Units:        "",
 		})
 	case ForceMergeActiveMetricKey:
 		forceMergeActiveMetric := newMetricItem(ForceMergeActiveMetricKey, 1, ThreadPoolForceMergeGroupKey)
-		forceMergeActiveMetric.AddAxi("Force Merge Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		forceMergeActiveMetric.AddAxi("Force Merge Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		queueMetricItems = append(queueMetricItems, GroupMetricItem{
-			Key: "force_merge_active",
-			Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.active",
-			ID: util.GetUUID(),
+			Key:          "force_merge_active",
+			Field:        "payload.elasticsearch.node_stats.thread_pool.force_merge.active",
+			ID:           util.GetUUID(),
 			IsDerivative: false,
-			MetricItem: forceMergeActiveMetric,
-			FormatType: "num",
-			Units: "",
+			MetricItem:   forceMergeActiveMetric,
+			FormatType:   "num",
+			Units:        "",
 		})
 	case ForceMergeRejectedMetricKey:
 		forceMergeRejectedMetric := newMetricItem(ForceMergeRejectedMetricKey, 1, ThreadPoolForceMergeGroupKey)
-		forceMergeRejectedMetric.AddAxi("Force Merge Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		forceMergeRejectedMetric.AddAxi("Force Merge Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		queueMetricItems = append(queueMetricItems, GroupMetricItem{
-			Key: "force_merge_rejected",
-			Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.rejected",
-			ID: util.GetUUID(),
+			Key:          "force_merge_rejected",
+			Field:        "payload.elasticsearch.node_stats.thread_pool.force_merge.rejected",
+			ID:           util.GetUUID(),
 			IsDerivative: true,
-			MetricItem: forceMergeRejectedMetric,
-			FormatType: "num",
-			Units: "rejected/s",
+			MetricItem:   forceMergeRejectedMetric,
+			FormatType:   "num",
+			Units:        "rejected/s",
 		})
 	}
-	//Get Thread Pool queue
-	aggs:=map[string]interface{}{}
+	aggs := map[string]interface{}{}
 
-	for _,metricItem:=range queueMetricItems{
-		aggs[metricItem.ID]=util.MapStr{
-			"max":util.MapStr{
+	for _, metricItem := range queueMetricItems {
+		aggs[metricItem.ID] = util.MapStr{
+			"max": util.MapStr{
 				"field": metricItem.Field,
 			},
 		}
 		if metricItem.Field2 != "" {
-			aggs[metricItem.ID + "_field2"]=util.MapStr{
-				"max":util.MapStr{
+			aggs[metricItem.ID+"_field2"] = util.MapStr{
+				"max": util.MapStr{
 					"field": metricItem.Field2,
 				},
 			}
 		}
-		if metricItem.IsDerivative{
-			aggs[metricItem.ID+"_deriv"]=util.MapStr{
-				"derivative":util.MapStr{
+		if metricItem.IsDerivative {
+			aggs[metricItem.ID+"_deriv"] = util.MapStr{
+				"derivative": util.MapStr{
 					"buckets_path": metricItem.ID,
 				},
 			}
 			if metricItem.Field2 != "" {
-				aggs[metricItem.ID + "_field2_deriv"]=util.MapStr{
-					"derivative":util.MapStr{
+				aggs[metricItem.ID+"_field2_deriv"] = util.MapStr{
+					"derivative": util.MapStr{
 						"buckets_path": metricItem.ID + "_field2",
 					},
 				}
@@ -628,8 +626,8 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
 		panic(err)
 	}
 
-	query["size"]=0
-	query["aggs"]= util.MapStr{
+	query["size"] = 0
+	query["aggs"] = util.MapStr{
 		"group_by_level": util.MapStr{
 			"terms": util.MapStr{
 				"field": "metadata.labels.transport_address",
@@ -637,11 +635,11 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
 			},
 			"aggs": util.MapStr{
 				"dates": util.MapStr{
-					"date_histogram":util.MapStr{
-						"field": "timestamp",
+					"date_histogram": util.MapStr{
+						"field":       "timestamp",
 						intervalField: bucketSizeStr,
 					},
-					"aggs":aggs,
+					"aggs": aggs,
 				},
 			},
 		},
diff --git a/modules/elastic/api/trace_template.go b/modules/elastic/api/trace_template.go
index 87561319..54c79993 100644
--- a/modules/elastic/api/trace_template.go
+++ b/modules/elastic/api/trace_template.go
@@ -38,10 +38,9 @@ import (
 )
 
 func (h *APIHandler) HandleCrateTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
-	resBody := map[string] interface{}{
-	}
+	resBody := map[string]interface{}{}
 	targetClusterID := ps.ByName("id")
-	exists,client,err:=h.GetClusterClient(targetClusterID)
+	exists, client, err := h.GetClusterClient(targetClusterID)
 
 	if err != nil {
 		log.Error(err)
@@ -50,16 +49,14 @@ func (h *APIHandler) HandleCrateTraceTemplateAction(w http.ResponseWriter, req *
 		return
 	}
 
-	if !exists{
-		resBody["error"] = fmt.Sprintf("cluster [%s] not found",targetClusterID)
+	if !exists {
+		resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID)
 		log.Error(resBody["error"])
 		h.WriteJSON(w, resBody, http.StatusNotFound)
 		return
 	}
 
-	var traceReq = &elastic.TraceTemplate{
-
-	}
+	var traceReq = &elastic.TraceTemplate{}
 
 	err = h.DecodeJSON(req, traceReq)
 	if err != nil {
@@ -84,22 +81,21 @@ func (h *APIHandler) HandleCrateTraceTemplateAction(w http.ResponseWriter, req *
 	resBody["_id"] = insertRes.ID
 	resBody["result"] = insertRes.Result
-	h.WriteJSON(w, resBody,http.StatusOK)
+	h.WriteJSON(w, resBody, http.StatusOK)
 }
 
 func (h *APIHandler) HandleSearchTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
-	resBody := map[string] interface{}{
-	}
+	resBody := map[string]interface{}{}
 	var (
-		name = h.GetParameterOrDefault(req, "name", "")
-		queryDSL = `{"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d}`
-		strSize = h.GetParameterOrDefault(req, "size", "20")
+		name        = h.GetParameterOrDefault(req, "name", "")
+		queryDSL    = `{"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d}`
+		strSize     = h.GetParameterOrDefault(req, "size", "20")
 		strFrom     = h.GetParameterOrDefault(req, "from", "0")
 		mustBuilder = &strings.Builder{}
 	)
 	targetClusterID := ps.ByName("id")
 	mustBuilder.WriteString(fmt.Sprintf(`{"term":{"cluster_id":{"value": "%s"}}}`, targetClusterID))
-	if name != ""{
+	if name != "" {
 		mustBuilder.WriteString(fmt.Sprintf(`,{"prefix":{"name": "%s"}}`, name))
 	}
 	size, _ := strconv.Atoi(strSize)
@@ -126,8 +122,7 @@ func (h *APIHandler) HandleSearchTraceTemplateAction(w http.ResponseWriter, req
 }
 
 func (h *APIHandler) HandleSaveTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
-	resBody := map[string]interface{}{
-	}
+	resBody := map[string]interface{}{}
 	reqParams := elastic.TraceTemplate{}
 	err := h.DecodeJSON(req, &reqParams)
@@ -140,7 +135,7 @@ func (h *APIHandler) HandleSaveTraceTemplateAction(w http.ResponseWriter, req *h
 	reqParams.ID = ps.ByName("template_id")
 	reqParams.Updated = time.Now()
 	esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
-	_, err = esClient.Index(orm.GetIndexName(reqParams),"", reqParams.ID, reqParams, "wait_for")
+	_, err = esClient.Index(orm.GetIndexName(reqParams), "", reqParams.ID, reqParams, "wait_for")
 	if err != nil {
 		log.Error(err)
 		resBody["error"] = err.Error()
@@ -152,11 +147,11 @@ func (h *APIHandler) HandleSaveTraceTemplateAction(w http.ResponseWriter, req *h
 	resBody["result"] = "updated"
 	resBody["_source"] = reqParams
 
-	h.WriteJSON(w, resBody,http.StatusOK)
+	h.WriteJSON(w, resBody, http.StatusOK)
 }
 
-func (h *APIHandler) HandleGetTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
-	resBody := map[string] interface{}{}
+func (h *APIHandler) HandleGetTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
+	resBody := map[string]interface{}{}
 	id := ps.ByName("template_id")
 	indexName := orm.GetIndexName(elastic.TraceTemplate{})
@@ -166,7 +161,7 @@ func (h *APIHandler) HandleGetTraceTemplateAction(w http.ResponseWriter, req *ht
 		resBody["error"] = err.Error()
 		h.WriteJSON(w, resBody, http.StatusInternalServerError)
 	}
-	h.WriteJSON(w,getResponse, getResponse.StatusCode)
+	h.WriteJSON(w, getResponse, getResponse.StatusCode)
 }
 
 func (h *APIHandler) HandleDeleteTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
@@ -177,9 +172,9 @@ func (h *APIHandler) HandleDeleteTraceTemplateAction(w http.ResponseWriter, req
 	if err != nil {
 		log.Error(err)
 		resBody["error"] = err.Error()
-		if delRes!=nil{
+		if delRes != nil {
 			h.WriteJSON(w, resBody, delRes.StatusCode)
-		}else{
+		} else {
 			h.WriteJSON(w, resBody, http.StatusInternalServerError)
 		}
 	}
diff --git a/modules/elastic/api/v1/cluster_overview.go b/modules/elastic/api/v1/cluster_overview.go
index 6b65fe27..9c0865d4 100644
--- a/modules/elastic/api/v1/cluster_overview.go
+++ b/modules/elastic/api/v1/cluster_overview.go
@@ -178,7 +178,7 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
 	}
 	histgram := common.NewBucketItem(
 		common.DateHistogramBucket, util.MapStr{
-			"field": "timestamp",
+			"field":       "timestamp",
 			intervalField: bucketSizeStr,
 		})
 	histgram.AddMetricItems(metricItems...)
@@ -669,8 +669,8 @@ type RealtimeNodeInfo struct {
 
 func (h *APIHandler) getIndexQPS(clusterID string, bucketSizeInSeconds int) (map[string]util.MapStr, error) {
 	ver := h.Client().GetVersion()
-	bucketSizeStr := fmt.Sprintf("%ds", bucketSizeInSeconds)
-	intervalField, err := elastic.GetDateHistogramIntervalField(ver.Distribution, ver.Number, bucketSizeStr)
+	bucketSizeStr := fmt.Sprintf("%ds", bucketSizeInSeconds)
+	intervalField, err := elastic.GetDateHistogramIntervalField(ver.Distribution, ver.Number, bucketSizeStr)
 	if err != nil {
 		return nil, err
 	}
@@ -685,7 +685,7 @@ func (h *APIHandler) getIndexQPS(clusterID string, bucketSizeInSeconds int) (map
 		"aggs": util.MapStr{
 			"date": util.MapStr{
 				"date_histogram": util.MapStr{
-					"field": "timestamp",
+					"field":       "timestamp",
 					intervalField: "10s",
 				},
 				"aggs": util.MapStr{
@@ -775,9 +775,9 @@ func (h *APIHandler) QueryQPS(query util.MapStr, bucketSizeInSeconds int) (map[s
 		maxIndexRate      float64
 		maxQueryRate      float64
 		maxIndexBytesRate float64
-		preIndexTotal float64
-		dropNext bool
-		maxTimestamp float64
+		preIndexTotal     float64
+		dropNext          bool
+		maxTimestamp      float64
 	)
 	for _, dateBk := range bks {
 		if dateBkVal, ok := dateBk.(map[string]interface{}); ok {
@@ -786,11 +786,11 @@ func (h *APIHandler) QueryQPS(query util.MapStr, bucketSizeInSeconds int) (map[s
 			if preIndexTotal > 0 {
 				//if value of indexTotal is decreasing, drop the next value,
 				//and we will drop current and next qps value
-				if indexTotalVal - preIndexTotal < 0 {
+				if indexTotalVal-preIndexTotal < 0 {
 					dropNext = true
 					preIndexTotal = indexTotalVal
 					continue
-				}else{
+				} else {
 					dropNext = false
 				}
 			}
@@ -866,11 +866,11 @@ func (h *APIHandler) SearchClusterMetadata(w http.ResponseWriter, req *http.Requ
 				{
 					"match": util.MapStr{
 						reqBody.SearchField: util.MapStr{
-							"query": reqBody.Keyword,
-							"fuzziness": "AUTO",
-							"max_expansions": 10,
-							"prefix_length": 2,
-							"boost": 2,
+							"query":          reqBody.Keyword,
+							"fuzziness":      "AUTO",
+							"max_expansions": 10,
+							"prefix_length":  2,
+							"boost":          2,
 						},
 					},
 				},
@@ -912,11 +912,11 @@ func (h *APIHandler) SearchClusterMetadata(w http.ResponseWriter, req *http.Requ
 				{
 					"match": util.MapStr{
 						"search_text": util.MapStr{
-							"query": reqBody.Keyword,
-							"fuzziness": "AUTO",
-							"max_expansions": 10,
-							"prefix_length": 2,
-							"boost": 2,
+							"query":          reqBody.Keyword,
+							"fuzziness":      "AUTO",
+							"max_expansions": 10,
+							"prefix_length":  2,
+							"boost":          2,
 						},
 					},
 				},
diff --git a/modules/elastic/api/v1/index_metrics.go b/modules/elastic/api/v1/index_metrics.go
index ebbb52e8..283ec5e9 100644
--- a/modules/elastic/api/v1/index_metrics.go
+++ b/modules/elastic/api/v1/index_metrics.go
@@ -39,54 +39,53 @@ import (
 )
 
 const (
-	IndexStorageMetricKey = "index_storage"
-	SegmentCountMetricKey = "segment_count"
-	DocCountMetricKey = "doc_count"
-	DocsDeletedMetricKey = "docs_deleted"
-	QueryTimesMetricKey = "query_times"
-	FetchTimesMetricKey = "fetch_times"
-	ScrollTimesMetricKey = "scroll_times"
-	MergeTimesMetricKey = "merge_times"
-	RefreshTimesMetricKey = "refresh_times"
-	FlushTimesMetricKey = "flush_times"
-	IndexingRateMetricKey = "indexing_rate"
-	IndexingBytesMetricKey = "indexing_bytes"
-	IndexingLatencyMetricKey = "indexing_latency"
-	QueryLatencyMetricKey = "query_latency"
-	FetchLatencyMetricKey = "fetch_latency"
-	MergeLatencyMetricKey = "merge_latency"
-	RefreshLatencyMetricKey = "refresh_latency"
-	ScrollLatencyMetricKey = "scroll_latency"
-	FlushLatencyMetricKey = "flush_latency"
-	QueryCacheMetricKey = "query_cache"
-	RequestCacheMetricKey = "request_cache"
-	RequestCacheHitMetricKey = "request_cache_hit"
-	RequestCacheMissMetricKey = "request_cache_miss"
-	QueryCacheCountMetricKey = "query_cache_count"
-	QueryCacheHitMetricKey = "query_cache_hit"
-	QueryCacheMissMetricKey = "query_cache_miss"
-	FielddataCacheMetricKey = "fielddata_cache"
-	SegmentMemoryMetricKey = "segment_memory"
-	SegmentDocValuesMemoryMetricKey = "segment_doc_values_memory"
-	SegmentTermsMemoryMetricKey = "segment_terms_memory"
-	SegmentFieldsMemoryMetricKey = "segment_fields_memory"
+	IndexStorageMetricKey             = "index_storage"
+	SegmentCountMetricKey             = "segment_count"
+	DocCountMetricKey                 = "doc_count"
+	DocsDeletedMetricKey              = "docs_deleted"
+	QueryTimesMetricKey               = "query_times"
+	FetchTimesMetricKey               = "fetch_times"
+	ScrollTimesMetricKey              = "scroll_times"
+	MergeTimesMetricKey               = "merge_times"
+	RefreshTimesMetricKey             = "refresh_times"
+	FlushTimesMetricKey               = "flush_times"
+	IndexingRateMetricKey             = "indexing_rate"
+	IndexingBytesMetricKey            = "indexing_bytes"
+	IndexingLatencyMetricKey          = "indexing_latency"
+	QueryLatencyMetricKey             = "query_latency"
+	FetchLatencyMetricKey             = "fetch_latency"
+	MergeLatencyMetricKey             = "merge_latency"
+	RefreshLatencyMetricKey           = "refresh_latency"
+	ScrollLatencyMetricKey            = "scroll_latency"
+	FlushLatencyMetricKey             = "flush_latency"
+	QueryCacheMetricKey               = "query_cache"
+	RequestCacheMetricKey             = "request_cache"
+	RequestCacheHitMetricKey          = "request_cache_hit"
+	RequestCacheMissMetricKey         = "request_cache_miss"
+	QueryCacheCountMetricKey          = "query_cache_count"
+	QueryCacheHitMetricKey            = "query_cache_hit"
+	QueryCacheMissMetricKey           = "query_cache_miss"
+	FielddataCacheMetricKey           = "fielddata_cache"
+	SegmentMemoryMetricKey            = "segment_memory"
+	SegmentDocValuesMemoryMetricKey   = "segment_doc_values_memory"
+	SegmentTermsMemoryMetricKey       = "segment_terms_memory"
+	SegmentFieldsMemoryMetricKey      = "segment_fields_memory"
 	SegmentIndexWriterMemoryMetricKey = "segment_index_writer_memory"
 	SegmentTermVectorsMemoryMetricKey = "segment_term_vectors_memory"
-	DocPercentMetricKey = "doc_percent"
-	SegmentNormsMetricKey = "segment_norms_memory"
-	SegmentPointsMetricKey = "segment_points_memory"
-	VersionMapMetricKey = "segment_version_map"
-	FixedBitSetMetricKey = "segment_fixed_bit_set"
-
+	DocPercentMetricKey               = "doc_percent"
+	SegmentNormsMetricKey             = "segment_norms_memory"
+	SegmentPointsMetricKey            = "segment_points_memory"
+	VersionMapMetricKey               = "segment_version_map"
+	FixedBitSetMetricKey              = "segment_fixed_bit_set"
 )
 
-func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clusterID string, bucketSize int, min, max int64, indexName string, top int, metricKey string) (map[string]*common.MetricItem, error){
-	bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
+func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clusterID string, bucketSize int, min, max int64, indexName string, top int, metricKey string) (map[string]*common.MetricItem, error) {
+	bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
 	var must = []util.MapStr{
 		{
-			"term":util.MapStr{
-				"metadata.labels.cluster_id":util.MapStr{
+			"term": util.MapStr{
+				"metadata.labels.cluster_id": util.MapStr{
 					"value": clusterID,
 				},
 			},
@@ -108,7 +107,7 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
 	}
 	var (
 		indexNames []string
-		err error
+		err        error
 	)
 	if indexName != "" {
 		indexNames = strings.Split(indexName, ",")
@@ -116,11 +115,11 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
 		if !hasAllPrivilege && len(allowedIndices) == 0 {
 			return nil, nil
 		}
-		if !hasAllPrivilege{
+		if !hasAllPrivilege {
 			namePattern := radix.Compile(allowedIndices...)
 			var filterNames []string
 			for _, name := range indexNames {
-				if namePattern.Match(name){
+				if namePattern.Match(name) {
 					filterNames = append(filterNames, name)
 				}
 			}
@@ -131,7 +130,7 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
 		}
 		top = len(indexNames)
-	}else{
+	} else {
 		indexNames, err = h.getTopIndexName(req, clusterID, top, 15)
 		if err != nil {
 			log.Error(err)
@@ -146,8 +145,8 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
 		})
 	}
 
-	query:=map[string]interface{}{}
-	query["query"]=util.MapStr{
+	query := map[string]interface{}{}
+	query["query"] = util.MapStr{
 		"bool": util.MapStr{
 			"must": must,
 			"must_not": []util.MapStr{
@@ -295,32 +294,32 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
 	case FlushTimesMetricKey:
 		//flush count
 		flushTimesMetric := newMetricItem(FlushTimesMetricKey, 6, OperationGroupKey)
-		flushTimesMetric.AddAxi("flush times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		flushTimesMetric.AddAxi("flush times", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		indexMetricItems = append(indexMetricItems, GroupMetricItem{
-			Key: "flush_times",
-			Field: "payload.elasticsearch.index_stats.total.flush.total",
-			ID: util.GetUUID(),
+			Key:          "flush_times",
+			Field:        "payload.elasticsearch.index_stats.total.flush.total",
+			ID:           util.GetUUID(),
 			IsDerivative: true,
-			MetricItem: flushTimesMetric,
-			FormatType: "num",
-			Units: "requests/s",
+			MetricItem:   flushTimesMetric,
+			FormatType:   "num",
+			Units:        "requests/s",
 		})
 	case IndexingRateMetricKey:
 		//indexing rate
 		indexingRateMetric := newMetricItem(IndexingRateMetricKey, 1, OperationGroupKey)
-		indexingRateMetric.AddAxi("Indexing rate","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		indexingRateMetric.AddAxi("Indexing rate", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		indexMetricItems = append(indexMetricItems, GroupMetricItem{
-			Key: "indexing_rate",
-			Field: "payload.elasticsearch.index_stats.primaries.indexing.index_total",
-			ID: util.GetUUID(),
+			Key:          "indexing_rate",
+			Field:        "payload.elasticsearch.index_stats.primaries.indexing.index_total",
+			ID:           util.GetUUID(),
 			IsDerivative: true,
-			MetricItem: indexingRateMetric,
-			FormatType: "num",
-			Units: "doc/s",
+			MetricItem:   indexingRateMetric,
+			FormatType:   "num",
+			Units:        "doc/s",
 		})
 	case IndexingBytesMetricKey:
 		indexingBytesMetric := newMetricItem(IndexingBytesMetricKey, 2, OperationGroupKey)
-		indexingBytesMetric.AddAxi("Indexing bytes","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+		indexingBytesMetric.AddAxi("Indexing bytes", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
 		indexMetricItems = append(indexMetricItems, GroupMetricItem{
 			Key:   "indexing_bytes",
 			Field: "payload.elasticsearch.index_stats.primaries.store.size_in_bytes",
@@ -333,382 +332,381 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
 	case IndexingLatencyMetricKey:
 		//indexing latency
 		indexingLatencyMetric := newMetricItem(IndexingLatencyMetricKey, 1, LatencyGroupKey)
-		indexingLatencyMetric.AddAxi("Indexing latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		indexingLatencyMetric.AddAxi("Indexing latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		indexMetricItems = append(indexMetricItems, GroupMetricItem{
-			Key: "indexing_latency",
-			Field: "payload.elasticsearch.index_stats.primaries.indexing.index_time_in_millis",
+			Key:    "indexing_latency",
+			Field:  "payload.elasticsearch.index_stats.primaries.indexing.index_time_in_millis",
 			Field2: "payload.elasticsearch.index_stats.primaries.indexing.index_total",
 			Calc: func(value, value2 float64) float64 {
-				return value/value2
+				return value / value2
 			},
-			ID: util.GetUUID(),
+			ID:           util.GetUUID(),
 			IsDerivative: true,
-			MetricItem: indexingLatencyMetric,
-			FormatType: "num",
-			Units: "ms",
+			MetricItem:   indexingLatencyMetric,
+			FormatType:   "num",
+			Units:        "ms",
 		})
 	case QueryLatencyMetricKey:
 		//query latency
 		queryLatencyMetric := newMetricItem(QueryLatencyMetricKey, 2, LatencyGroupKey)
-		queryLatencyMetric.AddAxi("Query latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		queryLatencyMetric.AddAxi("Query latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		indexMetricItems = append(indexMetricItems, GroupMetricItem{
-			Key: "query_latency",
-			Field: "payload.elasticsearch.index_stats.total.search.query_time_in_millis",
+			Key:    "query_latency",
+			Field:  "payload.elasticsearch.index_stats.total.search.query_time_in_millis",
 			Field2: "payload.elasticsearch.index_stats.total.search.query_total",
 			Calc: func(value, value2 float64) float64 {
-				return value/value2
+				return value / value2
 			},
-			ID: util.GetUUID(),
+			ID:           util.GetUUID(),
 			IsDerivative: true,
-			MetricItem: queryLatencyMetric,
-			FormatType: "num",
-			Units: "ms",
+			MetricItem:   queryLatencyMetric,
+			FormatType:   "num",
+			Units:        "ms",
 		})
 	case FetchLatencyMetricKey:
 		//fetch latency
 		fetchLatencyMetric := newMetricItem(FetchLatencyMetricKey, 3, LatencyGroupKey)
-		fetchLatencyMetric.AddAxi("Fetch latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		fetchLatencyMetric.AddAxi("Fetch latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		indexMetricItems = append(indexMetricItems, GroupMetricItem{
-			Key: "fetch_latency",
-			Field: "payload.elasticsearch.index_stats.total.search.fetch_time_in_millis",
+			Key:    "fetch_latency",
+			Field:  "payload.elasticsearch.index_stats.total.search.fetch_time_in_millis",
 			Field2: "payload.elasticsearch.index_stats.total.search.fetch_total",
 			Calc: func(value, value2 float64) float64 {
-				return value/value2
+				return value / value2
 			},
-			ID: util.GetUUID(),
+			ID:           util.GetUUID(),
 			IsDerivative: true,
-			MetricItem: fetchLatencyMetric,
-			FormatType: "num",
-			Units: "ms",
+			MetricItem:   fetchLatencyMetric,
+			FormatType:   "num",
+			Units:        "ms",
 		})
 	case MergeLatencyMetricKey:
 		//merge latency
 		mergeLatencyMetric := newMetricItem(MergeLatencyMetricKey, 7, LatencyGroupKey)
-		mergeLatencyMetric.AddAxi("Merge latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		mergeLatencyMetric.AddAxi("Merge latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		indexMetricItems = append(indexMetricItems, GroupMetricItem{
-			Key: "merge_latency",
-			Field: "payload.elasticsearch.index_stats.total.merges.total_time_in_millis",
+			Key:    "merge_latency",
+			Field:  "payload.elasticsearch.index_stats.total.merges.total_time_in_millis",
 			Field2: "payload.elasticsearch.index_stats.total.merges.total",
 			Calc: func(value, value2 float64) float64 {
-				return value/value2
+				return value / value2
 			},
-			ID: util.GetUUID(),
+			ID:           util.GetUUID(),
 			IsDerivative: true,
-			MetricItem: mergeLatencyMetric,
-			FormatType: "num",
-			Units: "ms",
+			MetricItem:   mergeLatencyMetric,
+			FormatType:   "num",
+			Units:        "ms",
 		})
 	case RefreshLatencyMetricKey:
 		//refresh latency
 		refreshLatencyMetric := newMetricItem(RefreshLatencyMetricKey, 5, LatencyGroupKey)
-		refreshLatencyMetric.AddAxi("Refresh latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		refreshLatencyMetric.AddAxi("Refresh latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		indexMetricItems = append(indexMetricItems, GroupMetricItem{
-			Key: "refresh_latency",
-			Field: "payload.elasticsearch.index_stats.total.refresh.total_time_in_millis",
+			Key:    "refresh_latency",
+			Field:  "payload.elasticsearch.index_stats.total.refresh.total_time_in_millis",
 			Field2: "payload.elasticsearch.index_stats.total.refresh.total",
 			Calc: func(value, value2 float64) float64 {
-				return value/value2
+				return value / value2
 			},
-			ID: util.GetUUID(),
+			ID:           util.GetUUID(),
 			IsDerivative: true,
-			MetricItem: refreshLatencyMetric,
-			FormatType: "num",
-			Units: "ms",
+			MetricItem:   refreshLatencyMetric,
+			FormatType:   "num",
+			Units:        "ms",
 		})
 	case ScrollLatencyMetricKey:
 		//scroll latency
 		scrollLatencyMetric := newMetricItem(ScrollLatencyMetricKey, 4, LatencyGroupKey)
-		scrollLatencyMetric.AddAxi("Scroll Latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		scrollLatencyMetric.AddAxi("Scroll Latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		indexMetricItems = append(indexMetricItems, GroupMetricItem{
-			Key: "scroll_latency",
-			Field: "payload.elasticsearch.index_stats.total.search.scroll_time_in_millis",
+			Key:    "scroll_latency",
+			Field:  "payload.elasticsearch.index_stats.total.search.scroll_time_in_millis",
 			Field2: "payload.elasticsearch.index_stats.total.search.scroll_total",
 			Calc: func(value, value2 float64) float64 {
-				return value/value2
+				return value / value2
 			},
-			ID: util.GetUUID(),
+			ID:           util.GetUUID(),
 			IsDerivative: true,
-			MetricItem: scrollLatencyMetric,
-			FormatType: "num",
-			Units: "ms",
+			MetricItem:   scrollLatencyMetric,
+			FormatType:   "num",
+			Units:        "ms",
 		})
 	case FlushLatencyMetricKey:
 		//flush latency
 		flushLatencyMetric := newMetricItem(FlushLatencyMetricKey, 6, LatencyGroupKey)
-		flushLatencyMetric.AddAxi("Flush latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+		flushLatencyMetric.AddAxi("Flush latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
 		indexMetricItems = append(indexMetricItems, GroupMetricItem{
-			Key: "flush_latency",
-			Field: "payload.elasticsearch.index_stats.total.flush.total_time_in_millis",
+			Key:    "flush_latency",
+			Field:  "payload.elasticsearch.index_stats.total.flush.total_time_in_millis",
 			Field2: "payload.elasticsearch.index_stats.total.flush.total",
 			Calc: func(value, value2 float64) float64 {
-				return value/value2
+				return value / value2
 			},
-			ID: util.GetUUID(),
+			ID:           util.GetUUID(),
 			IsDerivative: true,
-			MetricItem: flushLatencyMetric,
-			FormatType: "num",
-			Units: "ms",
+			MetricItem:   flushLatencyMetric,
+			FormatType:   "num",
+			Units:        "ms",
 		})
 	case QueryCacheMetricKey:
 		//queryCache
 		queryCacheMetric := newMetricItem(QueryCacheMetricKey, 1, CacheGroupKey)
-		queryCacheMetric.AddAxi("Query cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+		queryCacheMetric.AddAxi("Query cache", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
 		indexMetricItems = append(indexMetricItems, GroupMetricItem{
-			Key: "query_cache",
-			Field: "payload.elasticsearch.index_stats.total.query_cache.memory_size_in_bytes",
-			ID: util.GetUUID(),
+			Key:          "query_cache",
+			Field:        "payload.elasticsearch.index_stats.total.query_cache.memory_size_in_bytes",
+			ID:           util.GetUUID(),
 			IsDerivative: false,
-			MetricItem: queryCacheMetric,
-			FormatType: "bytes",
-			Units: "",
+			MetricItem:   queryCacheMetric,
+			FormatType:   "bytes",
+			Units:        "",
 		})
 	case RequestCacheMetricKey:
 		//requestCache
 		requestCacheMetric := newMetricItem(RequestCacheMetricKey, 2, CacheGroupKey)
-		requestCacheMetric.AddAxi("request cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+		requestCacheMetric.AddAxi("request cache", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
 		indexMetricItems = append(indexMetricItems, GroupMetricItem{
-			Key: "request_cache",
-			Field: "payload.elasticsearch.index_stats.total.request_cache.memory_size_in_bytes",
-			ID: util.GetUUID(),
+			Key:          "request_cache",
+			Field:        "payload.elasticsearch.index_stats.total.request_cache.memory_size_in_bytes",
+			ID:           util.GetUUID(),
 			IsDerivative: false,
-			MetricItem: requestCacheMetric,
-			FormatType: "bytes",
-			Units: "",
+			MetricItem:   requestCacheMetric,
+			FormatType:   "bytes",
+			Units:        "",
 		})
 	case RequestCacheHitMetricKey:
 		// Request Cache Hit
-		requestCacheHitMetric:=newMetricItem(RequestCacheHitMetricKey, 6, CacheGroupKey)
-		requestCacheHitMetric.AddAxi("request cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
-		indexMetricItems=append(indexMetricItems, GroupMetricItem{
-			Key: "request_cache_hit",
-			Field: "payload.elasticsearch.index_stats.total.request_cache.hit_count",
-			ID: util.GetUUID(),
+		requestCacheHitMetric := newMetricItem(RequestCacheHitMetricKey, 6, CacheGroupKey)
+		requestCacheHitMetric.AddAxi("request cache hit", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+		indexMetricItems = append(indexMetricItems, GroupMetricItem{
+			Key:          "request_cache_hit",
+			Field:        "payload.elasticsearch.index_stats.total.request_cache.hit_count",
+			ID:           util.GetUUID(),
 			IsDerivative: true,
-			MetricItem: requestCacheHitMetric,
-			FormatType: "num",
-			Units: "hits",
+			MetricItem:   requestCacheHitMetric,
+			FormatType:   "num",
+			Units:        "hits",
 		})
 	case RequestCacheMissMetricKey:
 		// Request Cache Miss
-		requestCacheMissMetric:=newMetricItem(RequestCacheMissMetricKey, 8, CacheGroupKey)
-		requestCacheMissMetric.AddAxi("request cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
-		indexMetricItems=append(indexMetricItems, GroupMetricItem{
-			Key: "request_cache_miss",
-			Field: "payload.elasticsearch.index_stats.total.request_cache.miss_count",
-			ID: util.GetUUID(),
+		requestCacheMissMetric := newMetricItem(RequestCacheMissMetricKey, 8, CacheGroupKey)
+		requestCacheMissMetric.AddAxi("request cache miss", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+		indexMetricItems = append(indexMetricItems, GroupMetricItem{
+			Key:          "request_cache_miss",
+			Field:        "payload.elasticsearch.index_stats.total.request_cache.miss_count",
+			ID:           util.GetUUID(),
 			IsDerivative: true,
-			MetricItem: requestCacheMissMetric,
-			FormatType: "num",
-			Units: "misses",
+			MetricItem:   requestCacheMissMetric,
+			FormatType:   "num",
+			Units:        "misses",
 		})
 	case QueryCacheCountMetricKey:
 		// Query Cache Count
-		queryCacheCountMetric:=newMetricItem(QueryCacheCountMetricKey, 4, CacheGroupKey)
-		queryCacheCountMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
-		indexMetricItems=append(indexMetricItems, GroupMetricItem{
-			Key: "query_cache_count",
-			Field: "payload.elasticsearch.index_stats.total.query_cache.cache_count",
-			ID: util.GetUUID(),
+		queryCacheCountMetric := newMetricItem(QueryCacheCountMetricKey, 4, CacheGroupKey)
+		queryCacheCountMetric.AddAxi("query cache miss", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+		indexMetricItems = append(indexMetricItems, GroupMetricItem{
+			Key:          "query_cache_count",
+			Field:        "payload.elasticsearch.index_stats.total.query_cache.cache_count",
+			ID:           util.GetUUID(),
 			IsDerivative: true,
-			MetricItem: queryCacheCountMetric,
-			FormatType: "num",
-			Units: "",
+			MetricItem:   queryCacheCountMetric,
+			FormatType:   "num",
+			Units:        "",
 		})
 	case QueryCacheHitMetricKey:
 		// Query Cache Miss
-		queryCacheHitMetric:=newMetricItem(QueryCacheHitMetricKey, 5, CacheGroupKey)
-		queryCacheHitMetric.AddAxi("query cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
-		indexMetricItems=append(indexMetricItems, GroupMetricItem{
-			Key: "query_cache_hit",
-			Field: "payload.elasticsearch.index_stats.total.query_cache.hit_count",
-			ID: util.GetUUID(),
+		queryCacheHitMetric := newMetricItem(QueryCacheHitMetricKey, 5, CacheGroupKey)
+		queryCacheHitMetric.AddAxi("query cache hit", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+		indexMetricItems = append(indexMetricItems, GroupMetricItem{
+			Key:          "query_cache_hit",
+			Field:        "payload.elasticsearch.index_stats.total.query_cache.hit_count",
+			ID:           util.GetUUID(),
 			IsDerivative: true,
-			MetricItem: queryCacheHitMetric,
-			FormatType: "num",
-			Units: "hits",
+			MetricItem:   queryCacheHitMetric,
+			FormatType:   "num",
+			Units:        "hits",
 		})
 	case QueryCacheMissMetricKey:
 		// Query Cache Miss
-		queryCacheMissMetric:=newMetricItem(QueryCacheMissMetricKey, 7, CacheGroupKey)
-		queryCacheMissMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
-		indexMetricItems=append(indexMetricItems, GroupMetricItem{
-			Key: "query_cache_miss",
-			Field: "payload.elasticsearch.index_stats.total.query_cache.miss_count",
-			ID: util.GetUUID(),
+		queryCacheMissMetric := newMetricItem(QueryCacheMissMetricKey, 7, CacheGroupKey)
+		queryCacheMissMetric.AddAxi("query cache miss", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+		indexMetricItems = append(indexMetricItems, GroupMetricItem{
+			Key:          "query_cache_miss",
+			Field:        "payload.elasticsearch.index_stats.total.query_cache.miss_count",
+			ID:           util.GetUUID(),
 			IsDerivative: true,
-			MetricItem: queryCacheMissMetric,
-			FormatType: "num",
"misses", + MetricItem: queryCacheMissMetric, + FormatType: "num", + Units: "misses", }) case FielddataCacheMetricKey: // Fielddata内存占用大小 - fieldDataCacheMetric:=newMetricItem(FielddataCacheMetricKey, 3, CacheGroupKey) - fieldDataCacheMetric.AddAxi("FieldData Cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true) - indexMetricItems=append(indexMetricItems, GroupMetricItem{ - Key: "fielddata_cache", - Field: "payload.elasticsearch.index_stats.total.fielddata.memory_size_in_bytes", - ID: util.GetUUID(), + fieldDataCacheMetric := newMetricItem(FielddataCacheMetricKey, 3, CacheGroupKey) + fieldDataCacheMetric.AddAxi("FieldData Cache", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true) + indexMetricItems = append(indexMetricItems, GroupMetricItem{ + Key: "fielddata_cache", + Field: "payload.elasticsearch.index_stats.total.fielddata.memory_size_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: fieldDataCacheMetric, - FormatType: "bytes", - Units: "", + MetricItem: fieldDataCacheMetric, + FormatType: "bytes", + Units: "", }) case SegmentMemoryMetricKey: //segment memory segmentMemoryMetric := newMetricItem(SegmentMemoryMetricKey, 13, MemoryGroupKey) - segmentMemoryMetric.AddAxi("Segment memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) + segmentMemoryMetric.AddAxi("Segment memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "segment_memory", - Field: "payload.elasticsearch.index_stats.total.segments.memory_in_bytes", - ID: util.GetUUID(), + Key: "segment_memory", + Field: "payload.elasticsearch.index_stats.total.segments.memory_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: segmentMemoryMetric, - FormatType: "bytes", - Units: "", + MetricItem: segmentMemoryMetric, + FormatType: "bytes", + Units: "", }) case SegmentDocValuesMemoryMetricKey: //segment doc values memory docValuesMemoryMetric := newMetricItem(SegmentDocValuesMemoryMetricKey, 13, MemoryGroupKey) - docValuesMemoryMetric.AddAxi("Segment Doc values Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) + docValuesMemoryMetric.AddAxi("Segment Doc values Memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "segment_doc_values_memory", - Field: "payload.elasticsearch.index_stats.total.segments.doc_values_memory_in_bytes", - ID: util.GetUUID(), + Key: "segment_doc_values_memory", + Field: "payload.elasticsearch.index_stats.total.segments.doc_values_memory_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: docValuesMemoryMetric, - FormatType: "bytes", - Units: "", + MetricItem: docValuesMemoryMetric, + FormatType: "bytes", + Units: "", }) case SegmentTermsMemoryMetricKey: //segment terms memory termsMemoryMetric := newMetricItem(SegmentTermsMemoryMetricKey, 13, MemoryGroupKey) - termsMemoryMetric.AddAxi("Segment Terms Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) + termsMemoryMetric.AddAxi("Segment Terms Memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "segment_terms_memory", - Field: "payload.elasticsearch.index_stats.total.segments.terms_memory_in_bytes", - ID: util.GetUUID(), + Key: "segment_terms_memory", + Field: "payload.elasticsearch.index_stats.total.segments.terms_memory_in_bytes", + ID: util.GetUUID(), IsDerivative: 
false, - MetricItem: termsMemoryMetric, - FormatType: "bytes", - Units: "", + MetricItem: termsMemoryMetric, + FormatType: "bytes", + Units: "", }) case SegmentFieldsMemoryMetricKey: //segment fields memory fieldsMemoryMetric := newMetricItem(SegmentFieldsMemoryMetricKey, 13, MemoryGroupKey) - fieldsMemoryMetric.AddAxi("Segment Fields Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) + fieldsMemoryMetric.AddAxi("Segment Fields Memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: "segment_fields_memory", - Field: "payload.elasticsearch.index_stats.total.segments.stored_fields_memory_in_bytes", - ID: util.GetUUID(), + Key: "segment_fields_memory", + Field: "payload.elasticsearch.index_stats.total.segments.stored_fields_memory_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: fieldsMemoryMetric, - FormatType: "bytes", - Units: "", + MetricItem: fieldsMemoryMetric, + FormatType: "bytes", + Units: "", }) case SegmentIndexWriterMemoryMetricKey: // segment index writer memory - segmentIndexWriterMemoryMetric:=newMetricItem(SegmentIndexWriterMemoryMetricKey, 16, MemoryGroupKey) - segmentIndexWriterMemoryMetric.AddAxi("segment doc values memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true) - indexMetricItems=append(indexMetricItems, GroupMetricItem{ - Key: "segment_index_writer_memory", - Field: "payload.elasticsearch.index_stats.total.segments.index_writer_memory_in_bytes", - ID: util.GetUUID(), + segmentIndexWriterMemoryMetric := newMetricItem(SegmentIndexWriterMemoryMetricKey, 16, MemoryGroupKey) + segmentIndexWriterMemoryMetric.AddAxi("segment doc values memory", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true) + indexMetricItems = append(indexMetricItems, GroupMetricItem{ + Key: "segment_index_writer_memory", + Field: "payload.elasticsearch.index_stats.total.segments.index_writer_memory_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: segmentIndexWriterMemoryMetric, - FormatType: "bytes", - Units: "", + MetricItem: segmentIndexWriterMemoryMetric, + FormatType: "bytes", + Units: "", }) case SegmentTermVectorsMemoryMetricKey: // segment term vectors memory - segmentTermVectorsMemoryMetric:=newMetricItem(SegmentTermVectorsMemoryMetricKey, 16, MemoryGroupKey) - segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true) - indexMetricItems=append(indexMetricItems, GroupMetricItem{ - Key: "segment_term_vectors_memory", - Field: "payload.elasticsearch.index_stats.total.segments.term_vectors_memory_in_bytes", - ID: util.GetUUID(), + segmentTermVectorsMemoryMetric := newMetricItem(SegmentTermVectorsMemoryMetricKey, 16, MemoryGroupKey) + segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true) + indexMetricItems = append(indexMetricItems, GroupMetricItem{ + Key: "segment_term_vectors_memory", + Field: "payload.elasticsearch.index_stats.total.segments.term_vectors_memory_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: segmentTermVectorsMemoryMetric, - FormatType: "bytes", - Units: "", + MetricItem: segmentTermVectorsMemoryMetric, + FormatType: "bytes", + Units: "", }) case SegmentNormsMetricKey: segmentNormsMetric := newMetricItem(SegmentNormsMetricKey, 17, MemoryGroupKey) - segmentNormsMetric.AddAxi("Segment norms 
memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) + segmentNormsMetric.AddAxi("Segment norms memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: SegmentNormsMetricKey, - Field: "payload.elasticsearch.index_stats.total.segments.norms_memory_in_bytes", - ID: util.GetUUID(), + Key: SegmentNormsMetricKey, + Field: "payload.elasticsearch.index_stats.total.segments.norms_memory_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: segmentNormsMetric, - FormatType: "bytes", - Units: "", + MetricItem: segmentNormsMetric, + FormatType: "bytes", + Units: "", }) case SegmentPointsMetricKey: segmentPointsMetric := newMetricItem(SegmentPointsMetricKey, 18, MemoryGroupKey) - segmentPointsMetric.AddAxi("Segment points memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) + segmentPointsMetric.AddAxi("Segment points memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: SegmentPointsMetricKey, - Field: "payload.elasticsearch.index_stats.total.segments.points_memory_in_bytes", - ID: util.GetUUID(), + Key: SegmentPointsMetricKey, + Field: "payload.elasticsearch.index_stats.total.segments.points_memory_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: segmentPointsMetric, - FormatType: "bytes", - Units: "", + MetricItem: segmentPointsMetric, + FormatType: "bytes", + Units: "", }) case VersionMapMetricKey: segmentVersionMapMetric := newMetricItem(VersionMapMetricKey, 18, MemoryGroupKey) - segmentVersionMapMetric.AddAxi("Segment version map memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) + segmentVersionMapMetric.AddAxi("Segment version map memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: VersionMapMetricKey, - Field: "payload.elasticsearch.index_stats.total.segments.version_map_memory_in_bytes", - ID: util.GetUUID(), + Key: VersionMapMetricKey, + Field: "payload.elasticsearch.index_stats.total.segments.version_map_memory_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: segmentVersionMapMetric, - FormatType: "bytes", - Units: "", + MetricItem: segmentVersionMapMetric, + FormatType: "bytes", + Units: "", }) case FixedBitSetMetricKey: segmentFixedBitSetMetric := newMetricItem(FixedBitSetMetricKey, 18, MemoryGroupKey) - segmentFixedBitSetMetric.AddAxi("Segment fixed bit set memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) + segmentFixedBitSetMetric.AddAxi("Segment fixed bit set memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) indexMetricItems = append(indexMetricItems, GroupMetricItem{ - Key: FixedBitSetMetricKey, - Field: "payload.elasticsearch.index_stats.total.segments.fixed_bit_set_memory_in_bytes", - ID: util.GetUUID(), + Key: FixedBitSetMetricKey, + Field: "payload.elasticsearch.index_stats.total.segments.fixed_bit_set_memory_in_bytes", + ID: util.GetUUID(), IsDerivative: false, - MetricItem: segmentFixedBitSetMetric, - FormatType: "bytes", - Units: "", + MetricItem: segmentFixedBitSetMetric, + FormatType: "bytes", + Units: "", }) } + aggs := map[string]interface{}{} - aggs:=map[string]interface{}{} - - for _,metricItem:=range indexMetricItems { - aggs[metricItem.ID]=util.MapStr{ - "max":util.MapStr{ + for _, metricItem := range indexMetricItems { + aggs[metricItem.ID] = util.MapStr{ + 
"max": util.MapStr{ "field": metricItem.Field, }, } - if metricItem.Field2 != ""{ - aggs[metricItem.ID + "_field2"]=util.MapStr{ - "max":util.MapStr{ + if metricItem.Field2 != "" { + aggs[metricItem.ID+"_field2"] = util.MapStr{ + "max": util.MapStr{ "field": metricItem.Field2, }, } } - if metricItem.IsDerivative{ - aggs[metricItem.ID+"_deriv"]=util.MapStr{ - "derivative":util.MapStr{ + if metricItem.IsDerivative { + aggs[metricItem.ID+"_deriv"] = util.MapStr{ + "derivative": util.MapStr{ "buckets_path": metricItem.ID, }, } if metricItem.Field2 != "" { - aggs[metricItem.ID + "_deriv_field2"]=util.MapStr{ - "derivative":util.MapStr{ + aggs[metricItem.ID+"_deriv_field2"] = util.MapStr{ + "derivative": util.MapStr{ "buckets_path": metricItem.ID + "_field2", }, } @@ -720,8 +718,8 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu return nil, err } - query["size"]=0 - query["aggs"]= util.MapStr{ + query["size"] = 0 + query["aggs"] = util.MapStr{ "group_by_level": util.MapStr{ "terms": util.MapStr{ "field": "metadata.labels.index_name", @@ -732,11 +730,11 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu }, "aggs": util.MapStr{ "dates": util.MapStr{ - "date_histogram":util.MapStr{ - "field": "timestamp", + "date_histogram": util.MapStr{ + "field": "timestamp", intervalField: bucketSizeStr, }, - "aggs":aggs, + "aggs": aggs, }, "max_store": util.MapStr{ "max": util.MapStr{ @@ -750,7 +748,7 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu } -func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top int, lastMinutes int) ([]string, error){ +func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top int, lastMinutes int) ([]string, error) { ver := h.Client().GetVersion() cr, _ := util.VersionCompare(ver.Number, "6.1") if (ver.Distribution == "" || ver.Distribution == elastic.Elasticsearch) && cr == -1 { @@ -758,8 +756,8 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in } var ( now = time.Now() - max = now.UnixNano()/1e6 - min = now.Add(-time.Duration(lastMinutes) * time.Minute).UnixNano()/1e6 + max = now.UnixNano() / 1e6 + min = now.Add(-time.Duration(lastMinutes)*time.Minute).UnixNano() / 1e6 ) var must = []util.MapStr{ { @@ -791,8 +789,8 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in if !hasAllPrivilege { must = append(must, util.MapStr{ "query_string": util.MapStr{ - "query": strings.Join(allowedIndices, " "), - "fields": []string{"metadata.labels.index_name"}, + "query": strings.Join(allowedIndices, " "), + "fields": []string{"metadata.labels.index_name"}, "default_operator": "OR", }, }) @@ -850,7 +848,7 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in }, "dates": util.MapStr{ "date_histogram": util.MapStr{ - "field": "timestamp", + "field": "timestamp", intervalField: bucketSizeStr, }, "aggs": util.MapStr{ @@ -889,7 +887,7 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in }, "dates": util.MapStr{ "date_histogram": util.MapStr{ - "field": "timestamp", + "field": "timestamp", intervalField: bucketSizeStr, }, "aggs": util.MapStr{ @@ -909,20 +907,20 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in }, }, } - response,err:=elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(),util.MustToJSONBytes(query)) - if err!=nil{ + 
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query)) + if err != nil { log.Error(err) return nil, err } - var maxQpsKVS = map[string] float64{} + var maxQpsKVS = map[string]float64{} for _, agg := range response.Aggregations { for _, bk := range agg.Buckets { key := bk["key"].(string) if maxQps, ok := bk["max_qps"].(map[string]interface{}); ok { val := maxQps["value"].(float64) - if _, ok = maxQpsKVS[key] ; ok { + if _, ok = maxQpsKVS[key]; ok { maxQpsKVS[key] = maxQpsKVS[key] + val - }else{ + } else { maxQpsKVS[key] = val } } @@ -943,7 +941,7 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in length = len(qpsValues) } indexNames := []string{} - for i := 0; i <length; i++ { + for i := 0; i < length; i++ { indexNames = append(indexNames, qpsValues[i].Key) } return indexNames, nil } type TopTerm struct { Key string Value float64 } type TopTermOrder []TopTerm func (t TopTermOrder) Len() int { return len(t) } func (t TopTermOrder) Less(i, j int) bool { return t[i].Value > t[j].Value //desc } -func (t TopTermOrder) Swap(i, j int){ +func (t TopTermOrder) Swap(i, j int) { t[i], t[j] = t[j], t[i] } diff --git a/modules/elastic/api/v1/index_overview.go b/modules/elastic/api/v1/index_overview.go index b04a519c..e7bc9a82 100644 --- a/modules/elastic/api/v1/index_overview.go +++ b/modules/elastic/api/v1/index_overview.go @@ -72,7 +72,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context, for _, v := range results.Result { result, ok := v.(map[string]interface{}) if ok { - if indexID, ok := util.GetMapValueByKeys([]string{"metadata", "labels", "index_id"}, result); ok { + if indexID, ok := util.GetMapValueByKeys([]string{"metadata", "labels", "index_id"}, result); ok { summary := map[string]interface{}{} if docs, ok := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "index_stats", "total", "docs"}, result); ok { if docsM, ok := docs.(map[string]interface{}); ok { @@ -97,7 +97,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context, if sinfo, ok := shardInfo.([]interface{}); ok { unassignedCount := 0 for _, item := range sinfo { - if itemMap, ok := item.(map[string]interface{}); ok{ + if itemMap, ok := item.(map[string]interface{}); ok { if itemMap["state"] == "UNASSIGNED" { unassignedCount++ } @@ -121,7 +121,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context, return } firstClusterID, firstIndexName = parts[0], parts[1] - }else{ + } else { h.WriteError(w, fmt.Sprintf("invalid index_id: %v", indexID), http.StatusInternalServerError) return } @@ -137,35 +137,35 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context, } var metricLen = 15 // indexing rate - indexMetric:=newMetricItem("indexing", 1, OperationGroupKey) - indexMetric.AddAxi("indexing rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) + indexMetric := newMetricItem("indexing", 1, OperationGroupKey) + indexMetric.AddAxi("indexing rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) nodeMetricItems := []GroupMetricItem{} - nodeMetricItems=append(nodeMetricItems, GroupMetricItem{ - Key: "indexing", - Field: "payload.elasticsearch.index_stats.primaries.indexing.index_total", - ID: util.GetUUID(), + nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ + Key: "indexing", + Field: "payload.elasticsearch.index_stats.primaries.indexing.index_total", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: indexMetric, - FormatType: "num", - Units: "Indexing/s", + MetricItem: indexMetric, + FormatType: "num", + Units: "Indexing/s", }) - queryMetric:=newMetricItem("search", 2, OperationGroupKey) - queryMetric.AddAxi("query
rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) - nodeMetricItems=append(nodeMetricItems, GroupMetricItem{ - Key: "search", - Field: "payload.elasticsearch.index_stats.total.search.query_total", - ID: util.GetUUID(), + queryMetric := newMetricItem("search", 2, OperationGroupKey) + queryMetric.AddAxi("query rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) + nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ + Key: "search", + Field: "payload.elasticsearch.index_stats.total.search.query_total", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: queryMetric, - FormatType: "num", - Units: "Search/s", + MetricItem: queryMetric, + FormatType: "num", + Units: "Search/s", }) - aggs:=map[string]interface{}{} - query :=map[string]interface{}{} - query["query"]=util.MapStr{ + aggs := map[string]interface{}{} + query := map[string]interface{}{} + query["query"] = util.MapStr{ "bool": util.MapStr{ - "must": []util.MapStr{ + "must": []util.MapStr{ { "term": util.MapStr{ "metadata.category": util.MapStr{ @@ -190,7 +190,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context, { "range": util.MapStr{ "timestamp": util.MapStr{ - "gte": fmt.Sprintf("now-%ds", metricLen * bucketSize), + "gte": fmt.Sprintf("now-%ds", metricLen*bucketSize), }, }, }, @@ -198,15 +198,15 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context, }, } - for _,metricItem:=range nodeMetricItems{ - aggs[metricItem.ID]=util.MapStr{ - "max":util.MapStr{ + for _, metricItem := range nodeMetricItems { + aggs[metricItem.ID] = util.MapStr{ + "max": util.MapStr{ "field": metricItem.Field, }, } - if metricItem.IsDerivative{ - aggs[metricItem.ID+"_deriv"]=util.MapStr{ - "derivative":util.MapStr{ + if metricItem.IsDerivative { + aggs[metricItem.ID+"_deriv"] = util.MapStr{ + "derivative": util.MapStr{ "buckets_path": metricItem.ID, }, } @@ -218,8 +218,8 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context, if err != nil { panic(err) } - query["size"]=0 - query["aggs"]= util.MapStr{ + query["size"] = 0 + query["aggs"] = util.MapStr{ "group_by_level": util.MapStr{ "terms": util.MapStr{ "field": "metadata.labels.index_id", @@ -227,11 +227,11 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context, }, "aggs": util.MapStr{ "dates": util.MapStr{ - "date_histogram":util.MapStr{ - "field": "timestamp", + "date_histogram": util.MapStr{ + "field": "timestamp", intervalField: bucketSizeStr, }, - "aggs":aggs, + "aggs": aggs, }, }, }, @@ -245,9 +245,8 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context, indexMetrics := map[string]util.MapStr{} for key, item := range metrics { for _, line := range item.Lines { - if _, ok := indexMetrics[line.Metric.Label]; !ok{ - indexMetrics[line.Metric.Label] = util.MapStr{ - } + if _, ok := indexMetrics[line.Metric.Label]; !ok { + indexMetrics[line.Metric.Label] = util.MapStr{} } indexMetrics[line.Metric.Label][key] = line.Data } @@ -292,11 +291,11 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h indexID := ps.MustGetParameter("index") parts := strings.Split(indexID, ":") if len(parts) > 1 && !h.IsIndexAllowed(req, clusterID, parts[1]) { - h.WriteError(w, http.StatusText(http.StatusForbidden), http.StatusForbidden) + h.WriteError(w, http.StatusText(http.StatusForbidden), http.StatusForbidden) return } if len(parts) < 2 { - h.WriteError(w, "invalid index id: "+ indexID, http.StatusInternalServerError) 
+ h.WriteError(w, "invalid index id: "+indexID, http.StatusInternalServerError) return } @@ -320,7 +319,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h return } q1 := orm.Query{ - Size: 1, + Size: 1, WildcardIndex: true, } q1.Conds = orm.And( @@ -340,7 +339,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h summary["aliases"] = aliases summary["timestamp"] = hit["timestamp"] summary["index_info"] = util.MapStr{ - "health":health, + "health": health, "status": state, } } @@ -361,11 +360,11 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h if tm, ok := result["timestamp"].(string); ok { issueTime, _ := time.Parse(time.RFC3339, tm) if time.Now().Sub(issueTime).Seconds() > 30 { - health, _:= util.GetMapValueByKeys([]string{"metadata", "labels", "health_status"}, response.Hits.Hits[0].Source) + health, _ := util.GetMapValueByKeys([]string{"metadata", "labels", "health_status"}, response.Hits.Hits[0].Source) infoM["health"] = health } } - state, _:= util.GetMapValueByKeys([]string{"metadata", "labels", "state"}, response.Hits.Hits[0].Source) + state, _ := util.GetMapValueByKeys([]string{"metadata", "labels", "state"}, response.Hits.Hits[0].Source) if state == "delete" { infoM["status"] = "delete" infoM["health"] = "N/A" @@ -377,7 +376,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h if sinfo, ok := shardInfo.([]interface{}); ok { unassignedCount := 0 for _, item := range sinfo { - if itemMap, ok := item.(map[string]interface{}); ok{ + if itemMap, ok := item.(map[string]interface{}); ok { if itemMap["state"] == "UNASSIGNED" { unassignedCount++ } @@ -398,7 +397,7 @@ func (h *APIHandler) GetIndexShards(w http.ResponseWriter, req *http.Request, ps clusterID := ps.MustGetParameter("id") indexName := ps.MustGetParameter("index") q1 := orm.Query{ - Size: 1, + Size: 1, WildcardIndex: true, } q1.Conds = orm.And( @@ -411,9 +410,9 @@ func (h *APIHandler) GetIndexShards(w http.ResponseWriter, req *http.Request, ps q1.AddSort("timestamp", orm.DESC) err, result := orm.Search(&event.Event{}, &q1) if err != nil { - h.WriteJSON(w,util.MapStr{ + h.WriteJSON(w, util.MapStr{ "error": err.Error(), - }, http.StatusInternalServerError ) + }, http.StatusInternalServerError) return } var shardInfo interface{} = []interface{}{} @@ -512,7 +511,7 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ log.Error(err) } metrics["index_health"] = healthMetric - }else { + } else { switch metricKey { case IndexThroughputMetricKey: metricItem := newMetricItem("index_throughput", 1, OperationGroupKey) @@ -582,7 +581,7 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ minBucketSize, err := GetMetricMinBucketSize(clusterID, MetricTypeIndexStats) if err != nil { log.Error(err) - }else{ + } else { metrics[metricKey].MinBucketSize = int64(minBucketSize) } } @@ -591,8 +590,8 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ h.WriteJSON(w, resBody, http.StatusOK) } -func (h *APIHandler) GetIndexHealthMetric(ctx context.Context, id, indexName string, min, max int64, bucketSize int)(*common.MetricItem, error){ - bucketSizeStr:=fmt.Sprintf("%vs",bucketSize) +func (h *APIHandler) GetIndexHealthMetric(ctx context.Context, id, indexName string, min, max int64, bucketSize int) (*common.MetricItem, error) { + bucketSizeStr := fmt.Sprintf("%vs", bucketSize) intervalField, err := 
getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr) if err != nil { return nil, err @@ -645,14 +644,14 @@ func (h *APIHandler) GetIndexHealthMetric(ctx context.Context, id, indexName str "aggs": util.MapStr{ "dates": util.MapStr{ "date_histogram": util.MapStr{ - "field": "timestamp", + "field": "timestamp", intervalField: bucketSizeStr, }, "aggs": util.MapStr{ "group_status": util.MapStr{ "terms": util.MapStr{ "field": "payload.elasticsearch.index_health.status", - "size": 5, + "size": 5, }, }, }, @@ -666,8 +665,8 @@ func (h *APIHandler) GetIndexHealthMetric(ctx context.Context, id, indexName str return nil, err } - metricItem:=newMetricItem("index_health", 1, "") - metricItem.AddLine("health","Health","","group1","payload.elasticsearch.index_health.status","max",bucketSizeStr,"%","ratio","0.[00]","0.[00]",false,false) + metricItem := newMetricItem("index_health", 1, "") + metricItem.AddLine("health", "Health", "", "group1", "payload.elasticsearch.index_health.status", "max", bucketSizeStr, "%", "ratio", "0.[00]", "0.[00]", false, false) metricData := []interface{}{} if response.StatusCode == 200 { @@ -683,8 +682,7 @@ func (h *APIHandler) GetIndexHealthMetric(ctx context.Context, id, indexName str return metricItem, nil } - -func (h *APIHandler) GetIndexStatusOfRecentDay(clusterID, indexName string)(map[string][]interface{}, error){ +func (h *APIHandler) GetIndexStatusOfRecentDay(clusterID, indexName string) (map[string][]interface{}, error) { q := orm.Query{ WildcardIndex: true, } @@ -698,53 +696,53 @@ func (h *APIHandler) GetIndexStatusOfRecentDay(clusterID, indexName string)(map[ "ranges": []util.MapStr{ { "from": "now-13d/d", - "to": "now-12d/d", + "to": "now-12d/d", }, { "from": "now-12d/d", - "to": "now-11d/d", + "to": "now-11d/d", }, { "from": "now-11d/d", - "to": "now-10d/d", + "to": "now-10d/d", }, { "from": "now-10d/d", - "to": "now-9d/d", + "to": "now-9d/d", }, { "from": "now-9d/d", - "to": "now-8d/d", + "to": "now-8d/d", }, { "from": "now-8d/d", - "to": "now-7d/d", + "to": "now-7d/d", }, { "from": "now-7d/d", - "to": "now-6d/d", + "to": "now-6d/d", }, { "from": "now-6d/d", - "to": "now-5d/d", + "to": "now-5d/d", }, { "from": "now-5d/d", - "to": "now-4d/d", + "to": "now-4d/d", }, { "from": "now-4d/d", - "to": "now-3d/d", - },{ + "to": "now-3d/d", + }, { "from": "now-3d/d", - "to": "now-2d/d", + "to": "now-2d/d", }, { "from": "now-2d/d", - "to": "now-1d/d", + "to": "now-1d/d", }, { "from": "now-1d/d", - "to": "now/d", + "to": "now/d", }, { "from": "now/d", - "to": "now", + "to": "now", }, }, }, @@ -817,16 +815,16 @@ func (h *APIHandler) GetIndexStatusOfRecentDay(clusterID, indexName string)(map[ } healthMap := map[string]int{} status := "unknown" - for _, hbkItem := range healthBks { + for _, hbkItem := range healthBks { if hitem, ok := hbkItem.(map[string]interface{}); ok { healthMap[hitem["key"].(string)] = 1 } } if _, ok = healthMap["red"]; ok { status = "red" - }else if _, ok = healthMap["yellow"]; ok { + } else if _, ok = healthMap["yellow"]; ok { status = "yellow" - }else if _, ok = healthMap["green"]; ok { + } else if _, ok = healthMap["green"]; ok { status = "green" } key := fmt.Sprintf("%s:%s", clusterID, indexName) @@ -838,7 +836,7 @@ func (h *APIHandler) GetIndexStatusOfRecentDay(clusterID, indexName string)(map[ } func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - resBody := map[string] interface{}{} + resBody := map[string]interface{}{} id := ps.ByName("id") 
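// Editorial aside, not part of the patch: GetIndexStatusOfRecentDay above (and
// getNodeOnlineStatusOfRecentDay later in this patch) spells out the same
// fourteen "now-Nd/d" date_range buckets by hand. A minimal sketch of a helper
// that could generate them instead, assuming only fmt and the util.MapStr type
// used throughout the surrounding code; the name dailyRanges is hypothetical:
//
//	func dailyRanges(days int) []util.MapStr {
//		ranges := make([]util.MapStr, 0, days+1)
//		for i := days; i > 1; i-- {
//			// one bucket per past day, e.g. from "now-13d/d" to "now-12d/d"
//			ranges = append(ranges, util.MapStr{
//				"from": fmt.Sprintf("now-%dd/d", i),
//				"to":   fmt.Sprintf("now-%dd/d", i-1),
//			})
//		}
//		// final buckets: yesterday, then today so far, as in the query above
//		ranges = append(ranges,
//			util.MapStr{"from": "now-1d/d", "to": "now/d"},
//			util.MapStr{"from": "now/d", "to": "now"})
//		return ranges
//	}
//
// With days = 13 this yields the same fourteen buckets as the literal list.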
indexName := ps.ByName("index") if !h.IsIndexAllowed(req, id, indexName) { @@ -847,7 +845,7 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps }, http.StatusForbidden) return } - q := &orm.Query{ Size: 1} + q := &orm.Query{Size: 1} q.AddSort("timestamp", orm.DESC) q.Conds = orm.And( orm.Eq("metadata.category", "elasticsearch"), @@ -859,13 +857,13 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps err, result := orm.Search(event.Event{}, q) if err != nil { resBody["error"] = err.Error() - h.WriteJSON(w,resBody, http.StatusInternalServerError ) + h.WriteJSON(w, resBody, http.StatusInternalServerError) } namesM := util.MapStr{} if len(result.Result) > 0 { if data, ok := result.Result[0].(map[string]interface{}); ok { if routingTable, exists := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "index_routing_table"}, data); exists { - if table, ok := routingTable.(map[string]interface{}); ok{ + if table, ok := routingTable.(map[string]interface{}); ok { if shardsM, ok := table["shards"].(map[string]interface{}); ok { for _, rows := range shardsM { if rowsArr, ok := rows.([]interface{}); ok { @@ -887,12 +885,12 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps } //node uuid - nodeIds := make([]interface{}, 0, len(namesM) ) + nodeIds := make([]interface{}, 0, len(namesM)) for name, _ := range namesM { nodeIds = append(nodeIds, name) } - q1 := &orm.Query{ Size: 100} + q1 := &orm.Query{Size: 100} q1.AddSort("timestamp", orm.DESC) q1.Conds = orm.And( orm.Eq("metadata.category", "elasticsearch"), @@ -902,7 +900,7 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps err, result = orm.Search(elastic.NodeConfig{}, q1) if err != nil { resBody["error"] = err.Error() - h.WriteJSON(w,resBody, http.StatusInternalServerError ) + h.WriteJSON(w, resBody, http.StatusInternalServerError) } nodes := []interface{}{} for _, hit := range result.Result { @@ -922,11 +920,11 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps if v, ok := nodeId.(string); ok { ninfo := util.MapStr{ - "id": v, - "name": nodeName, - "ip": ip, - "port": port, - "status": status, + "id": v, + "name": nodeName, + "ip": ip, + "port": port, + "status": status, "timestamp": hitM["timestamp"], } nodes = append(nodes, ninfo) @@ -947,7 +945,7 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr } var must = []util.MapStr{} - if !util.StringInArray(ids, "*"){ + if !util.StringInArray(ids, "*") { must = append(must, util.MapStr{ "terms": util.MapStr{ @@ -958,9 +956,8 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr if keyword != "" { must = append(must, util.MapStr{ - "wildcard":util.MapStr{ - "metadata.index_name": - util.MapStr{"value": fmt.Sprintf("*%s*", keyword)}, + "wildcard": util.MapStr{ + "metadata.index_name": util.MapStr{"value": fmt.Sprintf("*%s*", keyword)}, }, }) } @@ -986,7 +983,6 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr }, } - esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)) indexName := orm.GetIndexName(elastic.IndexConfig{}) resp, err := esClient.SearchWithRawQueryDSL(indexName, util.MustToJSONBytes(dsl)) diff --git a/modules/elastic/api/v1/manage.go b/modules/elastic/api/v1/manage.go index de753919..56d58dac 100644 --- a/modules/elastic/api/v1/manage.go +++ b/modules/elastic/api/v1/manage.go @@ -545,7 +545,7 @@ func 
(h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http minBucketSize, err := GetMetricMinBucketSize(id, metricType) if err != nil { log.Error(err) - }else{ + } else { metrics[key].MinBucketSize = int64(minBucketSize) } } @@ -648,7 +648,7 @@ func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.R } } } - }else{ + } else { metrics, err = h.getIndexMetrics(ctx, req, id, bucketSize, min, max, indexName, top, key) if err != nil { h.WriteError(w, err, http.StatusInternalServerError) @@ -660,7 +660,7 @@ func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.R minBucketSize, err := GetMetricMinBucketSize(id, MetricTypeNodeStats) if err != nil { log.Error(err) - }else{ + } else { metrics[key].MinBucketSize = int64(minBucketSize) } } @@ -788,19 +788,20 @@ const ( ) const ( - ClusterStorageMetricKey = "cluster_storage" + ClusterStorageMetricKey = "cluster_storage" ClusterDocumentsMetricKey = "cluster_documents" - ClusterIndicesMetricKey = "cluster_indices" + ClusterIndicesMetricKey = "cluster_indices" ClusterNodeCountMetricKey = "node_count" - ClusterHealthMetricKey = "cluster_health" - ShardCountMetricKey = "shard_count" - CircuitBreakerMetricKey = "circuit_breaker" + ClusterHealthMetricKey = "cluster_health" + ShardCountMetricKey = "shard_count" + CircuitBreakerMetricKey = "circuit_breaker" ) + func (h *APIHandler) GetClusterMetrics(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) (map[string]*common.MetricItem, error) { var ( - clusterMetricsResult = map[string]*common.MetricItem {} - err error + clusterMetricsResult = map[string]*common.MetricItem{} + err error ) switch metricKey { case ClusterDocumentsMetricKey, @@ -915,12 +916,14 @@ func (h *APIHandler) getClusterMetricsByKey(ctx context.Context, id string, buck } return h.getSingleMetrics(ctx, clusterMetricItems, query, bucketSize) } + const ( - IndexThroughputMetricKey = "index_throughput" + IndexThroughputMetricKey = "index_throughput" SearchThroughputMetricKey = "search_throughput" - IndexLatencyMetricKey = "index_latency" - SearchLatencyMetricKey = "search_latency" + IndexLatencyMetricKey = "index_latency" + SearchLatencyMetricKey = "search_latency" ) + func (h *APIHandler) GetClusterIndexMetrics(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) (map[string]*common.MetricItem, error) { bucketSizeStr := fmt.Sprintf("%vs", bucketSize) metricItems := []*common.MetricItem{} diff --git a/modules/elastic/api/v1/metrics_util.go b/modules/elastic/api/v1/metrics_util.go index 1fb3a744..37ff3d56 100644 --- a/modules/elastic/api/v1/metrics_util.go +++ b/modules/elastic/api/v1/metrics_util.go @@ -113,7 +113,7 @@ func generateGroupAggs(nodeMetricItems []GroupMetricItem) map[string]interface{} func (h *APIHandler) getMetrics(ctx context.Context, query map[string]interface{}, grpMetricItems []GroupMetricItem, bucketSize int) (map[string]*common.MetricItem, error) { bucketSizeStr := fmt.Sprintf("%vs", bucketSize) queryDSL := util.MustToJSONBytes(query) - response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL) + response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL) if err != nil { return nil, err } @@ -229,11 +229,12 @@ func GetMinBucketSize() int { const ( MetricTypeClusterHealth = "cluster_health" - MetricTypeClusterStats = "cluster_stats" 
- MetricTypeNodeStats = "node_stats" - MetricTypeIndexStats = "index_stats" + MetricTypeClusterStats = "cluster_stats" + MetricTypeNodeStats = "node_stats" + MetricTypeIndexStats = "index_stats" ) -//GetMetricMinBucketSize returns twice the metrics collection interval based on the cluster ID and metric type + +// GetMetricMinBucketSize returns twice the metrics collection interval based on the cluster ID and metric type func GetMetricMinBucketSize(clusterID, metricType string) (int, error) { meta := elastic.GetMetadata(clusterID) if meta == nil { @@ -243,19 +244,19 @@ func GetMetricMinBucketSize(clusterID, metricType string) (int, error) { switch metricType { case MetricTypeClusterHealth: if meta.Config.MonitorConfigs != nil { - interval = meta.Config.MonitorConfigs.ClusterHealth.Interval + interval = meta.Config.MonitorConfigs.ClusterHealth.Interval } case MetricTypeClusterStats: if meta.Config.MonitorConfigs != nil { - interval = meta.Config.MonitorConfigs.ClusterStats.Interval + interval = meta.Config.MonitorConfigs.ClusterStats.Interval } case MetricTypeNodeStats: if meta.Config.MonitorConfigs != nil { - interval = meta.Config.MonitorConfigs.NodeStats.Interval + interval = meta.Config.MonitorConfigs.NodeStats.Interval } case MetricTypeIndexStats: if meta.Config.MonitorConfigs != nil { - interval = meta.Config.MonitorConfigs.IndexStats.Interval + interval = meta.Config.MonitorConfigs.IndexStats.Interval } default: return 0, fmt.Errorf("invalid metric name: %s", metricType) @@ -278,7 +279,7 @@ func (h *APIHandler) GetMetricRangeAndBucketSize(req *http.Request, clusterID, m } bucketSize := 0 - bucketSizeStr := h.GetParameterOrDefault(req, "bucket_size", "") //default is 10; the time span of each bucket, in seconds + bucketSizeStr := h.GetParameterOrDefault(req, "bucket_size", "") //default is 10; the time span of each bucket, in seconds if bucketSizeStr != "" { du, err := util.ParseDuration(bucketSizeStr) if err != nil { @@ -293,7 +294,7 @@ func (h *APIHandler) GetMetricRangeAndBucketSize(req *http.Request, clusterID, m maxStr := h.Get(req, "max", "") var ( minBucketSize = 0 - err error + err error ) //clusterID may be empty when querying host metrics if clusterID != "" { @@ -301,7 +302,7 @@ func (h *APIHandler) GetMetricRangeAndBucketSize(req *http.Request, clusterID, m if err != nil { return 0, 0, 0, fmt.Errorf("failed to get min bucket size for cluster [%s]:%w", clusterID, err) } - }else{ + } else { //default to 20 minBucketSize = 20 } diff --git a/modules/elastic/api/v1/node_overview.go b/modules/elastic/api/v1/node_overview.go index a4048576..d13c2383 100644 --- a/modules/elastic/api/v1/node_overview.go +++ b/modules/elastic/api/v1/node_overview.go @@ -45,40 +45,40 @@ import ( ) func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - resBody:=util.MapStr{} - reqBody := struct{ - Keyword string `json:"keyword"` - Size int `json:"size"` - From int `json:"from"` - Aggregations []elastic.SearchAggParam `json:"aggs"` - Highlight elastic.SearchHighlightParam `json:"highlight"` - Filter elastic.SearchFilterParam `json:"filter"` - Sort []string `json:"sort"` - SearchField string `json:"search_field"` + resBody := util.MapStr{} + reqBody := struct { + Keyword string `json:"keyword"` + Size int `json:"size"` + From int `json:"from"` + Aggregations []elastic.SearchAggParam `json:"aggs"` + Highlight elastic.SearchHighlightParam `json:"highlight"` + Filter elastic.SearchFilterParam `json:"filter"` + Sort []string `json:"sort"` + SearchField string `json:"search_field"` }{} err := h.DecodeJSON(req,
&reqBody) if err != nil { resBody["error"] = err.Error() - h.WriteJSON(w,resBody, http.StatusInternalServerError ) + h.WriteJSON(w, resBody, http.StatusInternalServerError) return } aggs := elastic.BuildSearchTermAggregations(reqBody.Aggregations) aggs["term_cluster_id"] = util.MapStr{ "terms": util.MapStr{ "field": "metadata.cluster_id", - "size": 1000, + "size": 1000, }, "aggs": util.MapStr{ "term_cluster_name": util.MapStr{ "terms": util.MapStr{ "field": "metadata.cluster_name", - "size": 1, + "size": 1, }, }, }, } - var should =[]util.MapStr{} - if reqBody.SearchField != ""{ + var should = []util.MapStr{} + if reqBody.SearchField != "" { should = []util.MapStr{ { "prefix": util.MapStr{ @@ -101,7 +101,7 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request }, }, } - }else{ + } else { should = []util.MapStr{ { "prefix": util.MapStr{ @@ -143,30 +143,25 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request } clusterFilter, hasPrivilege := h.GetClusterFilter(req, "metadata.cluster_id") if !hasPrivilege && clusterFilter == nil { - h.WriteJSON(w, elastic.SearchResponse{ - - }, http.StatusOK) + h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK) return } - must := []interface{}{ - } + must := []interface{}{} if !hasPrivilege && clusterFilter != nil { must = append(must, clusterFilter) } - - query := util.MapStr{ "aggs": aggs, "size": reqBody.Size, - "from": reqBody.From, + "from": reqBody.From, "highlight": elastic.BuildSearchHighlight(&reqBody.Highlight), "query": util.MapStr{ "bool": util.MapStr{ "minimum_should_match": 1, - "filter": elastic.BuildSearchTermFilter(reqBody.Filter), - "should": should, - "must": must, + "filter": elastic.BuildSearchTermFilter(reqBody.Filter), + "should": should, + "must": must, }, }, "sort": []util.MapStr{ @@ -178,7 +173,7 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request }, } if len(reqBody.Sort) > 1 { - query["sort"] = []util.MapStr{ + query["sort"] = []util.MapStr{ { reqBody.Sort[0]: util.MapStr{ "order": reqBody.Sort[1], @@ -190,7 +185,7 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(orm.GetIndexName(elastic.NodeConfig{}), dsl) if err != nil { resBody["error"] = err.Error() - h.WriteJSON(w,resBody, http.StatusInternalServerError ) + h.WriteJSON(w, resBody, http.StatusInternalServerError) return } w.Write(util.MustToJSONBytes(response)) @@ -293,7 +288,7 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps source["shard_info"] = shardInfo } if tempClusterID, ok := util.GetMapValueByKeys([]string{"metadata", "labels", "cluster_id"}, result); ok { - if clusterID, ok = tempClusterID.(string); ok { + if clusterID, ok = tempClusterID.(string); ok { if meta := elastic.GetMetadata(clusterID); meta != nil && meta.ClusterState != nil { source["is_master_node"] = meta.ClusterState.MasterNode == nodeID } @@ -316,35 +311,35 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps return } // indexing rate - indexMetric:=newMetricItem("indexing", 1, OperationGroupKey) - indexMetric.AddAxi("indexing rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) + indexMetric := newMetricItem("indexing", 1, OperationGroupKey) + indexMetric.AddAxi("indexing rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) nodeMetricItems := []GroupMetricItem{} -
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{ - Key: "indexing", - Field: "payload.elasticsearch.node_stats.indices.indexing.index_total", - ID: util.GetUUID(), + nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ + Key: "indexing", + Field: "payload.elasticsearch.node_stats.indices.indexing.index_total", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: indexMetric, - FormatType: "num", - Units: "Indexing/s", + MetricItem: indexMetric, + FormatType: "num", + Units: "Indexing/s", }) - queryMetric:=newMetricItem("search", 2, OperationGroupKey) - queryMetric.AddAxi("query rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) - nodeMetricItems=append(nodeMetricItems, GroupMetricItem{ - Key: "search", - Field: "payload.elasticsearch.node_stats.indices.search.query_total", - ID: util.GetUUID(), + queryMetric := newMetricItem("search", 2, OperationGroupKey) + queryMetric.AddAxi("query rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) + nodeMetricItems = append(nodeMetricItems, GroupMetricItem{ + Key: "search", + Field: "payload.elasticsearch.node_stats.indices.search.query_total", + ID: util.GetUUID(), IsDerivative: true, - MetricItem: queryMetric, - FormatType: "num", - Units: "Search/s", + MetricItem: queryMetric, + FormatType: "num", + Units: "Search/s", }) - aggs:=map[string]interface{}{} - query=map[string]interface{}{} - query["query"]=util.MapStr{ + aggs := map[string]interface{}{} + query = map[string]interface{}{} + query["query"] = util.MapStr{ "bool": util.MapStr{ - "must": []util.MapStr{ + "must": []util.MapStr{ { "term": util.MapStr{ "metadata.category": util.MapStr{ @@ -378,15 +373,15 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps }, } - for _,metricItem:=range nodeMetricItems{ - aggs[metricItem.ID]=util.MapStr{ - "max":util.MapStr{ + for _, metricItem := range nodeMetricItems { + aggs[metricItem.ID] = util.MapStr{ + "max": util.MapStr{ "field": metricItem.Field, }, } - if metricItem.IsDerivative{ - aggs[metricItem.ID+"_deriv"]=util.MapStr{ - "derivative":util.MapStr{ + if metricItem.IsDerivative { + aggs[metricItem.ID+"_deriv"] = util.MapStr{ + "derivative": util.MapStr{ "buckets_path": metricItem.ID, }, } @@ -398,8 +393,8 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps if err != nil { panic(err) } - query["size"]=0 - query["aggs"]= util.MapStr{ + query["size"] = 0 + query["aggs"] = util.MapStr{ "group_by_level": util.MapStr{ "terms": util.MapStr{ "field": "metadata.labels.node_id", @@ -407,11 +402,11 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps }, "aggs": util.MapStr{ "dates": util.MapStr{ - "date_histogram":util.MapStr{ - "field": "timestamp", + "date_histogram": util.MapStr{ + "field": "timestamp", intervalField: bucketSizeStr, }, - "aggs":aggs, + "aggs": aggs, }, }, }, @@ -425,9 +420,8 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps indexMetrics := map[string]util.MapStr{} for key, item := range metrics { for _, line := range item.Lines { - if _, ok := indexMetrics[line.Metric.Label]; !ok{ - indexMetrics[line.Metric.Label] = util.MapStr{ - } + if _, ok := indexMetrics[line.Metric.Label]; !ok { + indexMetrics[line.Metric.Label] = util.MapStr{} } indexMetrics[line.Metric.Label][key] = line.Data } @@ -487,7 +481,7 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht // return //} q1 := orm.Query{ - Size: 1, + Size: 1, WildcardIndex: true, } q1.Conds 
= orm.And( @@ -512,7 +506,7 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht tt, _ := time.Parse(time.RFC3339, ts) if time.Now().Sub(tt).Seconds() > 30 { kvs["status"] = "unavailable" - }else{ + } else { kvs["status"] = "available" } } @@ -530,9 +524,9 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht jvm, ok := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "node_stats", "jvm"}, vresult) if ok { - if jvmVal, ok := jvm.(map[string]interface{});ok { + if jvmVal, ok := jvm.(map[string]interface{}); ok { kvs["jvm"] = util.MapStr{ - "mem": jvmVal["mem"], + "mem": jvmVal["mem"], "uptime": jvmVal["uptime_in_millis"], } } @@ -553,7 +547,7 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht } } } - if len( response.Hits.Hits) > 0 { + if len(response.Hits.Hits) > 0 { hit := response.Hits.Hits[0] innerMetaData, _ := util.GetMapValueByKeys([]string{"metadata", "labels"}, hit.Source) if mp, ok := innerMetaData.(map[string]interface{}); ok { @@ -583,8 +577,8 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque nodeID := ps.MustGetParameter("node_id") var must = []util.MapStr{ { - "term":util.MapStr{ - "metadata.labels.cluster_uuid":util.MapStr{ + "term": util.MapStr{ + "metadata.labels.cluster_uuid": util.MapStr{ "value": clusterUUID, }, }, @@ -612,15 +606,15 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque }, } resBody := map[string]interface{}{} - bucketSize, min, max, err := h.GetMetricRangeAndBucketSize(req,clusterID, MetricTypeNodeStats,60) + bucketSize, min, max, err := h.GetMetricRangeAndBucketSize(req, clusterID, MetricTypeNodeStats, 60) if err != nil { log.Error(err) resBody["error"] = err h.WriteJSON(w, resBody, http.StatusInternalServerError) return } - query:=map[string]interface{}{} - query["query"]=util.MapStr{ + query := map[string]interface{}{} + query["query"] = util.MapStr{ "bool": util.MapStr{ "must": must, "filter": []util.MapStr{ @@ -636,67 +630,67 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque }, } - bucketSizeStr:=fmt.Sprintf("%vs",bucketSize) - metricItems:=[]*common.MetricItem{} - metricItem:=newMetricItem("cpu", 1, SystemGroupKey) - metricItem.AddAxi("cpu","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true) - metricItem.AddLine("Process CPU","Process CPU","process cpu used percent of node.","group1","payload.elasticsearch.node_stats.process.cpu.percent","max",bucketSizeStr,"%","num","0,0.[00]","0,0.[00]",false,false) - metricItem.AddLine("OS CPU","OS CPU","process cpu used percent of node.","group1","payload.elasticsearch.node_stats.os.cpu.percent","max",bucketSizeStr,"%","num","0,0.[00]","0,0.[00]",false,false) - metricItems=append(metricItems,metricItem) - metricItem =newMetricItem("jvm", 2, SystemGroupKey) - metricItem.AddAxi("JVM Heap","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true) - metricItem.AddLine("Max Heap","Max Heap","JVM max Heap of node.","group1","payload.elasticsearch.node_stats.jvm.mem.heap_max_in_bytes","max",bucketSizeStr,"","bytes","0,0.[00]","0,0.[00]",false,false) - metricItem.AddLine("Used Heap","Used Heap","JVM used Heap of node.","group1","payload.elasticsearch.node_stats.jvm.mem.heap_used_in_bytes","max",bucketSizeStr,"","bytes","0,0.[00]","0,0.[00]",false,false) - metricItems=append(metricItems,metricItem) - metricItem=newMetricItem("index_throughput", 3, OperationGroupKey) - 
metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) - metricItem.AddLine("Indexing Rate","Total Shards","Number of documents being indexed for node.","group1","payload.elasticsearch.node_stats.indices.indexing.index_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true) - metricItems=append(metricItems,metricItem) - metricItem=newMetricItem("search_throughput", 4, OperationGroupKey) - metricItem.AddAxi("searching","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,false) - metricItem.AddLine("Search Rate","Total Shards", + bucketSizeStr := fmt.Sprintf("%vs", bucketSize) + metricItems := []*common.MetricItem{} + metricItem := newMetricItem("cpu", 1, SystemGroupKey) + metricItem.AddAxi("cpu", "group1", common.PositionLeft, "ratio", "0.[0]", "0.[0]", 5, true) + metricItem.AddLine("Process CPU", "Process CPU", "process cpu used percent of node.", "group1", "payload.elasticsearch.node_stats.process.cpu.percent", "max", bucketSizeStr, "%", "num", "0,0.[00]", "0,0.[00]", false, false) + metricItem.AddLine("OS CPU", "OS CPU", "process cpu used percent of node.", "group1", "payload.elasticsearch.node_stats.os.cpu.percent", "max", bucketSizeStr, "%", "num", "0,0.[00]", "0,0.[00]", false, false) + metricItems = append(metricItems, metricItem) + metricItem = newMetricItem("jvm", 2, SystemGroupKey) + metricItem.AddAxi("JVM Heap", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true) + metricItem.AddLine("Max Heap", "Max Heap", "JVM max Heap of node.", "group1", "payload.elasticsearch.node_stats.jvm.mem.heap_max_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false) + metricItem.AddLine("Used Heap", "Used Heap", "JVM used Heap of node.", "group1", "payload.elasticsearch.node_stats.jvm.mem.heap_used_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false) + metricItems = append(metricItems, metricItem) + metricItem = newMetricItem("index_throughput", 3, OperationGroupKey) + metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) + metricItem.AddLine("Indexing Rate", "Total Shards", "Number of documents being indexed for node.", "group1", "payload.elasticsearch.node_stats.indices.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true) + metricItems = append(metricItems, metricItem) + metricItem = newMetricItem("search_throughput", 4, OperationGroupKey) + metricItem.AddAxi("searching", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false) + metricItem.AddLine("Search Rate", "Total Shards", "Number of search requests being executed.", - "group1","payload.elasticsearch.node_stats.indices.search.query_total","max",bucketSizeStr,"query/s","num","0,0.[00]","0,0.[00]",false,true) - metricItems=append(metricItems,metricItem) + "group1", "payload.elasticsearch.node_stats.indices.search.query_total", "max", bucketSizeStr, "query/s", "num", "0,0.[00]", "0,0.[00]", false, true) + metricItems = append(metricItems, metricItem) - metricItem=newMetricItem("index_latency", 5, LatencyGroupKey) - metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true) + metricItem = newMetricItem("index_latency", 5, LatencyGroupKey) + metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true) - metricItem.AddLine("Indexing","Indexing Latency","Average latency for indexing 
documents.","group1","payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true) + metricItem.AddLine("Indexing", "Indexing Latency", "Average latency for indexing documents.", "group1", "payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true) metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.index_total" metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 { - return value/value2 + return value / value2 } - metricItem.AddLine("Indexing","Delete Latency","Average latency for delete documents.","group1","payload.elasticsearch.node_stats.indices.indexing.delete_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true) + metricItem.AddLine("Indexing", "Delete Latency", "Average latency for delete documents.", "group1", "payload.elasticsearch.node_stats.indices.indexing.delete_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true) metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.delete_total" metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 { - return value/value2 + return value / value2 } - metricItems=append(metricItems,metricItem) + metricItems = append(metricItems, metricItem) - metricItem=newMetricItem("search_latency", 6, LatencyGroupKey) - metricItem.AddAxi("searching","group2",common.PositionLeft,"num","0,0","0,0.[00]",5,false) + metricItem = newMetricItem("search_latency", 6, LatencyGroupKey) + metricItem.AddAxi("searching", "group2", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false) - metricItem.AddLine("Searching","Query Latency","Average latency for searching query.","group2","payload.elasticsearch.node_stats.indices.search.query_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true) + metricItem.AddLine("Searching", "Query Latency", "Average latency for searching query.", "group2", "payload.elasticsearch.node_stats.indices.search.query_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true) metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.query_total" metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 { - return value/value2 + return value / value2 } - metricItem.AddLine("Searching","Fetch Latency","Average latency for searching fetch.","group2","payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true) + metricItem.AddLine("Searching", "Fetch Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true) metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.fetch_total" metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 { - return value/value2 + return value / value2 } - metricItem.AddLine("Searching","Scroll Latency","Average latency for searching fetch.","group2","payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true) + metricItem.AddLine("Searching", "Scroll Latency", "Average latency for searching fetch.", "group2", 
"payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true) metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.scroll_total" metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 { - return value/value2 + return value / value2 } - metricItems=append(metricItems,metricItem) - metricItem =newMetricItem("parent_breaker", 8, SystemGroupKey) - metricItem.AddLine("Parent Breaker Tripped","Parent Breaker Tripped","Rate of the circuit breaker has been triggered and prevented an out of memory error.","group1","payload.elasticsearch.node_stats.breakers.parent.tripped","max",bucketSizeStr,"times/s","num","0,0.[00]","0,0.[00]",false,true) - metricItems=append(metricItems,metricItem) - metrics, err := h.getSingleMetrics(context.Background(), metricItems,query, bucketSize) + metricItems = append(metricItems, metricItem) + metricItem = newMetricItem("parent_breaker", 8, SystemGroupKey) + metricItem.AddLine("Parent Breaker Tripped", "Parent Breaker Tripped", "Rate of the circuit breaker has been triggered and prevented an out of memory error.", "group1", "payload.elasticsearch.node_stats.breakers.parent.tripped", "max", bucketSizeStr, "times/s", "num", "0,0.[00]", "0,0.[00]", false, true) + metricItems = append(metricItems, metricItem) + metrics, err := h.getSingleMetrics(context.Background(), metricItems, query, bucketSize) if err != nil { log.Error(err) h.WriteError(w, err, http.StatusInternalServerError) @@ -713,8 +707,8 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque h.WriteJSON(w, resBody, http.StatusOK) } -func getNodeHealthMetric(query util.MapStr, bucketSize int)(*common.MetricItem, error){ - bucketSizeStr:=fmt.Sprintf("%vs",bucketSize) +func getNodeHealthMetric(query util.MapStr, bucketSize int) (*common.MetricItem, error) { + bucketSizeStr := fmt.Sprintf("%vs", bucketSize) intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr) if err != nil { return nil, err @@ -722,7 +716,7 @@ func getNodeHealthMetric(query util.MapStr, bucketSize int)(*common.MetricItem, query["aggs"] = util.MapStr{ "dates": util.MapStr{ "date_histogram": util.MapStr{ - "field": "timestamp", + "field": "timestamp", intervalField: bucketSizeStr, }, "aggs": util.MapStr{ @@ -740,8 +734,8 @@ func getNodeHealthMetric(query util.MapStr, bucketSize int)(*common.MetricItem, return nil, err } - metricItem:=newMetricItem("node_health", 0, "") - metricItem.AddLine("Node health","Node Health","","group1","payload.elasticsearch.node_stats.jvm.uptime_in_millis","min",bucketSizeStr,"%","ratio","0.[00]","0.[00]",false,false) + metricItem := newMetricItem("node_health", 0, "") + metricItem.AddLine("Node health", "Node Health", "", "group1", "payload.elasticsearch.node_stats.jvm.uptime_in_millis", "min", bucketSizeStr, "%", "ratio", "0.[00]", "0.[00]", false, false) metricData := []interface{}{} if response.StatusCode == 200 { @@ -770,7 +764,7 @@ func getNodeHealthMetric(query util.MapStr, bucketSize int)(*common.MetricItem, return metricItem, nil } -func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{}, error){ +func getNodeOnlineStatusOfRecentDay(nodeIDs []string) (map[string][]interface{}, error) { q := orm.Query{ WildcardIndex: true, } @@ -779,64 +773,64 @@ func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{}, "group_by_node_id": util.MapStr{ 
"terms": util.MapStr{ "field": "metadata.labels.node_id", - "size": 100, + "size": 100, }, "aggs": util.MapStr{ "uptime_histogram": util.MapStr{ - "date_range": util.MapStr{ + "date_range": util.MapStr{ "field": "timestamp", "format": "yyyy-MM-dd", "time_zone": "+08:00", "ranges": []util.MapStr{ { "from": "now-13d/d", - "to": "now-12d/d", + "to": "now-12d/d", }, { "from": "now-12d/d", - "to": "now-11d/d", + "to": "now-11d/d", }, { "from": "now-11d/d", - "to": "now-10d/d", + "to": "now-10d/d", }, { "from": "now-10d/d", - "to": "now-9d/d", + "to": "now-9d/d", }, { "from": "now-9d/d", - "to": "now-8d/d", + "to": "now-8d/d", }, { "from": "now-8d/d", - "to": "now-7d/d", + "to": "now-7d/d", }, { "from": "now-7d/d", - "to": "now-6d/d", + "to": "now-6d/d", }, { "from": "now-6d/d", - "to": "now-5d/d", + "to": "now-5d/d", }, { "from": "now-5d/d", - "to": "now-4d/d", + "to": "now-4d/d", }, { "from": "now-4d/d", - "to": "now-3d/d", - },{ + "to": "now-3d/d", + }, { "from": "now-3d/d", - "to": "now-2d/d", + "to": "now-2d/d", }, { "from": "now-2d/d", - "to": "now-1d/d", + "to": "now-1d/d", }, { "from": "now-1d/d", - "to": "now/d", + "to": "now/d", }, { "from": "now/d", - "to": "now", + "to": "now", }, }, }, @@ -865,7 +859,7 @@ func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{}, { "range": util.MapStr{ "timestamp": util.MapStr{ - "gte":"now-15d", + "gte": "now-15d", "lte": "now", }, }, @@ -903,13 +897,13 @@ func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{}, recentStatus[nodeKey] = []interface{}{} if histogramAgg, ok := bk["uptime_histogram"].(map[string]interface{}); ok { if bks, ok := histogramAgg["buckets"].([]interface{}); ok { - for _, bkItem := range bks { + for _, bkItem := range bks { if bkVal, ok := bkItem.(map[string]interface{}); ok { if minUptime, ok := util.GetMapValueByKeys([]string{"min_uptime", "value"}, bkVal); ok { //mark node status as offline when uptime less than 10m if v, ok := minUptime.(float64); ok && v >= 600000 { recentStatus[nodeKey] = append(recentStatus[nodeKey], []interface{}{bkVal["key"], "online"}) - }else{ + } else { recentStatus[nodeKey] = append(recentStatus[nodeKey], []interface{}{bkVal["key"], "offline"}) } } @@ -927,10 +921,10 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps max = h.GetParameterOrDefault(req, "max", "now") ) - resBody := map[string] interface{}{} + resBody := map[string]interface{}{} id := ps.ByName("id") nodeUUID := ps.ByName("node_id") - q := &orm.Query{ Size: 1} + q := &orm.Query{Size: 1} q.AddSort("timestamp", orm.DESC) q.Conds = orm.And( orm.Eq("metadata.category", "elasticsearch"), @@ -942,16 +936,16 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps err, result := orm.Search(event.Event{}, q) if err != nil { resBody["error"] = err.Error() - h.WriteJSON(w,resBody, http.StatusInternalServerError ) + h.WriteJSON(w, resBody, http.StatusInternalServerError) } namesM := util.MapStr{} if len(result.Result) > 0 { if data, ok := result.Result[0].(map[string]interface{}); ok { if routingTable, exists := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "node_routing_table"}, data); exists { - if rows, ok := routingTable.([]interface{}); ok{ + if rows, ok := routingTable.([]interface{}); ok { for _, row := range rows { if v, ok := row.(map[string]interface{}); ok { - if indexName, ok := v["index"].(string); ok{ + if indexName, ok := v["index"].(string); ok { namesM[indexName] = true } } @@ -961,12 +955,12 @@ func (h 
*APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps } } - indexNames := make([]interface{}, 0, len(namesM) ) + indexNames := make([]interface{}, 0, len(namesM)) for name, _ := range namesM { indexNames = append(indexNames, name) } - q1 := &orm.Query{ Size: 100} + q1 := &orm.Query{Size: 100} q1.AddSort("timestamp", orm.DESC) q1.Conds = orm.And( orm.Eq("metadata.category", "elasticsearch"), @@ -977,13 +971,13 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps err, result = orm.Search(elastic.IndexConfig{}, q1) if err != nil { resBody["error"] = err.Error() - h.WriteJSON(w,resBody, http.StatusInternalServerError ) + h.WriteJSON(w, resBody, http.StatusInternalServerError) } indices, err := h.getLatestIndices(req, min, max, id, &result) if err != nil { resBody["error"] = err.Error() - h.WriteJSON(w,resBody, http.StatusInternalServerError ) + h.WriteJSON(w, resBody, http.StatusInternalServerError) } h.WriteJSON(w, indices, http.StatusOK) @@ -1069,7 +1063,7 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string, } indices := []interface{}{} var indexPattern *radix.Pattern - if !hasAllPrivilege{ + if !hasAllPrivilege { indexPattern = radix.Compile(allowedIndices...) } @@ -1102,12 +1096,11 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string, return indices, nil } - func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { clusterID := ps.MustGetParameter("id") nodeID := ps.MustGetParameter("node_id") q1 := orm.Query{ - Size: 1, + Size: 1, WildcardIndex: true, } q1.Conds = orm.And( @@ -1119,9 +1112,9 @@ func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps q1.AddSort("timestamp", orm.DESC) err, result := orm.Search(&event.Event{}, &q1) if err != nil { - h.WriteJSON(w,util.MapStr{ + h.WriteJSON(w, util.MapStr{ "error": err.Error(), - }, http.StatusInternalServerError ) + }, http.StatusInternalServerError) return } var shardInfo interface{} = []interface{}{} @@ -1155,4 +1148,4 @@ func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps } h.WriteJSON(w, shardInfo, http.StatusOK) -} \ No newline at end of file +} diff --git a/modules/security/realm/authc/saml/main.go b/modules/security/realm/authc/saml/main.go index 78b0354e..300ac046 100644 --- a/modules/security/realm/authc/saml/main.go +++ b/modules/security/realm/authc/saml/main.go @@ -28,15 +28,15 @@ import ( "crypto/tls" "crypto/x509" "fmt" + "github.com/crewjam/saml" "net/http" "net/url" - "github.com/crewjam/saml" "github.com/crewjam/saml/samlsp" ) var metdataurl = "https://sso.infini.ltd/metadata" //Metadata of the IDP -var sessioncert = "./sessioncert" //Key pair used for creating a signed session +var sessioncert = "./sessioncert" //Key pair used for creating a signed session var sessionkey = "./sessionkey" var serverkey = "./serverkey" //Server TLS var servercert = "./servercert" @@ -68,13 +68,13 @@ func main() { rootURL, err := url.Parse(serverurl) panicIfError(err) samlSP, _ := samlsp.New(samlsp.Options{ - URL: *rootURL, - Key: keyPair.PrivateKey.(*rsa.PrivateKey), - Certificate: keyPair.Leaf, + URL: *rootURL, + Key: keyPair.PrivateKey.(*rsa.PrivateKey), + Certificate: keyPair.Leaf, IDPMetadata: &saml.EntityDescriptor{ - //EntityID: - }, // you can also have Metadata XML instead of URL - EntityID: entityId, + //EntityID: + }, // you can also have Metadata XML instead of URL + EntityID: entityId, }) app := http.HandlerFunc(hello) 
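// Editorial aside, not part of the patch: the hello handler wrapped just above
// and registered just below is not shown in this hunk. A minimal sketch of what
// such a handler could look like, assuming samlsp.AttributeFromContext from
// github.com/crewjam/saml/samlsp; the "cn" attribute name is illustrative only
// and depends on what the IdP releases:
//
//	func hello(w http.ResponseWriter, r *http.Request) {
//		// RequireAccount guarantees an authenticated SAML session here,
//		// so assertion attributes are available from the request context.
//		fmt.Fprintf(w, "Hello, %s!", samlsp.AttributeFromContext(r.Context(), "cn"))
//	}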
http.Handle("/hello", samlSP.RequireAccount(app)) diff --git a/plugin/api/alerting/alert.go b/plugin/api/alerting/alert.go index cefa6fa7..c9f3e61e 100644 --- a/plugin/api/alerting/alert.go +++ b/plugin/api/alerting/alert.go @@ -49,7 +49,7 @@ func (h *AlertAPI) getAlert(w http.ResponseWriter, req *http.Request, ps httprou q := orm.Query{ WildcardIndex: true, - Size: 1, + Size: 1, } q.Conds = orm.And(orm.Eq("id", id)) err, result := orm.Search(obj, &q) @@ -76,16 +76,16 @@ func (h *AlertAPI) getAlert(w http.ResponseWriter, req *http.Request, ps httprou func (h *AlertAPI) searchAlert(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { var ( - keyword = h.GetParameterOrDefault(req, "keyword", "") + keyword = h.GetParameterOrDefault(req, "keyword", "") queryDSL = `{"sort":[%s],"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d}` strSize = h.GetParameterOrDefault(req, "size", "20") strFrom = h.GetParameterOrDefault(req, "from", "0") - state = h.GetParameterOrDefault(req, "state", "") - priority = h.GetParameterOrDefault(req, "priority", "") - sort = h.GetParameterOrDefault(req, "sort", "") - ruleID = h.GetParameterOrDefault(req, "rule_id", "") - min = h.GetParameterOrDefault(req, "min", "") - max = h.GetParameterOrDefault(req, "max", "") + state = h.GetParameterOrDefault(req, "state", "") + priority = h.GetParameterOrDefault(req, "priority", "") + sort = h.GetParameterOrDefault(req, "sort", "") + ruleID = h.GetParameterOrDefault(req, "rule_id", "") + min = h.GetParameterOrDefault(req, "min", "") + max = h.GetParameterOrDefault(req, "max", "") mustBuilder = &strings.Builder{} sortBuilder = strings.Builder{} ) @@ -160,13 +160,13 @@ func (h *AlertAPI) getAlertStats(w http.ResponseWriter, req *http.Request, ps ht "terms_by_state": util.MapStr{ "terms": util.MapStr{ "field": "priority", - "size": 5, + "size": 5, }, }, }, } - searchRes, err := esClient.SearchWithRawQueryDSL(orm.GetWildcardIndexName(alerting.Alert{}), util.MustToJSONBytes(queryDsl) ) + searchRes, err := esClient.SearchWithRawQueryDSL(orm.GetWildcardIndexName(alerting.Alert{}), util.MustToJSONBytes(queryDsl)) if err != nil { h.WriteJSON(w, util.MapStr{ "error": err.Error(), @@ -191,4 +191,4 @@ func (h *AlertAPI) getAlertStats(w http.ResponseWriter, req *http.Request, ps ht "current": priorityAlerts, }, }, http.StatusOK) -} \ No newline at end of file +} diff --git a/plugin/api/alerting/channel.go b/plugin/api/alerting/channel.go index a23446cd..f7b52e00 100644 --- a/plugin/api/alerting/channel.go +++ b/plugin/api/alerting/channel.go @@ -209,17 +209,17 @@ func (h *AlertAPI) deleteChannel(w http.ResponseWriter, req *http.Request, ps ht func (h *AlertAPI) searchChannel(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { var ( - keyword = h.GetParameterOrDefault(req, "keyword", "") - strSize = h.GetParameterOrDefault(req, "size", "20") - strFrom = h.GetParameterOrDefault(req, "from", "0") - subType = h.GetParameterOrDefault(req, "sub_type", "") + keyword = h.GetParameterOrDefault(req, "keyword", "") + strSize = h.GetParameterOrDefault(req, "size", "20") + strFrom = h.GetParameterOrDefault(req, "from", "0") + subType = h.GetParameterOrDefault(req, "sub_type", "") typ = h.GetParameterOrDefault(req, "type", "") - sort = h.GetParameterOrDefault(req, "sort", "updated:desc") + sort = h.GetParameterOrDefault(req, "sort", "updated:desc") ) mustQ := []interface{}{} if keyword != "" { mustQ = append(mustQ, util.MapStr{ - "query_string": util.MapStr{"default_field":"*","query": keyword}, + "query_string": 
util.MapStr{"default_field": "*", "query": keyword}, }) } if typ != "" { @@ -249,7 +249,7 @@ func (h *AlertAPI) searchChannel(w http.ResponseWriter, req *http.Request, ps ht from = 0 } var ( - sortField string + sortField string sortDirection string ) sortParts := strings.Split(sort, ":") @@ -306,26 +306,26 @@ func (h *AlertAPI) testChannel(w http.ResponseWriter, req *http.Request, ps http } firstGrpValue := global.MustLookupString(elastic.GlobalSystemElasticsearchID) ctx := map[string]interface{}{ - "title": "INFINI platform test alert message", - "message": "This is just a test message, do not reply!", - "objects": []string{".infini_metrics"}, - "trigger_at": time.Now().UnixMilli(), - "duration": "5m10s", - "rule_id": util.GetUUID(), - "rule_name": "test rule", - "resource_id": util.GetUUID(), - "resource_name": "test resource", - "event_id": util.GetUUID(), - "timestamp": time.Now().UnixMilli(), + "title": "INFINI platform test alert message", + "message": "This is just a test message, do not reply!", + "objects": []string{".infini_metrics"}, + "trigger_at": time.Now().UnixMilli(), + "duration": "5m10s", + "rule_id": util.GetUUID(), + "rule_name": "test rule", + "resource_id": util.GetUUID(), + "resource_name": "test resource", + "event_id": util.GetUUID(), + "timestamp": time.Now().UnixMilli(), "first_group_value": firstGrpValue, - "first_threshold": "90", - "priority": "critical", + "first_threshold": "90", + "priority": "critical", "results": []util.MapStr{ {"threshold": "90", - "priority": "critical", - "group_values": []string{firstGrpValue, "group_value2" }, - "issue_timestamp": time.Now().UnixMilli()-500, - "result_value": 90, + "priority": "critical", + "group_values": []string{firstGrpValue, "group_value2"}, + "issue_timestamp": time.Now().UnixMilli() - 500, + "result_value": 90, "relation_values": util.MapStr{"a": 100, "b": 90}, }, }, @@ -399,4 +399,4 @@ func setChannelEnabled(enabled bool, channelIDs []string) error { } err := orm.UpdateBy(alerting.Channel{}, util.MustToJSONBytes(q)) return err -} \ No newline at end of file +} diff --git a/plugin/api/alerting/message.go b/plugin/api/alerting/message.go index dabd34cd..024696b6 100644 --- a/plugin/api/alerting/message.go +++ b/plugin/api/alerting/message.go @@ -47,11 +47,11 @@ import ( func (h *AlertAPI) ignoreAlertMessage(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { body := struct { - Messages []alerting.AlertMessage `json:"messages"` - IgnoredReason string `json:"ignored_reason"` - IsReset bool `json:"is_reset"` + Messages []alerting.AlertMessage `json:"messages"` + IgnoredReason string `json:"ignored_reason"` + IsReset bool `json:"is_reset"` }{} - err := h.DecodeJSON(req, &body) + err := h.DecodeJSON(req, &body) if err != nil { h.WriteError(w, err.Error(), http.StatusInternalServerError) return @@ -83,7 +83,7 @@ func (h *AlertAPI) ignoreAlertMessage(w http.ResponseWriter, req *http.Request, }, }) source = fmt.Sprintf("ctx._source['status'] = '%s'", alerting.MessageStateAlerting) - }else { + } else { must = append(must, util.MapStr{ "term": util.MapStr{ "status": util.MapStr{ @@ -114,9 +114,8 @@ func (h *AlertAPI) ignoreAlertMessage(w http.ResponseWriter, req *http.Request, _ = kv.DeleteKey(alerting2.KVLastMessageState, []byte(msg.RuleID)) } - h.WriteJSON(w, util.MapStr{ - "ids": messageIDs, + "ids": messageIDs, "result": "updated", }, 200) } @@ -138,7 +137,7 @@ func (h *AlertAPI) getAlertMessageStats(w http.ResponseWriter, req *http.Request return } if !hasAllPrivilege { - must = 
append(must,clusterFilter) + must = append(must, clusterFilter) } queryDsl := util.MapStr{ "size": 0, @@ -151,13 +150,13 @@ func (h *AlertAPI) getAlertMessageStats(w http.ResponseWriter, req *http.Request "terms_by_priority": util.MapStr{ "terms": util.MapStr{ "field": "priority", - "size": 5, + "size": 5, }, }, }, } indexName := orm.GetWildcardIndexName(alerting.AlertMessage{}) - searchRes, err := esClient.SearchWithRawQueryDSL(indexName, util.MustToJSONBytes(queryDsl) ) + searchRes, err := esClient.SearchWithRawQueryDSL(indexName, util.MustToJSONBytes(queryDsl)) if err != nil { h.WriteJSON(w, util.MapStr{ "error": err.Error(), @@ -172,7 +171,7 @@ func (h *AlertAPI) getAlertMessageStats(w http.ResponseWriter, req *http.Request } } } - for _, status := range []string{"info", "low","medium","high", "critical"} { + for _, status := range []string{"info", "low", "medium", "high", "critical"} { if _, ok := statusCounts[status]; !ok { statusCounts[status] = 0 } @@ -206,18 +205,18 @@ func (h *AlertAPI) getAlertMessageStats(w http.ResponseWriter, req *http.Request "terms_by_category": util.MapStr{ "terms": util.MapStr{ "field": "category", - "size": 100, + "size": 100, }, }, "terms_by_tags": util.MapStr{ "terms": util.MapStr{ "field": "tags", - "size": 100, + "size": 100, }, }, }, } - searchRes, err = esClient.SearchWithRawQueryDSL(indexName, util.MustToJSONBytes(queryDsl) ) + searchRes, err = esClient.SearchWithRawQueryDSL(indexName, util.MustToJSONBytes(queryDsl)) if err != nil { h.WriteJSON(w, util.MapStr{ "error": err.Error(), @@ -245,15 +244,14 @@ func (h *AlertAPI) getAlertMessageStats(w http.ResponseWriter, req *http.Request "current": statusCounts, }, "categories": categories, - "tags": tags, + "tags": tags, }, http.StatusOK) } - func (h *AlertAPI) searchAlertMessage(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { var ( - queryDSL = `{"sort":[%s],"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d,"aggs": { + queryDSL = `{"sort":[%s],"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d,"aggs": { "max_updated": { "max": { "field": "updated" @@ -267,16 +265,16 @@ func (h *AlertAPI) searchAlertMessage(w http.ResponseWriter, req *http.Request, }}` strSize = h.GetParameterOrDefault(req, "size", "20") strFrom = h.GetParameterOrDefault(req, "from", "0") - status = h.GetParameterOrDefault(req, "status", "") - priority = h.GetParameterOrDefault(req, "priority", "") - sort = h.GetParameterOrDefault(req, "sort", "") - ruleID = h.GetParameterOrDefault(req, "rule_id", "") - min = h.GetParameterOrDefault(req, "min", "") - max = h.GetParameterOrDefault(req, "max", "") + status = h.GetParameterOrDefault(req, "status", "") + priority = h.GetParameterOrDefault(req, "priority", "") + sort = h.GetParameterOrDefault(req, "sort", "") + ruleID = h.GetParameterOrDefault(req, "rule_id", "") + min = h.GetParameterOrDefault(req, "min", "") + max = h.GetParameterOrDefault(req, "max", "") mustBuilder = &strings.Builder{} sortBuilder = strings.Builder{} - category = h.GetParameterOrDefault(req, "category", "") - tags = h.GetParameterOrDefault(req, "tags", "") + category = h.GetParameterOrDefault(req, "category", "") + tags = h.GetParameterOrDefault(req, "tags", "") ) timeRange := util.MapStr{} if min != "" { @@ -292,7 +290,7 @@ func (h *AlertAPI) searchAlertMessage(w http.ResponseWriter, req *http.Request, }, } mustBuilder.Write(util.MustToJSONBytes(timeFilter)) - }else{ + } else { mustBuilder.WriteString(`{"match_all":{}}`) } @@ -374,7 +372,7 @@ func (h *AlertAPI) searchAlertMessage(w 
http.ResponseWriter, req *http.Request, h.WriteJSON(w, esRes, http.StatusOK) } -func parseTime( t interface{}, layout string) (time.Time, error){ +func parseTime(t interface{}, layout string) (time.Time, error) { switch t.(type) { case string: return time.Parse(layout, t.(string)) @@ -384,7 +382,7 @@ func parseTime( t interface{}, layout string) (time.Time, error){ } func (h *AlertAPI) getAlertMessage(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - message := &alerting.AlertMessage{ + message := &alerting.AlertMessage{ ID: ps.ByName("message_id"), } exists, err := orm.Get(message) @@ -417,36 +415,36 @@ func (h *AlertAPI) getAlertMessage(w http.ResponseWriter, req *http.Request, ps var duration time.Duration if message.Status == alerting.MessageStateRecovered { duration = message.Updated.Sub(message.Created) - }else{ + } else { duration = time.Now().Sub(message.Created) } detailObj := util.MapStr{ - "message_id": message.ID, - "rule_id": message.RuleID, - "rule_name": rule.Name, - "rule_enabled": rule.Enabled, - "title": message.Title, - "message": message.Message, - "priority": message.Priority, - "created": message.Created, - "updated": message.Updated, - "resource_name": rule.Resource.Name, - "resource_id": rule.Resource.ID, + "message_id": message.ID, + "rule_id": message.RuleID, + "rule_name": rule.Name, + "rule_enabled": rule.Enabled, + "title": message.Title, + "message": message.Message, + "priority": message.Priority, + "created": message.Created, + "updated": message.Updated, + "resource_name": rule.Resource.Name, + "resource_id": rule.Resource.ID, "resource_objects": rule.Resource.Objects, - "conditions": rule.Conditions, - "duration": duration.Milliseconds(), - "ignored_time": message.IgnoredTime, - "ignored_reason": message.IgnoredReason, - "ignored_user": message.IgnoredUser, - "status": message.Status, - "expression": rule.Metrics.Expression, - "hit_condition": hitCondition, + "conditions": rule.Conditions, + "duration": duration.Milliseconds(), + "ignored_time": message.IgnoredTime, + "ignored_reason": message.IgnoredReason, + "ignored_user": message.IgnoredUser, + "status": message.Status, + "expression": rule.Metrics.Expression, + "hit_condition": hitCondition, } h.WriteJSON(w, detailObj, http.StatusOK) } func (h *AlertAPI) getMessageNotificationInfo(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - message := &alerting.AlertMessage{ + message := &alerting.AlertMessage{ ID: ps.ByName("message_id"), } exists, err := orm.Get(message) @@ -481,12 +479,12 @@ func (h *AlertAPI) getMessageNotificationInfo(w http.ResponseWriter, req *http.R } if rule.NotificationConfig != nil { notificationInfo["alerting"] = util.MapStr{ - "accept_time_range": rule.NotificationConfig.AcceptTimeRange, - "throttle_period": rule.NotificationConfig.ThrottlePeriod, - "escalation_enabled": rule.NotificationConfig.EscalationEnabled, + "accept_time_range": rule.NotificationConfig.AcceptTimeRange, + "throttle_period": rule.NotificationConfig.ThrottlePeriod, + "escalation_enabled": rule.NotificationConfig.EscalationEnabled, "escalation_throttle_period": rule.NotificationConfig.EscalationThrottlePeriod, - "normal_stats": stats["normal"], - "escalation_stats": stats["escalation"], + "normal_stats": stats["normal"], + "escalation_stats": stats["escalation"], } } if rule.RecoveryNotificationConfig != nil { @@ -497,7 +495,7 @@ func (h *AlertAPI) getMessageNotificationInfo(w http.ResponseWriter, req *http.R h.WriteJSON(w, notificationInfo, http.StatusOK) } -func 
getMessageNotificationStats(msg *alerting.AlertMessage )(util.MapStr, error){ +func getMessageNotificationStats(msg *alerting.AlertMessage) (util.MapStr, error) { rangeQ := util.MapStr{ "gte": msg.Created.UnixMilli(), } @@ -508,7 +506,7 @@ func getMessageNotificationStats(msg *alerting.AlertMessage )(util.MapStr, error "grp_normal_channel": util.MapStr{ "terms": util.MapStr{ "field": "action_execution_results.channel_type", - "size": 20, + "size": 20, }, "aggs": util.MapStr{ "top": util.MapStr{ @@ -531,7 +529,7 @@ func getMessageNotificationStats(msg *alerting.AlertMessage )(util.MapStr, error "grp_escalation_channel": util.MapStr{ "terms": util.MapStr{ "field": "escalation_action_results.channel_type", - "size": 20, + "size": 20, }, "aggs": util.MapStr{ "top": util.MapStr{ @@ -556,7 +554,7 @@ func getMessageNotificationStats(msg *alerting.AlertMessage )(util.MapStr, error aggs["grp_recover_channel"] = util.MapStr{ "terms": util.MapStr{ "field": "recover_action_results.channel_type", - "size": 20, + "size": 20, }, "aggs": util.MapStr{ "top": util.MapStr{ @@ -610,7 +608,7 @@ func getMessageNotificationStats(msg *alerting.AlertMessage )(util.MapStr, error var normalStats = extractStatsFromRaw(result.Raw, "grp_normal_channel", "action_execution_results") var escalationStats = extractStatsFromRaw(result.Raw, "grp_escalation_channel", "escalation_action_results") stats := util.MapStr{ - "normal": normalStats, + "normal": normalStats, "escalation": escalationStats, } if msg.Status == alerting.MessageStateRecovered { @@ -627,15 +625,15 @@ func extractStatsFromRaw(searchRawRes []byte, grpKey string, actionKey string) [ statsItem["channel_type"], _ = jsonparser.GetString(value, "key") statsItem["count"], _ = jsonparser.GetInt(value, "doc_count") jsonparser.ArrayEach(value, func(v []byte, dataType jsonparser.ValueType, offset int, err error) { - ck, _ := jsonparser.GetString(v, "channel_type") - cn, _ := jsonparser.GetString(v, "channel_name") + ck, _ := jsonparser.GetString(v, "channel_type") + cn, _ := jsonparser.GetString(v, "channel_name") if ck == statsItem["channel_type"] { statsItem["channel_name"] = cn - statsItem["error"], _ = jsonparser.GetString(v, "error") + statsItem["error"], _ = jsonparser.GetString(v, "error") } - }, "top", "hits","hits", "[0]", "_source",actionKey) - statsItem["last_time"], _ = jsonparser.GetString(value, "top", "hits","hits", "[0]", "_source","created") + }, "top", "hits", "hits", "[0]", "_source", actionKey) + statsItem["last_time"], _ = jsonparser.GetString(value, "top", "hits", "hits", "[0]", "_source", "created") stats = append(stats, statsItem) }, "aggregations", grpKey, "buckets") return stats -} \ No newline at end of file +} diff --git a/plugin/api/email/api.go b/plugin/api/email/api.go index 9515131b..47629d97 100644 --- a/plugin/api/email/api.go +++ b/plugin/api/email/api.go @@ -48,9 +48,9 @@ func InitAPI() { api.HandleAPIMethod(api.POST, "/email/server/_test", email.RequirePermission(email.testEmailServer, enum.PermissionSmtpServerRead)) api.HandleAPIMethod(api.GET, "/email/server/:email_server_id", email.RequirePermission(email.getEmailServer, enum.PermissionAlertRuleRead)) api.HandleAPIMethod(api.POST, "/email/server", email.RequirePermission(email.createEmailServer, enum.PermissionSmtpServerWrite)) - api.HandleAPIMethod(api.PUT, "/email/server/:email_server_id", email.RequirePermission(email.updateEmailServer, enum.PermissionSmtpServerWrite)) - api.HandleAPIMethod(api.DELETE, "/email/server/:email_server_id", 
email.RequirePermission(email.deleteEmailServer, enum.PermissionSmtpServerWrite)) - api.HandleAPIMethod(api.GET, "/email/server/_search", email.RequirePermission(email.searchEmailServer, enum.PermissionSmtpServerRead)) + api.HandleAPIMethod(api.PUT, "/email/server/:email_server_id", email.RequirePermission(email.updateEmailServer, enum.PermissionSmtpServerWrite)) + api.HandleAPIMethod(api.DELETE, "/email/server/:email_server_id", email.RequirePermission(email.deleteEmailServer, enum.PermissionSmtpServerWrite)) + api.HandleAPIMethod(api.GET, "/email/server/_search", email.RequirePermission(email.searchEmailServer, enum.PermissionSmtpServerRead)) credential.RegisterChangeEvent(func(cred *credential.Credential) { query := util.MapStr{ diff --git a/plugin/api/index_management/common_command.go b/plugin/api/index_management/common_command.go index 9ea70b64..4e453f2e 100644 --- a/plugin/api/index_management/common_command.go +++ b/plugin/api/index_management/common_command.go @@ -38,8 +38,7 @@ import ( ) func (h *APIHandler) HandleAddCommonCommandAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - resBody := map[string]interface{}{ - } + resBody := map[string]interface{}{} reqParams := elastic.CommonCommand{} err := h.DecodeJSON(req, &reqParams) @@ -54,8 +53,8 @@ func (h *APIHandler) HandleAddCommonCommandAction(w http.ResponseWriter, req *ht reqParams.ID = util.GetUUID() esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)) - queryDSL :=[]byte(fmt.Sprintf(`{"size":1, "query":{"bool":{"must":{"match":{"title.keyword":"%s"}}}}}`, reqParams.Title)) - var indexName = orm.GetIndexName(reqParams) + queryDSL := []byte(fmt.Sprintf(`{"size":1, "query":{"bool":{"must":{"match":{"title.keyword":"%s"}}}}}`, reqParams.Title)) + var indexName = orm.GetIndexName(reqParams) searchRes, err := esClient.SearchWithRawQueryDSL(indexName, queryDSL) if err != nil { log.Error(err) @@ -63,13 +62,13 @@ func (h *APIHandler) HandleAddCommonCommandAction(w http.ResponseWriter, req *ht h.WriteJSON(w, resBody, http.StatusOK) return } - if len(searchRes.Hits.Hits) > 0 { + if len(searchRes.Hits.Hits) > 0 { resBody["error"] = "title already exists" log.Error(resBody["error"]) h.WriteJSON(w, resBody, http.StatusOK) return } - _, err = esClient.Index(indexName,"", reqParams.ID, reqParams, "wait_for") + _, err = esClient.Index(indexName, "", reqParams.ID, reqParams, "wait_for") if err != nil { log.Error(err) resBody["error"] = err.Error() @@ -81,12 +80,11 @@ func (h *APIHandler) HandleAddCommonCommandAction(w http.ResponseWriter, req *ht resBody["result"] = "created" resBody["_source"] = reqParams - h.WriteJSON(w, resBody,http.StatusOK) + h.WriteJSON(w, resBody, http.StatusOK) } func (h *APIHandler) HandleSaveCommonCommandAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - resBody := map[string]interface{}{ - } + resBody := map[string]interface{}{} reqParams := elastic.CommonCommand{} err := h.DecodeJSON(req, &reqParams) @@ -99,8 +97,8 @@ func (h *APIHandler) HandleSaveCommonCommandAction(w http.ResponseWriter, req *h reqParams.ID = ps.ByName("cid") esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)) - queryDSL :=[]byte(fmt.Sprintf(`{"size":1, "query":{"bool":{"must":{"match":{"title.keyword":"%s"}}}}}`, reqParams.Title)) - var indexName = orm.GetIndexName(reqParams) + queryDSL := []byte(fmt.Sprintf(`{"size":1, "query":{"bool":{"must":{"match":{"title.keyword":"%s"}}}}}`, reqParams.Title)) + var indexName = 
orm.GetIndexName(reqParams) searchRes, err := esClient.SearchWithRawQueryDSL(indexName, queryDSL) if err != nil { log.Error(err) @@ -108,13 +106,13 @@ func (h *APIHandler) HandleSaveCommonCommandAction(w http.ResponseWriter, req *h h.WriteJSON(w, resBody, http.StatusInternalServerError) return } - if len(searchRes.Hits.Hits) > 0 && searchRes.Hits.Hits[0].ID != reqParams.ID { + if len(searchRes.Hits.Hits) > 0 && searchRes.Hits.Hits[0].ID != reqParams.ID { resBody["error"] = "title already exists" log.Error(resBody["error"]) h.WriteJSON(w, resBody, http.StatusInternalServerError) return } - _, err = esClient.Index(indexName,"", reqParams.ID, reqParams, "wait_for") + _, err = esClient.Index(indexName, "", reqParams.ID, reqParams, "wait_for") if err != nil { log.Error(err) resBody["error"] = err.Error() @@ -126,21 +124,20 @@ func (h *APIHandler) HandleSaveCommonCommandAction(w http.ResponseWriter, req *h resBody["result"] = "updated" resBody["_source"] = reqParams - h.WriteJSON(w, resBody,http.StatusOK) + h.WriteJSON(w, resBody, http.StatusOK) } func (h *APIHandler) HandleQueryCommonCommandAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - resBody := map[string]interface{}{ - } + resBody := map[string]interface{}{} var ( - keyword = h.GetParameterOrDefault(req, "keyword", "") + keyword = h.GetParameterOrDefault(req, "keyword", "") queryDSL = `{"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d}` strSize = h.GetParameterOrDefault(req, "size", "20") strFrom = h.GetParameterOrDefault(req, "from", "0") filterBuilder = &strings.Builder{} ) - if keyword != ""{ + if keyword != "" { filterBuilder.WriteString(fmt.Sprintf(`{"query_string": { "default_field": "*", "query": "%s" @@ -167,7 +164,7 @@ func (h *APIHandler) HandleQueryCommonCommandAction(w http.ResponseWriter, req * return } - h.WriteJSON(w, searchRes,http.StatusOK) + h.WriteJSON(w, searchRes, http.StatusOK) } func (h *APIHandler) HandleDeleteCommonCommandAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { @@ -178,9 +175,9 @@ func (h *APIHandler) HandleDeleteCommonCommandAction(w http.ResponseWriter, req if err != nil { log.Error(err) resBody["error"] = err.Error() - if delRes!=nil{ + if delRes != nil { h.WriteJSON(w, resBody, delRes.StatusCode) - }else{ + } else { h.WriteJSON(w, resBody, http.StatusInternalServerError) } return @@ -190,4 +187,4 @@ func (h *APIHandler) HandleDeleteCommonCommandAction(w http.ResponseWriter, req resBody["_id"] = id resBody["result"] = delRes.Result h.WriteJSON(w, resBody, delRes.StatusCode) -} \ No newline at end of file +} diff --git a/plugin/api/index_management/document.go b/plugin/api/index_management/document.go index 365ee047..4efefd96 100644 --- a/plugin/api/index_management/document.go +++ b/plugin/api/index_management/document.go @@ -34,8 +34,8 @@ import ( ) type docReqBody struct { - From int `json:"from"` - Size int `json:"size"` + From int `json:"from"` + Size int `json:"size"` Filter string `json:"filter"` Cluster string `json:"cluster"` Keyword string `json:"keyword"` @@ -155,7 +155,7 @@ func (handler APIHandler) HandleSearchDocumentAction(w http.ResponseWriter, req } indexName := ps.ByName("index") var ( - sort = "" + sort = "" ) if reqBody.From < 0 { reqBody.From = 0 @@ -206,7 +206,7 @@ func (handler APIHandler) ValidateDocIDAction(w http.ResponseWriter, req *http.R var ( index = handler.GetParameter(req, "index") docID = handler.GetParameter(req, "doc_id") - typ = handler.GetParameter(req, "type") + typ = handler.GetParameter(req, "type") ) 
getRes, err := client.Get(index, typ, docID) if err != nil { diff --git a/plugin/api/insight/dashboard.go b/plugin/api/insight/dashboard.go index 539834b8..763fc5ee 100644 --- a/plugin/api/insight/dashboard.go +++ b/plugin/api/insight/dashboard.go @@ -28,13 +28,13 @@ package insight import ( - "net/http" - "strconv" log "github.com/cihub/seelog" insight2 "infini.sh/console/model/insight" httprouter "infini.sh/framework/core/api/router" "infini.sh/framework/core/orm" "infini.sh/framework/core/util" + "net/http" + "strconv" ) func (h *InsightAPI) createDashboard(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { diff --git a/plugin/api/insight/map_label.go b/plugin/api/insight/map_label.go index 1a8e9b0a..9b1a7232 100644 --- a/plugin/api/insight/map_label.go +++ b/plugin/api/insight/map_label.go @@ -70,8 +70,8 @@ func (h *InsightAPI) renderMapLabelTemplate(w http.ResponseWriter, req *http.Req kv := strings.Split(part, "=") if len(kv) == 2 { k := strings.TrimSpace(kv[0]) - kvs[k]= strings.TrimSpace(kv[1]) - }else{ + kvs[k] = strings.TrimSpace(kv[1]) + } else { log.Debugf("got unexpected directory part: %s", part) } } @@ -93,7 +93,7 @@ func (h *InsightAPI) renderMapLabelTemplate(w http.ResponseWriter, req *http.Req } } - valueField = kvs["property"] + valueField = kvs["property"] if indexName == "" || keyField == "" || valueField == "" { return kvs["default"] } @@ -107,7 +107,7 @@ func (h *InsightAPI) renderMapLabelTemplate(w http.ResponseWriter, req *http.Req } var ( cacheLabels map[string]string - ok bool + ok bool ) if cacheLabels, ok = cacheLabelsMap[cacheKey]; !ok { var keyFieldValues []string @@ -120,7 +120,7 @@ func (h *InsightAPI) renderMapLabelTemplate(w http.ResponseWriter, req *http.Req cacheLabels, err = common2.GetLabelMaps(indexName, keyField, valueField, client, keyFieldValues, len(keyFieldValues)) if err != nil { log.Error(err) - }else{ + } else { cacheLabelsMap[cacheKey] = cacheLabels } } @@ -155,10 +155,10 @@ func (h *InsightAPI) renderMapLabelTemplate(w http.ResponseWriter, req *http.Req type RenderTemplateRequest struct { Contexts []RenderTemplateContext `json:"contexts"` - Template string `json:"template"` + Template string `json:"template"` } type RenderTemplateContext struct { - Key string `json:"key"` + Key string `json:"key"` Value map[string]interface{} `json:"value"` -} \ No newline at end of file +} diff --git a/plugin/api/insight/visualization.go b/plugin/api/insight/visualization.go index f75ba0e7..2e82aacc 100644 --- a/plugin/api/insight/visualization.go +++ b/plugin/api/insight/visualization.go @@ -34,8 +34,8 @@ import ( "strings" log "github.com/cihub/seelog" - httprouter "infini.sh/framework/core/api/router" "infini.sh/console/model/insight" + httprouter "infini.sh/framework/core/api/router" "infini.sh/framework/core/orm" "infini.sh/framework/core/util" ) diff --git a/plugin/managed/server/config.go b/plugin/managed/server/config.go index 8567baef..e872c124 100644 --- a/plugin/managed/server/config.go +++ b/plugin/managed/server/config.go @@ -29,13 +29,13 @@ package server import ( log "github.com/cihub/seelog" - "infini.sh/framework/modules/configs/common" - "infini.sh/framework/modules/configs/config" httprouter "infini.sh/framework/core/api/router" config3 "infini.sh/framework/core/config" "infini.sh/framework/core/global" "infini.sh/framework/core/model" "infini.sh/framework/core/util" + "infini.sh/framework/modules/configs/common" + "infini.sh/framework/modules/configs/config" "net/http" "path" "sync" diff --git 
a/plugin/managed/server/instance.go b/plugin/managed/server/instance.go index c0484d23..5959bf09 100644 --- a/plugin/managed/server/instance.go +++ b/plugin/managed/server/instance.go @@ -37,13 +37,13 @@ import ( log "github.com/cihub/seelog" "infini.sh/console/core/security/enum" - "infini.sh/framework/modules/configs/common" "infini.sh/framework/core/api" httprouter "infini.sh/framework/core/api/router" elastic2 "infini.sh/framework/core/elastic" "infini.sh/framework/core/model" "infini.sh/framework/core/orm" "infini.sh/framework/core/util" + "infini.sh/framework/modules/configs/common" "infini.sh/framework/modules/elastic" common2 "infini.sh/framework/modules/elastic/common" ) diff --git a/plugin/managed/server/manager.go b/plugin/managed/server/manager.go index d8aaaf73..29ce6d8f 100644 --- a/plugin/managed/server/manager.go +++ b/plugin/managed/server/manager.go @@ -32,11 +32,11 @@ import ( "fmt" log "github.com/cihub/seelog" "infini.sh/console/core" - "infini.sh/framework/modules/configs/common" "infini.sh/framework/core/api" "infini.sh/framework/core/errors" "infini.sh/framework/core/global" "infini.sh/framework/core/util" + "infini.sh/framework/modules/configs/common" "net" "net/http" "net/url" diff --git a/service/alerting/action/email.go b/service/alerting/action/email.go index 78d3991c..e63d7839 100644 --- a/service/alerting/action/email.go +++ b/service/alerting/action/email.go @@ -35,30 +35,30 @@ import ( ) type EmailAction struct { - Data *alerting.Email + Data *alerting.Email Subject string - Body string + Body string } const EmailQueueName = "email_messages" -func (act *EmailAction) Execute()([]byte, error){ +func (act *EmailAction) Execute() ([]byte, error) { queueCfg := queue.GetOrInitConfig(EmailQueueName) if act.Data.ServerID == "" { return nil, fmt.Errorf("parameter server_id must not be empty") } emailMsg := util.MapStr{ "server_id": act.Data.ServerID, - "email": act.Data.Recipients.To, - "template": "raw", + "email": act.Data.Recipients.To, + "template": "raw", "variables": util.MapStr{ - "subject": act.Subject, - "body": act.Body, + "subject": act.Subject, + "body": act.Body, "content_type": act.Data.ContentType, - "cc": act.Data.Recipients.CC, + "cc": act.Data.Recipients.CC, }, } emailMsgBytes := util.MustToJSONBytes(emailMsg) err := queue.Push(queueCfg, emailMsgBytes) return nil, err -} \ No newline at end of file +} diff --git a/service/alerting/action/webhook.go b/service/alerting/action/webhook.go index dfe5e450..e33ff1b5 100644 --- a/service/alerting/action/webhook.go +++ b/service/alerting/action/webhook.go @@ -40,7 +40,7 @@ type Action interface { } type WebhookAction struct { - Data *alerting.CustomWebhook + Data *alerting.CustomWebhook Message string } @@ -50,7 +50,7 @@ var actionClient = http.Client{ }, } -func (act *WebhookAction) Execute()([]byte, error){ +func (act *WebhookAction) Execute() ([]byte, error) { var reqURL = act.Data.URL reqBody := strings.NewReader(act.Message) req, err := http.NewRequest(http.MethodPost, reqURL, reqBody) @@ -67,4 +67,3 @@ func (act *WebhookAction) Execute()([]byte, error){ defer res.Body.Close() return ioutil.ReadAll(res.Body) } - diff --git a/service/alerting/common/helper.go b/service/alerting/common/helper.go index 2609c293..6acff481 100644 --- a/service/alerting/common/helper.go +++ b/service/alerting/common/helper.go @@ -42,9 +42,9 @@ func PerformChannel(channel *alerting.Channel, ctx map[string]interface{}) ([]by return nil, fmt.Errorf("empty channel"), nil } var ( - act action.Action + act action.Action message 
[]byte - err error + err error ) switch channel.Type { @@ -75,7 +75,7 @@ func PerformChannel(channel *alerting.Channel, ctx map[string]interface{}) ([]by act = &action.EmailAction{ Data: channel.Email, Subject: string(subjectBytes), - Body: string(message), + Body: string(message), } default: return nil, fmt.Errorf("unsupported action type: %s", channel.Type), message @@ -84,10 +84,10 @@ func PerformChannel(channel *alerting.Channel, ctx map[string]interface{}) ([]by return executeResult, err, message } -func ResolveMessage(messageTemplate string, ctx map[string]interface{}) ([]byte, error){ - msg := messageTemplate +func ResolveMessage(messageTemplate string, ctx map[string]interface{}) ([]byte, error) { + msg := messageTemplate tmpl, err := template.New("alert-message").Funcs(funcs.GenericFuncMap()).Parse(msg) - if err !=nil { + if err != nil { return nil, fmt.Errorf("parse message temlate error: %w", err) } msgBuffer := &bytes.Buffer{} @@ -120,17 +120,17 @@ func RetrieveChannel(ch *alerting.Channel, raiseChannelEnabledErr bool) (*alerti case alerting.ChannelEmail: if ch.Email == nil { ch.Email = refCh.Email - }else{ + } else { ch.Email.ServerID = refCh.Email.ServerID ch.Email.Recipients = refCh.Email.Recipients } case alerting.ChannelWebhook: if ch.Webhook == nil { ch.Webhook = refCh.Webhook - }else { + } else { ch.Webhook.URL = refCh.Webhook.URL } } } return ch, nil -} \ No newline at end of file +} diff --git a/service/alerting/constants.go b/service/alerting/constants.go index 9c27dee5..a367a87f 100644 --- a/service/alerting/constants.go +++ b/service/alerting/constants.go @@ -29,12 +29,11 @@ package alerting const ( KVLastNotificationTime = "alert_last_notification_time" - KVLastTermStartTime = "alert_last_term_start_time" - KVLastEscalationTime = "alert_last_escalation_time" - KVLastMessageState = "alert_last_message_state" + KVLastTermStartTime = "alert_last_term_start_time" + KVLastEscalationTime = "alert_last_escalation_time" + KVLastMessageState = "alert_last_message_state" ) - const ( ParamRuleID = "rule_id" //规则 UUID ParamResourceID = "resource_id" // 资源 UUID @@ -50,6 +49,7 @@ const ( ParamGroupValues = "group_values" ParamIssueTimestamp = "issue_timestamp" ParamRelationValues = "relation_values" -//rule expression, rule_id, resource_id, resource_name, event_id, condition_name, preset_value,[group_tags, check_values], -//check_status ,timestamp, -) \ No newline at end of file + +// rule expression, rule_id, resource_id, resource_name, event_id, condition_name, preset_value,[group_tags, check_values], +// check_status ,timestamp, +) diff --git a/service/alerting/elasticsearch/engine.go b/service/alerting/elasticsearch/engine.go index c679ca0f..389e37c5 100644 --- a/service/alerting/elasticsearch/engine.go +++ b/service/alerting/elasticsearch/engine.go @@ -34,10 +34,10 @@ import ( log "github.com/cihub/seelog" "infini.sh/console/model" "infini.sh/console/model/alerting" + "infini.sh/console/model/insight" alerting2 "infini.sh/console/service/alerting" "infini.sh/console/service/alerting/common" "infini.sh/framework/core/elastic" - "infini.sh/console/model/insight" "infini.sh/framework/core/kv" "infini.sh/framework/core/orm" "infini.sh/framework/core/util" @@ -50,14 +50,14 @@ import ( ) type Engine struct { - } -//GenerateQuery generate a final elasticsearch query dsl object -//when RawFilter of rule is not empty, priority use it, otherwise to covert from Filter of rule (todo) -//auto generate time filter query and then attach to final query -//auto generate elasticsearch 
aggregations by metrics of rule
-//group of metric item converted to terms aggregation and TimeField of rule converted to date_histogram aggregation
-//convert statistic of metric item to elasticsearch aggregation
+
+// GenerateQuery generates a final elasticsearch query dsl object
+// when the rule's RawFilter is not empty it takes priority, otherwise convert from the rule's Filter (todo)
+// auto generates the time filter query and then attaches it to the final query
+// auto generates elasticsearch aggregations from the metrics of the rule
+// each group of a metric item is converted to a terms aggregation and the rule's TimeField is converted to a date_histogram aggregation
+// the statistic of each metric item is converted to an elasticsearch aggregation
 func (engine *Engine) GenerateQuery(rule *alerting.Rule, filterParam *alerting.FilterParam) (interface{}, error) {
 	filter, err := engine.GenerateRawFilter(rule, filterParam)
 	if err != nil {
@@ -72,7 +72,7 @@ func (engine *Engine) GenerateQuery(rule *alerting.Rule, filterParam *alerting.F
 	}
 	basicAggs := util.MapStr{}
 	//todo bucket sort (es 6.1) bucket script (es 2.0)
-	for _, metricItem := range rule.Metrics.Items {
+	for _, metricItem := range rule.Metrics.Items {
 		metricAggs := engine.generateAgg(&metricItem)
 		if err = util.MergeFields(basicAggs, metricAggs, true); err != nil {
 			return nil, err
@@ -81,21 +81,21 @@
 	verInfo := elastic.GetClient(rule.Resource.ID).GetVersion()
 	var periodInterval = rule.Metrics.BucketSize
 	if filterParam != nil && filterParam.BucketSize != "" {
-		periodInterval = filterParam.BucketSize
+		periodInterval = filterParam.BucketSize
 	}
-	if verInfo.Number==""{
+	if verInfo.Number == "" {
 		panic("invalid version")
 	}
-	intervalField, err := elastic.GetDateHistogramIntervalField(verInfo.Distribution,verInfo.Number, periodInterval )
+	intervalField, err := elastic.GetDateHistogramIntervalField(verInfo.Distribution, verInfo.Number, periodInterval)
 	if err != nil {
 		return nil, fmt.Errorf("get interval field error: %w", err)
 	}
 	timeAggs := util.MapStr{
 		"time_buckets": util.MapStr{
 			"date_histogram": util.MapStr{
-				"field": rule.Resource.TimeField,
+				"field":       rule.Resource.TimeField,
 				intervalField: periodInterval,
 			},
 			"aggs": basicAggs,
 		},
 	}
@@ -107,7 +107,7 @@
 	if grpLength := len(groups); grpLength > 0 {
 		var lastGroupAgg util.MapStr

-		for i := grpLength-1; i>=0; i-- {
+		for i := grpLength - 1; i >= 0; i-- {
 			limit := groups[i].Limit
 			//top group 10
 			if limit <= 0 {
@@ -116,7 +116,7 @@
 			groupAgg := util.MapStr{
 				"terms": util.MapStr{
 					"field": groups[i].Field,
-					"size": limit,
+					"size":  limit,
 				},
 			}
 			groupID := util.GetUUID()
@@ -124,7 +124,7 @@
 				groupAgg["aggs"] = util.MapStr{
 					groupID: lastGroupAgg,
 				}
-			}else{
+			} else {
 				groupAgg["aggs"] = timeAggs
 			}
 			lastGroupAgg = groupAgg
@@ -132,29 +132,30 @@
 		rootAggs = util.MapStr{
 			util.GetUUID(): lastGroupAgg,
 		}
-	}else{
+	} else {
 		rootAggs = timeAggs
 	}
 	if len(filter) > 0 {
 		rootAggs = util.MapStr{
 			"filter_agg": util.MapStr{
 				"filter": filter,
-				"aggs": rootAggs,
+				"aggs":   rootAggs,
 			},
 		}
 	}
 	return util.MapStr{
-		"size": 0,
+		"size":  0,
 		"query": timeFilter,
-		"aggs": rootAggs,
+		"aggs":  rootAggs,
 	}, nil
 }
-//generateAgg convert statistic of metric item to elasticsearch aggregation
-func (engine *Engine) generateAgg(metricItem *insight.MetricItem) map[string]interface{}{
+
+// generateAgg converts the statistic of a metric item to an elasticsearch aggregation
+func (engine *Engine) generateAgg(metricItem *insight.MetricItem) map[string]interface{} {
 	var (
 		aggType = "value_count"
-		field = metricItem.Field
+		field   = metricItem.Field
 	)
 	if field == "" || field == "*" {
 		field = "_id"
@@ -171,7 +172,7 @@ func (engine *Engine) generateAgg(metricItem *insight.MetricItem) map[string]int
 		isPipeline = true
 	case "medium": // from es version 6.6
 		aggType = "median_absolute_deviation"
-	case "p99", "p95","p90","p80","p50":
+	case "p99", "p95", "p90", "p80", "p50":
 		aggType = "percentiles"
 		percentStr := strings.TrimPrefix(metricItem.Statistic, "p")
 		percent, _ = strconv.ParseFloat(percentStr, 32)
@@ -187,7 +188,7 @@ func (engine *Engine) generateAgg(metricItem *insight.MetricItem) map[string]int
 			aggType: aggValue,
 		},
 	}
-	if !isPipeline{
+	if !isPipeline {
 		return aggs
 	}
 	pipelineAggID := util.GetUUID()
@@ -200,8 +201,8 @@ func (engine *Engine) generateAgg(metricItem *insight.MetricItem) map[string]int
 	return aggs
 }

-func (engine *Engine) ConvertFilterQueryToDsl(fq *alerting.FilterQuery) (map[string]interface{}, error){
-	if !fq.IsComplex(){
+func (engine *Engine) ConvertFilterQueryToDsl(fq *alerting.FilterQuery) (map[string]interface{}, error) {
+	if !fq.IsComplex() {
 		q := map[string]interface{}{}
 		if len(fq.Values) == 0 {
 			return nil, fmt.Errorf("values should not be empty")
@@ -267,14 +268,14 @@ func (engine *Engine) ConvertFilterQueryToDsl(fq *alerting.FilterQuery) (map[str
 		filterQueries []alerting.FilterQuery
 	)
-	if len(fq.Not) >0 {
+	if len(fq.Not) > 0 {
 		boolOperator = "must_not"
 		filterQueries = fq.Not
-	}else if len(fq.Or) > 0 {
+	} else if len(fq.Or) > 0 {
 		boolOperator = "should"
 		filterQueries = fq.Or
-	}else {
+	} else {
 		boolOperator = "must"
 		filterQueries = fq.And
 	}
@@ -299,15 +300,15 @@ func (engine *Engine) ConvertFilterQueryToDsl(fq *alerting.FilterQuery) (map[str
 	return resultQuery, nil
 }

-func getQueryTimeRange(rule *alerting.Rule, filterParam *alerting.FilterParam) (start, end interface{}){
+func getQueryTimeRange(rule *alerting.Rule, filterParam *alerting.FilterParam) (start, end interface{}) {
 	var (
 		timeStart interface{}
-		timeEnd interface{}
+		timeEnd   interface{}
 	)
 	if filterParam != nil {
 		timeStart = filterParam.Start
 		timeEnd = filterParam.End
-	}else{
+	} else {
 		var (
 			units string
 			value int
@@ -316,23 +317,23 @@ func getQueryTimeRange(rule *alerting.Rule, filterParam *alerting.FilterParam) (
 		if err != nil {
 			return nil, fmt.Errorf("parse bucket size of rule [%s] error: %v", rule.Name, err)
 		}
-		if intervalDuration / time.Hour >= 1 {
+		if intervalDuration/time.Hour >= 1 {
 			units = "h"
 			value = int(intervalDuration / time.Hour)
-		}else if intervalDuration / time.Minute >= 1{
+		} else if intervalDuration/time.Minute >= 1 {
 			units = "m"
 			value = int(intervalDuration / time.Minute)
-		}else if intervalDuration / time.Second >= 1 {
+		} else if intervalDuration/time.Second >= 1 {
 			units = "s"
 			value = int(intervalDuration / time.Second)
-		}else{
+		} else {
 			return nil, fmt.Errorf("period interval: %s is too small", rule.Metrics.BucketSize)
 		}
 		bucketCount := rule.Conditions.GetMinimumPeriodMatch() + 1
 		if bucketCount <= 0 {
 			bucketCount = 1
 		}
-		duration, err := time.ParseDuration(fmt.Sprintf("%d%s", value * bucketCount, units))
+		duration, err := time.ParseDuration(fmt.Sprintf("%d%s", value*bucketCount, units))
 		if err != nil {
 			return nil, err
 		}
@@ -342,7 +343,7 @@ func getQueryTimeRange(rule *alerting.Rule, filterParam *alerting.FilterParam) (
 	return timeStart, timeEnd
 }

-func (engine *Engine) generateTimeFilter(rule *alerting.Rule, filterParam *alerting.FilterParam) (map[string]interface{}, error){
+func (engine *Engine) generateTimeFilter(rule *alerting.Rule, filterParam *alerting.FilterParam) (map[string]interface{}, error) {
 	timeStart, timeEnd := getQueryTimeRange(rule, filterParam)
 	timeQuery := util.MapStr{
 		"range": util.MapStr{
@@ -360,8 +361,8 @@
 	var err error
 	if rule.Resource.RawFilter != nil {
 		query = util.DeepCopy(rule.Resource.RawFilter).(map[string]interface{})
-	}else{
-		if !rule.Resource.Filter.IsEmpty(){
+	} else {
+		if !rule.Resource.Filter.IsEmpty() {
			query, err = engine.ConvertFilterQueryToDsl(&rule.Resource.Filter)
			if err != nil {
				return nil, err
@@ -405,7 +406,7 @@
 	return query, nil
 }

-func (engine *Engine) ExecuteQuery(rule *alerting.Rule, filterParam *alerting.FilterParam)(*alerting.QueryResult, error){
+func (engine *Engine) ExecuteQuery(rule *alerting.Rule, filterParam *alerting.FilterParam) (*alerting.QueryResult, error) {
 	esClient := elastic.GetClient(rule.Resource.ID)
 	queryResult := &alerting.QueryResult{}
 	indexName := strings.Join(rule.Resource.Objects, ",")
@@ -470,7 +471,7 @@
 	queryResult.MetricData = metricData
 	return queryResult, nil
 }
-func (engine *Engine) GetTargetMetricData(rule *alerting.Rule, isFilterNaN bool, filterParam *alerting.FilterParam)([]alerting.MetricData, *alerting.QueryResult, error){
+func (engine *Engine) GetTargetMetricData(rule *alerting.Rule, isFilterNaN bool, filterParam *alerting.FilterParam) ([]alerting.MetricData, *alerting.QueryResult, error) {
 	queryResult, err := engine.ExecuteQuery(rule, filterParam)
 	if err != nil {
 		return nil, queryResult, err
@@ -525,7 +526,7 @@ func (engine *Engine) GetTargetMetricData(rule *alerting.Rule, isFilterNaN bool,
 		return nil, queryResult, err
 	}
 	if r, ok := result.(float64); ok {
-		if math.IsNaN(r) || math.IsInf(r, 0 ){
+		if math.IsNaN(r) || math.IsInf(r, 0) {
 			if !isFilterNaN {
 				targetData.Data["result"] = append(targetData.Data["result"], []interface{}{timestamp, math.NaN()})
 			}
@@ -540,10 +541,11 @@ func (engine *Engine) GetTargetMetricData(rule *alerting.Rule, isFilterNaN bool,
 	}
 	return targetMetricData, queryResult, nil
 }
-//CheckCondition check whether rule conditions triggered or not
-//if triggered returns an ConditionResult
-//sort conditions by priority desc before check , and then if condition is true, then continue check another group
-func (engine *Engine) CheckCondition(rule *alerting.Rule)(*alerting.ConditionResult, error){
+
+// CheckCondition checks whether the rule's conditions are triggered or not
+// if triggered, it returns a ConditionResult
+// conditions are sorted by priority (desc) before checking; when a condition is true, checking continues with the next group
+func (engine *Engine) CheckCondition(rule *alerting.Rule) (*alerting.ConditionResult, error) {
 	var resultItems []alerting.ConditionResultItem
 	targetMetricData, queryResult, err := engine.GetTargetMetricData(rule, true, nil)
 	conditionResult := &alerting.ConditionResult{
@@ -558,7 +560,7 @@ func (engine *Engine) CheckCondition(rule *alerting.Rule)(*alerting.ConditionRes
 			return alerting.PriorityWeights[rule.Conditions.Items[i].Priority] > 
alerting.PriorityWeights[rule.Conditions.Items[j].Priority] }) } - LoopCondition: + LoopCondition: for _, cond := range rule.Conditions.Items { conditionExpression, err := cond.GenerateConditionExpression() if err != nil { @@ -580,8 +582,8 @@ func (engine *Engine) CheckCondition(rule *alerting.Rule)(*alerting.ConditionRes if targetData.Data[dataKey][i][1] == nil { continue } - if r, ok := targetData.Data[dataKey][i][1].(float64); ok { - if math.IsNaN(r){ + if r, ok := targetData.Data[dataKey][i][1].(float64); ok { + if math.IsNaN(r) { continue } } @@ -593,19 +595,19 @@ func (engine *Engine) CheckCondition(rule *alerting.Rule)(*alerting.ConditionRes } if evaluateResult == true { triggerCount += 1 - }else { + } else { triggerCount = 0 } if triggerCount >= cond.MinimumPeriodMatch { log.Debugf("triggered condition %v, groups: %v\n", cond, targetData.GroupValues) resultItem := alerting.ConditionResultItem{ - GroupValues: targetData.GroupValues, - ConditionItem: &cond, - ResultValue: targetData.Data[dataKey][i][1], + GroupValues: targetData.GroupValues, + ConditionItem: &cond, + ResultValue: targetData.Data[dataKey][i][1], IssueTimestamp: targetData.Data[dataKey][i][0], RelationValues: map[string]interface{}{}, } - for _, metric := range rule.Metrics.Items{ + for _, metric := range rule.Metrics.Items { resultItem.RelationValues[metric.Name] = queryResult.MetricData[idx].Data[metric.Name][i][1] } resultItems = append(resultItems, resultItem) @@ -623,30 +625,30 @@ func (engine *Engine) Do(rule *alerting.Rule) error { var ( alertItem *alerting.Alert - err error + err error ) defer func() { if err != nil && alertItem == nil { alertItem = &alerting.Alert{ - ID: util.GetUUID(), - Created: time.Now(), - Updated: time.Now(), - RuleID: rule.ID, - RuleName: rule.Name, - ResourceID: rule.Resource.ID, + ID: util.GetUUID(), + Created: time.Now(), + Updated: time.Now(), + RuleID: rule.ID, + RuleName: rule.Name, + ResourceID: rule.Resource.ID, ResourceName: rule.Resource.Name, - Expression: rule.Metrics.Expression, - Objects: rule.Resource.Objects, - State: alerting.AlertStateError, + Expression: rule.Metrics.Expression, + Objects: rule.Resource.Objects, + State: alerting.AlertStateError, //Priority: "undefine", Error: err.Error(), } } if alertItem != nil { - if err != nil{ + if err != nil { alertItem.State = alerting.AlertStateError alertItem.Error = err.Error() - }else { + } else { for _, actionResult := range alertItem.ActionExecutionResults { if actionResult.Error != "" { alertItem.Error = actionResult.Error @@ -670,24 +672,24 @@ func (engine *Engine) Do(rule *alerting.Rule) error { rule.Conditions.Items[i].Expression = strings.ReplaceAll(expression, "result", metricExpression) } alertItem = &alerting.Alert{ - ID: util.GetUUID(), - Created: time.Now(), - Updated: time.Now(), - RuleID: rule.ID, - RuleName: rule.Name, - ResourceID: rule.Resource.ID, + ID: util.GetUUID(), + Created: time.Now(), + Updated: time.Now(), + RuleID: rule.ID, + RuleName: rule.Name, + ResourceID: rule.Resource.ID, ResourceName: rule.Resource.Name, - Expression: rule.Metrics.Expression, - Objects: rule.Resource.Objects, - Conditions: rule.Conditions, - State: alerting.AlertStateOK, + Expression: rule.Metrics.Expression, + Objects: rule.Resource.Objects, + Conditions: rule.Conditions, + State: alerting.AlertStateOK, } checkResults, err := engine.CheckCondition(rule) alertItem.ConditionResult = checkResults if err != nil { return err } - alertMessage, err := getLastAlertMessage(rule.ID, 2 * time.Minute) + alertMessage, err := 
getLastAlertMessage(rule.ID, 2*time.Minute)
 	if err != nil {
 		return fmt.Errorf("get alert message error: %w", err)
 	}
@@ -696,12 +698,12 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
 	if len(conditionResults) == 0 {
 		alertItem.Priority = ""
 		if checkResults.QueryResult.Nodata {
-			alertItem.State = alerting.AlertStateNodata
+			alertItem.State = alerting.AlertStateNodata
 		}

-		if alertMessage != nil && alertMessage.Status != alerting.MessageStateRecovered && !checkResults.QueryResult.Nodata {
+		if alertMessage != nil && alertMessage.Status != alerting.MessageStateRecovered && !checkResults.QueryResult.Nodata {
 			alertMessage.Status = alerting.MessageStateRecovered
-			alertMessage.ResourceID = rule.Resource.ID
+			alertMessage.ResourceID = rule.Resource.ID
 			alertMessage.ResourceName = rule.Resource.Name
 			err = saveAlertMessage(alertMessage)
 			if err != nil {
@@ -710,12 +712,12 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
 			// todo add recover notification to inner system message
 			// send recover message to channel
 			recoverCfg := rule.RecoveryNotificationConfig
-			if recoverCfg != nil && recoverCfg.EventEnabled && recoverCfg.Enabled {
+			if recoverCfg != nil && recoverCfg.EventEnabled && recoverCfg.Enabled {
 				paramsCtx = newParameterCtx(rule, checkResults, util.MapStr{
-					alerting2.ParamEventID: alertMessage.ID,
-					alerting2.ParamTimestamp: alertItem.Created.Unix(),
-					"duration": alertItem.Created.Sub(alertMessage.Created).String(),
-					"trigger_at": alertMessage.Created.Unix(),
+					alerting2.ParamEventID:   alertMessage.ID,
+					alerting2.ParamTimestamp: alertItem.Created.Unix(),
+					"duration":               alertItem.Created.Sub(alertMessage.Created).String(),
+					"trigger_at":             alertMessage.Created.Unix(),
 				})
 				err = attachTitleMessageToCtx(recoverCfg.Title, recoverCfg.Message, paramsCtx)
 				if err != nil {
@@ -747,9 +749,9 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
 			triggerAt = alertMessage.Created
 		}
 		paramsCtx = newParameterCtx(rule, checkResults, util.MapStr{
-			alerting2.ParamTimestamp: alertItem.Created.Unix(),
-			"duration": alertItem.Created.Sub(triggerAt).String(),
-			"trigger_at": triggerAt.Unix(),
+			alerting2.ParamTimestamp: alertItem.Created.Unix(),
+			"duration":               alertItem.Created.Sub(triggerAt).String(),
+			"trigger_at":             triggerAt.Unix(),
 		})

 		alertItem.Priority = priority
@@ -772,8 +774,8 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
 				Priority:   priority,
 				Title:      alertItem.Title,
 				Message:    alertItem.Message,
-				Tags: rule.Tags,
-				Category: rule.Category,
+				Tags:       rule.Tags,
+				Category:   rule.Category,
 			}
 			alertMessage = msg
 			err = saveAlertMessage(msg)
@@ -797,18 +799,18 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
 			if err != nil {
 				return fmt.Errorf("failed to create notification, err: %w", err)
 			}
-		}else{
+		} else {
 			alertMessage.Title = alertItem.Title
 			alertMessage.Message = alertItem.Message
-			alertMessage.ResourceID = rule.Resource.ID
-			alertMessage.ResourceName= rule.Resource.Name
+			alertMessage.ResourceID = rule.Resource.ID
+			alertMessage.ResourceName = rule.Resource.Name
 			alertMessage.Priority = priority
 			err = saveAlertMessage(alertMessage)
 			if err != nil {
 				return fmt.Errorf("save alert message error: %w", err)
 			}
 		}
-	log.Debugf("check condition result of rule %s is %v", conditionResults, rule.ID )
+	log.Debugf("check condition result of rule %s is %v", rule.ID, conditionResults)

 	// if alert message status equals ignored , then skip sending message to channel
 	if alertMessage != nil && alertMessage.Status == alerting.MessageStateIgnored {
@@ -834,7 +836,7 @@ func (engine *Engine) Do(rule 
*alerting.Rule) error { if err != nil { return fmt.Errorf("get last notification time from kv error: %w", err) } - if !tm.IsZero(){ + if !tm.IsZero() { rule.LastNotificationTime = tm } } @@ -845,8 +847,8 @@ func (engine *Engine) Do(rule *alerting.Rule) error { paramsCtx = newParameterCtx(rule, checkResults, util.MapStr{ alerting2.ParamTimestamp: alertItem.Created.Unix(), "priority": priority, - "duration": alertItem.Created.Sub(alertMessage.Created).String(), - "trigger_at": alertMessage.Created.Unix(), + "duration": alertItem.Created.Sub(alertMessage.Created).String(), + "trigger_at": alertMessage.Created.Unix(), }) if alertMessage != nil { paramsCtx[alerting2.ParamEventID] = alertMessage.ID @@ -874,12 +876,12 @@ func (engine *Engine) Do(rule *alerting.Rule) error { rule.LastTermStartTime = alertMessage.Created } if time.Now().Sub(rule.LastTermStartTime.Local()) > throttlePeriod { - if rule.LastEscalationTime.IsZero(){ + if rule.LastEscalationTime.IsZero() { tm, err := readTimeFromKV(alerting2.KVLastEscalationTime, []byte(rule.ID)) if err != nil { return fmt.Errorf("get last escalation time from kv error: %w", err) } - if !tm.IsZero(){ + if !tm.IsZero() { rule.LastEscalationTime = tm } } @@ -899,10 +901,10 @@ func (engine *Engine) Do(rule *alerting.Rule) error { return nil } -func attachTitleMessageToCtx(title, message string, paramsCtx map[string]interface{}) error{ +func attachTitleMessageToCtx(title, message string, paramsCtx map[string]interface{}) error { var ( tplBytes []byte - err error + err error ) tplBytes, err = common.ResolveMessage(message, paramsCtx) if err != nil { @@ -917,23 +919,23 @@ func attachTitleMessageToCtx(title, message string, paramsCtx map[string]interfa return nil } -func newParameterCtx(rule *alerting.Rule, checkResults *alerting.ConditionResult, extraParams map[string]interface{} ) map[string]interface{}{ +func newParameterCtx(rule *alerting.Rule, checkResults *alerting.ConditionResult, extraParams map[string]interface{}) map[string]interface{} { var ( conditionParams []util.MapStr firstGroupValue string - firstThreshold string - priority string + firstThreshold string + priority string ) if len(checkResults.ResultItems) > 0 { priority = checkResults.ResultItems[0].ConditionItem.Priority sort.Slice(checkResults.ResultItems, func(i, j int) bool { - if alerting.PriorityWeights[checkResults.ResultItems[i].ConditionItem.Priority] > alerting.PriorityWeights[checkResults.ResultItems[j].ConditionItem.Priority] { + if alerting.PriorityWeights[checkResults.ResultItems[i].ConditionItem.Priority] > alerting.PriorityWeights[checkResults.ResultItems[j].ConditionItem.Priority] { return true } return false }) sort.Slice(checkResults.ResultItems, func(i, j int) bool { - if vi, ok := checkResults.ResultItems[i].ResultValue.(float64); ok { + if vi, ok := checkResults.ResultItems[i].ResultValue.(float64); ok { if vj, ok := checkResults.ResultItems[j].ResultValue.(float64); ok { return vi > vj } @@ -972,10 +974,10 @@ func newParameterCtx(rule *alerting.Rule, checkResults *alerting.ConditionResult max = checkResults.QueryResult.Max if v, ok := min.(int64); ok { //expand 60s - min = time.UnixMilli(v).Add(-time.Second*60).UTC().Format("2006-01-02T15:04:05.999Z") + min = time.UnixMilli(v).Add(-time.Second * 60).UTC().Format("2006-01-02T15:04:05.999Z") } if v, ok := max.(int64); ok { - max = time.UnixMilli(v).Add(time.Second*60).UTC().Format("2006-01-02T15:04:05.999Z") + max = time.UnixMilli(v).Add(time.Second * 60).UTC().Format("2006-01-02T15:04:05.999Z") } } paramsCtx := 
util.MapStr{ @@ -983,14 +985,14 @@ func newParameterCtx(rule *alerting.Rule, checkResults *alerting.ConditionResult alerting2.ParamResourceID: rule.Resource.ID, alerting2.ParamResourceName: rule.Resource.Name, alerting2.ParamResults: conditionParams, - "objects": rule.Resource.Objects, + "objects": rule.Resource.Objects, "first_group_value": firstGroupValue, "first_threshold": firstThreshold, "rule_name": rule.Name, "priority": priority, - "min": min, - "max": max, - "env": envVariables, + "min": min, + "max": max, + "env": envVariables, } err = util.MergeFields(paramsCtx, extraParams, true) if err != nil { @@ -1004,30 +1006,30 @@ func (engine *Engine) Test(rule *alerting.Rule, msgType string) ([]alerting.Acti if err != nil { return nil, fmt.Errorf("check condition error:%w", err) } - alertMessage, err := getLastAlertMessage(rule.ID, 2 * time.Minute) + alertMessage, err := getLastAlertMessage(rule.ID, 2*time.Minute) if err != nil { return nil, fmt.Errorf("get alert message error: %w", err) } var actionResults []alerting.ActionExecutionResult - now := time.Now() + now := time.Now() triggerAt := now if alertMessage != nil { triggerAt = alertMessage.Created } - paramsCtx := newParameterCtx(rule, checkResults,util.MapStr{ - alerting2.ParamEventID: util.GetUUID(), - alerting2.ParamTimestamp: now.Unix(), - "duration": now.Sub(triggerAt).String(), - "trigger_at": triggerAt.Unix(), - } ) + paramsCtx := newParameterCtx(rule, checkResults, util.MapStr{ + alerting2.ParamEventID: util.GetUUID(), + alerting2.ParamTimestamp: now.Unix(), + "duration": now.Sub(triggerAt).String(), + "trigger_at": triggerAt.Unix(), + }) if msgType == "escalation" || msgType == "notification" { title, message := rule.GetNotificationTitleAndMessage() err = attachTitleMessageToCtx(title, message, paramsCtx) if err != nil { return nil, err } - }else if msgType == "recover_notification" { + } else if msgType == "recover_notification" { if rule.RecoveryNotificationConfig == nil { return nil, fmt.Errorf("recovery notification must not be empty") } @@ -1035,7 +1037,7 @@ func (engine *Engine) Test(rule *alerting.Rule, msgType string) ([]alerting.Acti if err != nil { return nil, err } - }else{ + } else { return nil, fmt.Errorf("unkonwn parameter msg type") } @@ -1060,7 +1062,7 @@ func (engine *Engine) Test(rule *alerting.Rule, msgType string) ([]alerting.Acti } if len(channels) > 0 { actionResults, _ = performChannels(channels, paramsCtx, true) - }else{ + } else { return nil, fmt.Errorf("no useable channel") } return actionResults, nil @@ -1071,8 +1073,8 @@ func performChannels(channels []alerting.Channel, ctx map[string]interface{}, ra var actionResults []alerting.ActionExecutionResult for _, channel := range channels { var ( - errStr string - resBytes []byte + errStr string + resBytes []byte messageBytes []byte ) _, err := common.RetrieveChannel(&channel, raiseChannelEnabledErr) @@ -1080,7 +1082,7 @@ func performChannels(channels []alerting.Channel, ctx map[string]interface{}, ra log.Error(err) errCount++ errStr = err.Error() - }else{ + } else { if !channel.Enabled { continue } @@ -1094,17 +1096,15 @@ func performChannels(channels []alerting.Channel, ctx map[string]interface{}, ra Result: string(resBytes), Error: errStr, Message: string(messageBytes), - ExecutionTime: int(time.Now().UnixNano()/1e6), + ExecutionTime: int(time.Now().UnixNano() / 1e6), ChannelType: channel.SubType, ChannelName: channel.Name, - ChannelID: channel.ID, + ChannelID: channel.ID, }) } return actionResults, errCount } - - func (engine *Engine) 
GenerateTask(rule alerting.Rule) func(ctx context.Context) { return func(ctx context.Context) { defer func() { @@ -1120,29 +1120,29 @@ func (engine *Engine) GenerateTask(rule alerting.Rule) func(ctx context.Context) } } -func CollectMetricData(agg interface{}, groupValues string, metricData *[]alerting.MetricData){ +func CollectMetricData(agg interface{}, groupValues string, metricData *[]alerting.MetricData) { if aggM, ok := agg.(map[string]interface{}); ok { if targetAgg, ok := aggM["filter_agg"]; ok { collectMetricData(targetAgg, groupValues, metricData) - }else{ + } else { collectMetricData(aggM, groupValues, metricData) } } } -func collectMetricData(agg interface{}, groupValues string, metricData *[]alerting.MetricData){ +func collectMetricData(agg interface{}, groupValues string, metricData *[]alerting.MetricData) { if aggM, ok := agg.(map[string]interface{}); ok { if timeBks, ok := aggM["time_buckets"].(map[string]interface{}); ok { if bks, ok := timeBks["buckets"].([]interface{}); ok { md := alerting.MetricData{ - Data: map[string][]alerting.TimeMetricData{}, + Data: map[string][]alerting.TimeMetricData{}, GroupValues: strings.Split(groupValues, "*"), } for _, bk := range bks { - if bkM, ok := bk.(map[string]interface{}); ok{ + if bkM, ok := bk.(map[string]interface{}); ok { for k, v := range bkM { - if k == "key" || k == "key_as_string" || k== "doc_count"{ + if k == "key" || k == "key_as_string" || k == "doc_count" { continue } if len(k) > 5 { //just store a,b,c @@ -1151,9 +1151,9 @@ func collectMetricData(agg interface{}, groupValues string, metricData *[]alerti if vm, ok := v.(map[string]interface{}); ok { if metricVal, ok := vm["value"]; ok { md.Data[k] = append(md.Data[k], alerting.TimeMetricData{bkM["key"], metricVal}) - }else{ + } else { //percentiles agg type - switch vm["values"].(type) { + switch vm["values"].(type) { case []interface{}: for _, val := range vm["values"].([]interface{}) { if valM, ok := val.(map[string]interface{}); ok { @@ -1163,7 +1163,7 @@ func collectMetricData(agg interface{}, groupValues string, metricData *[]alerti } case map[string]interface{}: for _, val := range vm["values"].(map[string]interface{}) { - md.Data[k] = append(md.Data[k], alerting.TimeMetricData{bkM["key"], val}) + md.Data[k] = append(md.Data[k], alerting.TimeMetricData{bkM["key"], val}) break } } @@ -1176,18 +1176,18 @@ func collectMetricData(agg interface{}, groupValues string, metricData *[]alerti } } - *metricData = append(*metricData,md) + *metricData = append(*metricData, md) } - }else{ + } else { for k, v := range aggM { - if k == "key" || k== "doc_count"{ + if k == "key" || k == "doc_count" { continue } if vm, ok := v.(map[string]interface{}); ok { if bks, ok := vm["buckets"].([]interface{}); ok { for _, bk := range bks { - if bkVal, ok := bk.(map[string]interface{}); ok { + if bkVal, ok := bk.(map[string]interface{}); ok { currentGroup := bkVal["key"].(string) newGroupValues := currentGroup if groupValues != "" { @@ -1227,20 +1227,20 @@ func getLastAlertMessageFromES(ruleID string) (*alerting.AlertMessage, error) { q := orm.Query{ RawQuery: util.MustToJSONBytes(queryDsl), } - err, searchResult := orm.Search(alerting.AlertMessage{}, &q ) + err, searchResult := orm.Search(alerting.AlertMessage{}, &q) if err != nil { - return nil, err + return nil, err } if len(searchResult.Result) == 0 { - return nil, nil + return nil, nil } messageBytes := util.MustToJSONBytes(searchResult.Result[0]) message := &alerting.AlertMessage{} - err = util.FromJSONBytes(messageBytes, message) + err = 
util.FromJSONBytes(messageBytes, message) return message, err } -func getLastAlertMessage(ruleID string, duration time.Duration) (*alerting.AlertMessage, error ){ +func getLastAlertMessage(ruleID string, duration time.Duration) (*alerting.AlertMessage, error) { messageBytes, err := kv.GetValue(alerting2.KVLastMessageState, []byte(ruleID)) if err != nil { return nil, err @@ -1280,15 +1280,14 @@ func saveAlertMessage(message *alerting.AlertMessage) error { return err } - -func readTimeFromKV(bucketKey string, key []byte)(time.Time, error){ +func readTimeFromKV(bucketKey string, key []byte) (time.Time, error) { timeBytes, err := kv.GetValue(bucketKey, key) zeroTime := time.Time{} if err != nil { return zeroTime, err } - timeStr := string(timeBytes) - if timeStr != ""{ + timeStr := string(timeBytes) + if timeStr != "" { return time.ParseInLocation(time.RFC3339, string(timeBytes), time.UTC) } return zeroTime, nil diff --git a/service/alerting/elasticsearch/engine_test.go b/service/alerting/elasticsearch/engine_test.go index 09d4c9b7..878c44a0 100644 --- a/service/alerting/elasticsearch/engine_test.go +++ b/service/alerting/elasticsearch/engine_test.go @@ -40,16 +40,16 @@ import ( "time" ) -func TestEngine( t *testing.T) { +func TestEngine(t *testing.T) { rule := alerting.Rule{ - ID: util.GetUUID(), + ID: util.GetUUID(), Created: time.Now(), Updated: time.Now(), Enabled: true, Resource: alerting.Resource{ - ID: "c8i18llath2blrusdjng", - Type: "elasticsearch", - Objects: []string{".infini_metrics*"}, + ID: "c8i18llath2blrusdjng", + Type: "elasticsearch", + Objects: []string{".infini_metrics*"}, TimeField: "timestamp", Filter: alerting.FilterQuery{ And: []alerting.FilterQuery{ @@ -123,9 +123,9 @@ func TestEngine( t *testing.T) { ThrottlePeriod: "1h", AcceptTimeRange: alerting.TimeRange{ Start: "8:00", - End: "21:00", + End: "21:00", }, - EscalationEnabled: true, + EscalationEnabled: true, EscalationThrottlePeriod: "30m", }, } @@ -143,11 +143,11 @@ func TestEngine( t *testing.T) { //fmt.Println(util.MustToJSON(filter)) } -func TestGenerateAgg(t *testing.T) { +func TestGenerateAgg(t *testing.T) { eng := &Engine{} agg := eng.generateAgg(&insight.MetricItem{ - Name: "a", - Field: "cpu.percent", + Name: "a", + Field: "cpu.percent", Statistic: "p99", }) fmt.Println(util.MustToJSON(agg)) @@ -210,20 +210,20 @@ func TestGeneratePercentilesAggQuery(t *testing.T) { esClient := elasticsearch.ESAPIV7{} esClient.Elasticsearch = cfg.ID esClient.Version = elastic.Version{ - Number: "7.10.2", - Major: 7, + Number: "7.10.2", + Major: 7, Distribution: elastic.Elasticsearch, } elastic.UpdateClient(cfg, &esClient) rule := alerting.Rule{ - ID: util.GetUUID(), + ID: util.GetUUID(), Created: time.Now(), Updated: time.Now(), Enabled: true, Resource: alerting.Resource{ - ID: cfg.ID, - Type: "elasticsearch", - Objects: []string{".infini_metrics*"}, + ID: cfg.ID, + Type: "elasticsearch", + Objects: []string{".infini_metrics*"}, TimeField: "timestamp", RawFilter: map[string]interface{}{ "bool": map[string]interface{}{ @@ -271,9 +271,9 @@ func TestGeneratePercentilesAggQuery(t *testing.T) { ThrottlePeriod: "1h", AcceptTimeRange: alerting.TimeRange{ Start: "08:00", - End: "21:00", + End: "21:00", }, - EscalationEnabled: true, + EscalationEnabled: true, EscalationThrottlePeriod: "30m", }, } @@ -289,21 +289,21 @@ func TestConvertFilterQuery(t *testing.T) { fq := alerting.FilterQuery{ And: []alerting.FilterQuery{ { - Field: "metadata.category", - Values: []string{"elasticsearch"}, + Field: "metadata.category", + Values: 
[]string{"elasticsearch"}, Operator: "equals", }, { - Field: "metadata.name", - Values: []string{"index_stats", "node_stats"}, + Field: "metadata.name", + Values: []string{"index_stats", "node_stats"}, Operator: "in", }, { Not: []alerting.FilterQuery{ { - Field: "timestamp", + Field: "timestamp", Operator: "gt", - Values: []string{"2022-04-16T16:16:39.168605+08:00"}, + Values: []string{"2022-04-16T16:16:39.168605+08:00"}, }, }, }, @@ -318,4 +318,4 @@ func TestConvertFilterQuery(t *testing.T) { if dsl := util.MustToJSON(q); dsl != targetDsl { t.Errorf("expect dsl %s but got %s", targetDsl, dsl) } -} \ No newline at end of file +} diff --git a/service/alerting/elasticsearch/init.go b/service/alerting/elasticsearch/init.go index c6209d16..80281fb0 100644 --- a/service/alerting/elasticsearch/init.go +++ b/service/alerting/elasticsearch/init.go @@ -29,7 +29,7 @@ package elasticsearch import "infini.sh/console/service/alerting" -func init(){ +func init() { eng := Engine{} alerting.RegistEngine("elasticsearch", &eng) } diff --git a/service/alerting/engine.go b/service/alerting/engine.go index 106c6fbe..fbf19b68 100644 --- a/service/alerting/engine.go +++ b/service/alerting/engine.go @@ -36,17 +36,18 @@ import ( type Engine interface { GenerateQuery(rule *alerting.Rule, filterParam *alerting.FilterParam) (interface{}, error) - ExecuteQuery(rule *alerting.Rule, filterParam *alerting.FilterParam)(*alerting.QueryResult, error) - CheckCondition(rule *alerting.Rule)(*alerting.ConditionResult, error) + ExecuteQuery(rule *alerting.Rule, filterParam *alerting.FilterParam) (*alerting.QueryResult, error) + CheckCondition(rule *alerting.Rule) (*alerting.ConditionResult, error) GenerateTask(rule alerting.Rule) func(ctx context.Context) Test(rule *alerting.Rule, msgType string) ([]alerting.ActionExecutionResult, error) - GetTargetMetricData(rule *alerting.Rule, isFilterNaN bool, filterParam *alerting.FilterParam)([]alerting.MetricData, *alerting.QueryResult, error) + GetTargetMetricData(rule *alerting.Rule, isFilterNaN bool, filterParam *alerting.FilterParam) ([]alerting.MetricData, *alerting.QueryResult, error) } var ( - alertEngines = map[string] Engine{} + alertEngines = map[string]Engine{} alertEnginesMutex = sync.RWMutex{} ) + func RegistEngine(typ string, engine Engine) { alertEnginesMutex.Lock() defer alertEnginesMutex.Unlock() @@ -61,4 +62,4 @@ func GetEngine(typ string) Engine { panic(fmt.Sprintf("alert engine of type: %s not found", typ)) } return eng -} \ No newline at end of file +} diff --git a/service/alerting/env.go b/service/alerting/env.go index 40122d4f..5cb1ccfa 100644 --- a/service/alerting/env.go +++ b/service/alerting/env.go @@ -37,7 +37,7 @@ import ( log "src/github.com/cihub/seelog" ) -func GetEnvVariables() (map[string]interface{}, error){ +func GetEnvVariables() (map[string]interface{}, error) { configFile := global.Env().GetConfigFile() envVariables, err := config.LoadEnvVariables(configFile) if err != nil { @@ -64,7 +64,7 @@ func GetEnvVariables() (map[string]interface{}, error){ return envVariables, nil } -func GetInnerConsoleEndpoint() (string, error){ +func GetInnerConsoleEndpoint() (string, error) { appConfig := &config2.AppConfig{ UI: config2.UIConfig{}, } diff --git a/service/alerting/funcs/bytes.go b/service/alerting/funcs/bytes.go index ff3bde19..838f817b 100644 --- a/service/alerting/funcs/bytes.go +++ b/service/alerting/funcs/bytes.go @@ -31,4 +31,4 @@ import "infini.sh/framework/core/util" func formatBytes(precision int, bytes float64) string { return 
util.FormatBytes(bytes, precision) -} \ No newline at end of file +} diff --git a/service/alerting/funcs/date.go b/service/alerting/funcs/date.go index 2df6bd47..caace3cf 100644 --- a/service/alerting/funcs/date.go +++ b/service/alerting/funcs/date.go @@ -32,10 +32,10 @@ import ( "time" ) -func datetimeInZone(zone string, date interface{}) string{ +func datetimeInZone(zone string, date interface{}) string { return _dateInZone("2006-01-02 15:04:05", date, zone) } -func datetime(date interface{}) string{ +func datetime(date interface{}) string { return _dateInZone("2006-01-02 15:04:05", date, "Local") } @@ -58,7 +58,7 @@ func _dateInZone(fmt string, date interface{}, zone string) string { t = *date case int64: if date > 1e12 { - date = date/1000 + date = date / 1000 } t = time.Unix(date, 0) case int: diff --git a/service/alerting/funcs/elastic.go b/service/alerting/funcs/elastic.go index f01f63b4..62c3a8b7 100644 --- a/service/alerting/funcs/elastic.go +++ b/service/alerting/funcs/elastic.go @@ -35,7 +35,7 @@ import ( "strings" ) -func lookup(directory string, id string) interface{}{ +func lookup(directory string, id string) interface{} { directory = strings.TrimSpace(directory) if directory == "" { return "empty_directory" @@ -46,8 +46,8 @@ func lookup(directory string, id string) interface{}{ kv := strings.Split(part, "=") if len(kv) == 2 { k := strings.TrimSpace(kv[0]) - kvs[k]= strings.TrimSpace(kv[1]) - }else{ + kvs[k] = strings.TrimSpace(kv[1]) + } else { log.Debugf("got unexpected directory part: %s", part) } } @@ -59,10 +59,10 @@ func lookup(directory string, id string) interface{}{ return kvs["default"] } -func lookupMetadata(object string, property string, defaultValue string, id string) interface{}{ +func lookupMetadata(object string, property string, defaultValue string, id string) interface{} { var ( cfgM = util.MapStr{} - buf []byte + buf []byte ) switch object { case "cluster": @@ -127,4 +127,4 @@ func lookupMetadata(object string, property string, defaultValue string, id stri return v } return defaultValue -} \ No newline at end of file +} diff --git a/service/alerting/funcs/function.go b/service/alerting/funcs/function.go index d6c7b173..f8d78069 100644 --- a/service/alerting/funcs/function.go +++ b/service/alerting/funcs/function.go @@ -41,21 +41,21 @@ func GenericFuncMap() template.FuncMap { } var genericMap = map[string]interface{}{ - "hello": func() string { return "Hello!" }, - "format_bytes": formatBytes, - "to_fixed": toFixed, - "date": date, - "date_in_zone": dateInZone, - "datetime": datetime, + "hello": func() string { return "Hello!" 
}, + "format_bytes": formatBytes, + "to_fixed": toFixed, + "date": date, + "date_in_zone": dateInZone, + "datetime": datetime, "datetime_in_zone": datetimeInZone, - "to_upper": strings.ToUpper, - "to_lower": strings.ToLower, - "add": add, - "sub": sub, - "div": div, - "mul": mul, - "lookup": lookup, - "str_replace": replace, - "md_to_html": mdToHTML, + "to_upper": strings.ToUpper, + "to_lower": strings.ToLower, + "add": add, + "sub": sub, + "div": div, + "mul": mul, + "lookup": lookup, + "str_replace": replace, + "md_to_html": mdToHTML, //"get_keystore_secret": getKeystoreSecret, } diff --git a/service/alerting/funcs/numberic.go b/service/alerting/funcs/numberic.go index d4c487d0..8b928bd7 100644 --- a/service/alerting/funcs/numberic.go +++ b/service/alerting/funcs/numberic.go @@ -35,10 +35,10 @@ import ( "strconv" ) -func toFixed(precision int, num float64) float64{ +func toFixed(precision int, num float64) float64 { return util.ToFixed(num, precision) } -func add(a, b interface{}) float64{ +func add(a, b interface{}) float64 { av := ToFloat64(a) bv := ToFloat64(b) return av + bv @@ -125,4 +125,4 @@ func ToFloat64E(i interface{}) (float64, error) { default: return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) } -} \ No newline at end of file +} diff --git a/service/alerting/funcs/strings.go b/service/alerting/funcs/strings.go index 30675d4f..0f1ae3f8 100644 --- a/service/alerting/funcs/strings.go +++ b/service/alerting/funcs/strings.go @@ -37,7 +37,7 @@ import ( func substring(start, end int, s string) string { runes := []rune(s) length := len(runes) - if start < 0 || start > length || end < 0 || end > length{ + if start < 0 || start > length || end < 0 || end > length { return s } return string(runes[start:end]) @@ -59,4 +59,4 @@ func mdToHTML(mdText string) string { buf := markdown.Render(doc, renderer) return string(buf) -} \ No newline at end of file +} diff --git a/service/alerting/parameter.go b/service/alerting/parameter.go index 6bd0ae06..aa6e55d4 100644 --- a/service/alerting/parameter.go +++ b/service/alerting/parameter.go @@ -28,11 +28,11 @@ package alerting type ParameterMeta struct { - Name string `json:"name"` - Type string `json:"type"` //int, float, string, date, array, object - Description string `json:"description"` - Eg string `json:"eg,omitempty"` - Properties []ParameterMeta `json:"properties,omitempty"` + Name string `json:"name"` + Type string `json:"type"` //int, float, string, date, array, object + Description string `json:"description"` + Eg string `json:"eg,omitempty"` + Properties []ParameterMeta `json:"properties,omitempty"` } func GetTemplateParameters() []ParameterMeta { From eb7355b25533b500f06a9bb15bc7b14fdbfdff06 Mon Sep 17 00:00:00 2001 From: luohoufu Date: Sat, 11 Jan 2025 19:24:41 +0800 Subject: [PATCH 2/7] chore: remove fetch-depth --- .github/workflows/unit_test.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/unit_test.yml b/.github/workflows/unit_test.yml index bd213dbe..e63ef543 100644 --- a/.github/workflows/unit_test.yml +++ b/.github/workflows/unit_test.yml @@ -18,13 +18,11 @@ jobs: - name: Checkout current repository uses: actions/checkout@v4 with: - fetch-depth: 0 path: console - name: Checkout framework repository uses: actions/checkout@v4 with: - fetch-depth: 0 repository: infinilabs/framework path: framework @@ -32,7 +30,6 @@ jobs: uses: actions/checkout@v4 with: ref: main - fetch-depth: 0 repository: infinilabs/framework-vendor path: vendor From f07f5afbd4e4d1009725862c272f07e1d3d096ce Mon Sep 17 
00:00:00 2001 From: luohoufu Date: Tue, 14 Jan 2025 13:10:04 +0800 Subject: [PATCH 3/7] chore: add format and lint --- .github/workflows/unit_test.yml | 102 -------------------------------- .gitignore | 2 + config/generated.go | 10 ---- 3 files changed, 2 insertions(+), 112 deletions(-) delete mode 100644 .github/workflows/unit_test.yml delete mode 100644 config/generated.go diff --git a/.github/workflows/unit_test.yml b/.github/workflows/unit_test.yml deleted file mode 100644 index e63ef543..00000000 --- a/.github/workflows/unit_test.yml +++ /dev/null @@ -1,102 +0,0 @@ -name: Unit Test - -on: - pull_request: - branches: [ "main" ] - -defaults: - run: - shell: bash - -jobs: - build: - runs-on: ubuntu-latest - env: - GO_VERSION: 1.23.4 - NODEJS_VERSION: 16.20.2 - steps: - - name: Checkout current repository - uses: actions/checkout@v4 - with: - path: console - - - name: Checkout framework repository - uses: actions/checkout@v4 - with: - repository: infinilabs/framework - path: framework - - - name: Checkout framework-vendor - uses: actions/checkout@v4 - with: - ref: main - repository: infinilabs/framework-vendor - path: vendor - - - name: Set up nodejs toolchain - uses: actions/setup-node@v4 - with: - node-version: ${{ env.NODEJS_VERSION }} - - - name: Cache dependencies - uses: actions/cache@v4 - with: - path: | - node_modules - key: ${{ runner.os }}-cnpm-${{ hashFiles('**/package.json') }} - restore-keys: | - ${{ runner.os }}-cnpm- - - - name: Check nodejs toolchain - run: | - if ! command -v cnpm >/dev/null 2>&1; then - npm install -g rimraf --quiet --no-progress - npm install -g cnpm@9.2.0 --quiet --no-progress - fi - node -v && npm -v && cnpm -v - - - name: Set up go toolchain - uses: actions/setup-go@v5 - with: - go-version: ${{ env.GO_VERSION }} - check-latest: false - cache: true - - - name: Check go toolchain - run: go version - - - name: Cache Build Output - uses: actions/cache@v4 - with: - path: | - .public - key: ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}- - ${{ runner.os }}-build- - - - name: Unit test - env: - GOFLAGS: -tags=ci - run: | - echo Home path is $HOME - export WORKBASE=$HOME/go/src/infini.sh - export WORK=$WORKBASE/console - - # for test workspace - mkdir -p $HOME/go/src/ - ln -s $GITHUB_WORKSPACE $WORKBASE - - # for web build - cd $WORK/web - cnpm install --quiet --no-progress - cnpm run build --quiet - - # check work folder - ls -lrt $WORKBASE/ - ls -alrt $WORK - - # for unit test - cd $WORK - echo Testing code at $PWD ... 
- make test \ No newline at end of file diff --git a/.gitignore b/.gitignore index d36e7c1d..b21887af 100644 --- a/.gitignore +++ b/.gitignore @@ -32,5 +32,7 @@ appveyor.yml log/ .env generated_*.go +config/generated.go +config/generat*.go config/initialization.dsl config/system_config.yml diff --git a/config/generated.go b/config/generated.go deleted file mode 100644 index f52e02d3..00000000 --- a/config/generated.go +++ /dev/null @@ -1,10 +0,0 @@ -package config - -const LastCommitLog = "N/A" -const BuildDate = "N/A" - -const EOLDate = "N/A" - -const Version = "0.0.1-SNAPSHOT" - -const BuildNumber = "001" From 7750125a9c543634efbb8c064f4a04da9a8435e0 Mon Sep 17 00:00:00 2001 From: luohoufu Date: Tue, 14 Jan 2025 13:10:42 +0800 Subject: [PATCH 4/7] chore: add pr_check --- .github/workflows/pr_check.yml | 232 +++++++++++++++++++++++++++++++++ 1 file changed, 232 insertions(+) create mode 100644 .github/workflows/pr_check.yml diff --git a/.github/workflows/pr_check.yml b/.github/workflows/pr_check.yml new file mode 100644 index 00000000..6df8ed80 --- /dev/null +++ b/.github/workflows/pr_check.yml @@ -0,0 +1,232 @@ +name: Unit Test + +on: + pull_request: + branches: [ "main" ] + +defaults: + run: + shell: bash + +env: + GO_VERSION: 1.23.4 + NODEJS_VERSION: 16.20.2 + PNAME: console + +jobs: + format_check: + runs-on: ubuntu-latest + steps: + - name: Checkout current repository + uses: actions/checkout@v4 + with: + path: ${{ env.PNAME }} + + - name: Checkout framework repository + uses: actions/checkout@v4 + with: + repository: infinilabs/framework + path: framework + + - name: Checkout framework-vendor + uses: actions/checkout@v4 + with: + ref: main + repository: infinilabs/framework-vendor + path: vendor + + - name: Set up go toolchain + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + check-latest: false + cache: true + + - name: Check go toolchain + run: go version + + - name: Run make format + shell: bash + run: | + echo Home path is $HOME + export WORKBASE=$HOME/go/src/infini.sh + export WORK=$WORKBASE/$PNAME + + # for test workspace + mkdir -p $HOME/go/src/ + ln -s $GITHUB_WORKSPACE $WORKBASE + + # check work folder + ls -lrt $WORKBASE/ + ls -alrt $WORK + + # for unit test + cd $WORK + echo Formatting code at $PWD ... + make format + if [ $?
-ne 0 ]; then + echo "make format failed, please check make output" + exit 1 + fi + + - name: Check for changes after format + id: check-changes + shell: bash + run: | + export WORKBASE=$HOME/go/src/infini.sh + export WORK=$WORKBASE/$PNAME + + # for format check + cd $WORK + if [[ $(git status --porcelain | grep -c " M .*\.go$") -gt 0 ]]; then + echo "go format detected formatting changes" + echo "changes=true" >> $GITHUB_OUTPUT + else + echo "go format no changes found" + echo "changes=false" >> $GITHUB_OUTPUT + fi + + - name: Fail workflow if changes after format + if: steps.check-changes.outputs.changes == 'true' + run: exit 1 + + unit_test: + runs-on: ubuntu-latest + steps: + - name: Checkout current repository + uses: actions/checkout@v4 + with: + path: console + + - name: Checkout framework repository + uses: actions/checkout@v4 + with: + repository: infinilabs/framework + path: framework + + - name: Checkout framework-vendor + uses: actions/checkout@v4 + with: + ref: main + repository: infinilabs/framework-vendor + path: vendor + + - name: Set up nodejs toolchain + uses: actions/setup-node@v4 + with: + node-version: ${{ env.NODEJS_VERSION }} + + - name: Cache dependencies + uses: actions/cache@v4 + with: + path: | + node_modules + key: ${{ runner.os }}-cnpm-${{ hashFiles('**/package.json') }} + restore-keys: | + ${{ runner.os }}-cnpm- + + - name: Check nodejs toolchain + run: | + if ! command -v cnpm >/dev/null 2>&1; then + npm install -g rimraf --quiet --no-progress + npm install -g cnpm@9.2.0 --quiet --no-progress + fi + node -v && npm -v && cnpm -v + + - name: Set up go toolchain + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + check-latest: false + cache: true + + - name: Check go toolchain + run: go version + + - name: Cache Build Output + uses: actions/cache@v4 + with: + path: | + .public + key: ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}- + ${{ runner.os }}-build- + + - name: Unit test + env: + GOFLAGS: -tags=ci + run: | + echo Home path is $HOME + export WORKBASE=$HOME/go/src/infini.sh + export WORK=$WORKBASE/console + + # for test workspace + mkdir -p $HOME/go/src/ + ln -s $GITHUB_WORKSPACE $WORKBASE + + # for web build + cd $WORK/web + cnpm install --quiet --no-progress + cnpm run build --quiet + + # check work folder + ls -lrt $WORKBASE/ + ls -alrt $WORK + + # for unit test + cd $WORK + echo Testing code at $PWD ... + make test + + code_lint: + runs-on: ubuntu-latest + steps: + - name: Checkout current repository + uses: actions/checkout@v4 + with: + path: ${{ env.PNAME }} + + - name: Checkout framework repository + uses: actions/checkout@v4 + with: + repository: infinilabs/framework + path: framework + + - name: Checkout framework-vendor + uses: actions/checkout@v4 + with: + ref: main + repository: infinilabs/framework-vendor + path: vendor + + - name: Set up go toolchain + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + check-latest: false + cache: true + + - name: Check go toolchain + run: go version + + - name: Code lint + env: + GOFLAGS: -tags=ci + run: | + echo Home path is $HOME + export WORKBASE=$HOME/go/src/infini.sh + export WORK=$WORKBASE/$PNAME + + # for test workspace + mkdir -p $HOME/go/src/ + ln -s $GITHUB_WORKSPACE $WORKBASE + + # check work folder + ls -lrt $WORKBASE/ + ls -alrt $WORK + + # for unit test + cd $WORK + echo Linting code at $PWD ...
+ make lint \ No newline at end of file From 550a6f3781a414968bb667e58511ce2ee4f9511e Mon Sep 17 00:00:00 2001 From: luohoufu Date: Tue, 14 Jan 2025 13:17:51 +0800 Subject: [PATCH 5/7] fix: lint with config --- .github/workflows/pr_check.yml | 43 +++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/.github/workflows/pr_check.yml b/.github/workflows/pr_check.yml index 6df8ed80..416ad63f 100644 --- a/.github/workflows/pr_check.yml +++ b/.github/workflows/pr_check.yml @@ -200,6 +200,28 @@ jobs: repository: infinilabs/framework-vendor path: vendor + - name: Set up nodejs toolchain + uses: actions/setup-node@v4 + with: + node-version: ${{ env.NODEJS_VERSION }} + + - name: Cache dependencies + uses: actions/cache@v4 + with: + path: | + node_modules + key: ${{ runner.os }}-cnpm-${{ hashFiles('**/package.json') }} + restore-keys: | + ${{ runner.os }}-cnpm- + + - name: Check nodejs toolchain + run: | + if ! command -v cnpm >/dev/null 2>&1; then + npm install -g rimraf --quiet --no-progress + npm install -g cnpm@9.2.0 --quiet --no-progress + fi + node -v && npm -v && cnpm -v + - name: Set up go toolchain uses: actions/setup-go@v5 with: @@ -210,23 +232,38 @@ jobs: - name: Check go toolchain run: go version + - name: Cache Build Output + uses: actions/cache@v4 + with: + path: | + .public + key: ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}- + ${{ runner.os }}-build- + - name: Code lint env: GOFLAGS: -tags=ci run: | echo Home path is $HOME export WORKBASE=$HOME/go/src/infini.sh - export WORK=$WORKBASE/$PNAME + export WORK=$WORKBASE/console # for test workspace mkdir -p $HOME/go/src/ ln -s $GITHUB_WORKSPACE $WORKBASE - + + # for web build + cd $WORK/web + cnpm install --quiet --no-progress + cnpm run build --quiet + # check work folder ls -lrt $WORKBASE/ ls -alrt $WORK # for unit test cd $WORK - echo Linting code at $PWD ... + echo Testing code at $PWD ... make lint \ No newline at end of file From a749c6dcbed8101b81137f1caffa0b7b4b0e1ce1 Mon Sep 17 00:00:00 2001 From: luohoufu Date: Tue, 14 Jan 2025 14:17:35 +0800 Subject: [PATCH 6/7] chore: this pr only unit test --- .github/workflows/pr_check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr_check.yml b/.github/workflows/pr_check.yml index 416ad63f..12833957 100644 --- a/.github/workflows/pr_check.yml +++ b/.github/workflows/pr_check.yml @@ -266,4 +266,4 @@ jobs: # for unit test cd $WORK echo Testing code at $PWD ... 
- make lint \ No newline at end of file + # make lint \ No newline at end of file From 82b528a78d8450ca9d9df8fefe68f2038ec3436b Mon Sep 17 00:00:00 2001 From: luohoufu Date: Tue, 14 Jan 2025 14:25:15 +0800 Subject: [PATCH 7/7] fix: code format error --- .github/workflows/pr_check.yml | 64 +++++++++++++++++++++++++++------- 1 file changed, 51 insertions(+), 13 deletions(-) diff --git a/.github/workflows/pr_check.yml b/.github/workflows/pr_check.yml index 12833957..c9e2e9a3 100644 --- a/.github/workflows/pr_check.yml +++ b/.github/workflows/pr_check.yml @@ -15,8 +15,8 @@ env: jobs: format_check: - runs-on: ubuntu-latest - steps: + runs-on: ubuntu-latest + steps: - name: Checkout current repository uses: actions/checkout@v4 with: path: ${{ env.PNAME }} @@ -35,6 +35,28 @@ jobs: repository: infinilabs/framework-vendor path: vendor + - name: Set up nodejs toolchain + uses: actions/setup-node@v4 + with: + node-version: ${{ env.NODEJS_VERSION }} + + - name: Cache dependencies + uses: actions/cache@v4 + with: + path: | + node_modules + key: ${{ runner.os }}-cnpm-${{ hashFiles('**/package.json') }} + restore-keys: | + ${{ runner.os }}-cnpm- + + - name: Check nodejs toolchain + run: | + if ! command -v cnpm >/dev/null 2>&1; then + npm install -g rimraf --quiet --no-progress + npm install -g cnpm@9.2.0 --quiet --no-progress + fi + node -v && npm -v && cnpm -v + - name: Set up go toolchain uses: actions/setup-go@v5 with: @@ -44,23 +66,39 @@ jobs: - name: Check go toolchain run: go version - - - name: Run make format - shell: bash + + - name: Cache Build Output + uses: actions/cache@v4 + with: + path: | + .public + key: ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}- + ${{ runner.os }}-build- + + - name: Code format + env: + GOFLAGS: -tags=ci run: | echo Home path is $HOME export WORKBASE=$HOME/go/src/infini.sh - export WORK=$WORKBASE/$PNAME + export WORK=$WORKBASE/console # for test workspace mkdir -p $HOME/go/src/ ln -s $GITHUB_WORKSPACE $WORKBASE - + + # for web build + cd $WORK/web + cnpm install --quiet --no-progress + cnpm run build --quiet + # check work folder ls -lrt $WORKBASE/ ls -alrt $WORK - - # for unit test + + # for code format cd $WORK echo Formatting code at $PWD ... make format @@ -96,7 +134,7 @@ jobs: - name: Checkout current repository uses: actions/checkout@v4 with: - path: console + path: ${{ env.PNAME }} - name: Checkout framework repository uses: actions/checkout@v4 @@ -159,7 +197,7 @@ jobs: run: | echo Home path is $HOME export WORKBASE=$HOME/go/src/infini.sh - export WORK=$WORKBASE/console + export WORK=$WORKBASE/$PNAME # for test workspace mkdir -p $HOME/go/src/ @@ -248,7 +286,7 @@ jobs: run: | echo Home path is $HOME export WORKBASE=$HOME/go/src/infini.sh - export WORK=$WORKBASE/console + export WORK=$WORKBASE/$PNAME # for test workspace mkdir -p $HOME/go/src/ @@ -263,7 +301,7 @@ jobs: ls -lrt $WORKBASE/ ls -alrt $WORK - # for unit test + # for code lint cd $WORK echo Testing code at $PWD ... # make lint \ No newline at end of file
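
For contributors who want to reproduce the CI format gate locally before opening a pull request, the following is a minimal sketch of what the format_check job above does. It assumes the $HOME/go/src/infini.sh workspace layout and the Makefile's format target exactly as the workflow uses them; adapt the paths if your checkout lives elsewhere.

    # Sketch: run the format gate locally (assumes the workflow's workspace
    # layout and the repository's `make format` target).
    export WORKBASE=$HOME/go/src/infini.sh
    export WORK=$WORKBASE/console
    cd $WORK
    make format
    # Mirror the "Check for changes after format" step: fail if the formatter
    # modified any tracked .go file.
    if [[ $(git status --porcelain | grep -c " M .*\.go$") -gt 0 ]]; then
      echo "go format detected formatting changes"
      exit 1
    fi

If this exits non-zero, committing the formatter's changes before pushing should make the format_check job pass.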