Add a new config and metric for reporting ruler query execution wall time. #4317

Merged · 16 commits · Jul 9, 2021
Changes from 10 commits
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -1,6 +1,7 @@
# Changelog

## master / unreleased
* [FEATURE] Ruler: Add a new `-ruler.query-stats-enabled` flag which, when enabled, reports the `cortex_ruler_query_seconds_total` metric tracking the total wall time, in seconds, spent executing queries in the ruler. #4317

## 1.10.0-rc.0 / 2021-06-28

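For readers skimming the changelog entry above, here is a rough illustration (not part of this diff) of the shape of the new counter: one `cortex_ruler_query_seconds_total` series per tenant, keyed by the `user` label, accumulating wall-clock seconds across rule evaluations. The tenant names and the use of `testutil` are assumptions made for the example.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	// Same name, help text and label as the counter added in this PR.
	querySeconds := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "cortex_ruler_query_seconds_total",
		Help: "Total amount of wall clock time spent processing queries by the ruler.",
	}, []string{"user"})

	// Each rule evaluation adds its wall time to the evaluating tenant's series.
	querySeconds.WithLabelValues("tenant-a").Add(1.5)
	querySeconds.WithLabelValues("tenant-a").Add(0.5)
	querySeconds.WithLabelValues("tenant-b").Add(0.25)

	fmt.Println(testutil.ToFloat64(querySeconds.WithLabelValues("tenant-a"))) // 2
	fmt.Println(testutil.ToFloat64(querySeconds.WithLabelValues("tenant-b"))) // 0.25
}
```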
4 changes: 4 additions & 0 deletions docs/configuration/config-file-reference.md
@@ -1616,6 +1616,10 @@ ring:
# processing will ignore them instead. Subject to sharding.
# CLI flag: -ruler.disabled-tenants
[disabled_tenants: <string> | default = ""]

# Report the wall time for ruler queries to complete as a metric.
# CLI flag: -ruler.query-stats-enabled
[query_stats_enabled: <boolean> | default = false]
```

### `ruler_storage_config`
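The `query_stats_enabled` YAML field above maps one-to-one to the new CLI flag registered in `pkg/ruler/ruler.go` further down. A hedged sketch of that equivalence (the flag-set name and standalone setup are assumptions; `Config.RegisterFlags`, the flag name, and the `EnableQueryStats` field come from this diff):

```go
package main

import (
	"flag"
	"fmt"

	"github.com/cortexproject/cortex/pkg/ruler"
)

func main() {
	fs := flag.NewFlagSet("example", flag.ContinueOnError)

	var cfg ruler.Config
	cfg.RegisterFlags(fs) // registers -ruler.query-stats-enabled along with the other ruler flags

	// Passing the flag is equivalent to setting `query_stats_enabled: true` in the ruler YAML block.
	_ = fs.Parse([]string{"-ruler.query-stats-enabled=true"})

	fmt.Println(cfg.EnableQueryStats) // true
}
```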
23 changes: 20 additions & 3 deletions pkg/ruler/compat.go
@@ -143,12 +143,17 @@ func EngineQueryFunc(engine *promql.Engine, q storage.Queryable, overrides Rules
}
}

func MetricsQueryFunc(qf rules.QueryFunc, queries, failedQueries prometheus.Counter) rules.QueryFunc {
func MetricsQueryFunc(qf rules.QueryFunc, queries, failedQueries prometheus.Counter, queryTime prometheus.Counter) rules.QueryFunc {
return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
queries.Inc()

result, err := qf(ctx, qs, t)
// If we've been passed a counter we want to record the wall time spent executing this request.
if queryTime != nil {
timer := prometheus.NewTimer(nil)
defer func() { queryTime.Add(timer.ObserveDuration().Seconds()) }()
}

result, err := qf(ctx, qs, t)
// We rely on TranslateToPromqlApiError to do its job here... it returns nil, if err is nil.
// It returns promql.ErrStorage, if error should be reported back as 500.
// Other errors it returns are either for canceled or timed-out queriers (we're not reporting those as failures),
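A note on `prometheus.NewTimer(nil)` above: the nil observer is deliberate; `ObserveDuration` then only returns the elapsed duration without recording it anywhere, so the seconds can be added to a plain counter instead of being observed into a histogram or summary. A minimal, self-contained sketch of that pattern (names are illustrative, not from this PR):

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

// timeInto runs fn and adds its wall-clock duration, in seconds, to wallSeconds.
func timeInto(wallSeconds prometheus.Counter, fn func()) {
	timer := prometheus.NewTimer(nil) // nil observer: ObserveDuration just returns the duration
	defer func() { wallSeconds.Add(timer.ObserveDuration().Seconds()) }()
	fn()
}

func main() {
	wallSeconds := prometheus.NewCounter(prometheus.CounterOpts{Name: "example_query_seconds_total"})
	timeInto(wallSeconds, func() { time.Sleep(10 * time.Millisecond) })
	fmt.Printf("%.2f\n", testutil.ToFloat64(wallSeconds)) // ~0.01
}
```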
@@ -199,12 +204,24 @@ func DefaultTenantManagerFactory(cfg Config, p Pusher, q storage.Queryable, engi
Name: "cortex_ruler_queries_failed_total",
Help: "Number of failed queries by ruler.",
})
var rulerQuerySeconds *prometheus.CounterVec
if cfg.EnableQueryStats {
rulerQuerySeconds = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "cortex_ruler_query_seconds_total",
Help: "Total amount of wall clock time spent processing queries by the ruler.",
}, []string{"user"})
}

return func(ctx context.Context, userID string, notifier *notifier.Manager, logger log.Logger, reg prometheus.Registerer) RulesManager {
var queryTime prometheus.Counter = nil
if rulerQuerySeconds != nil {
queryTime = rulerQuerySeconds.WithLabelValues(userID)
}

return rules.NewManager(&rules.ManagerOptions{
Appendable: NewPusherAppendable(p, userID, overrides, totalWrites, failedWrites),
Queryable: q,
QueryFunc: MetricsQueryFunc(EngineQueryFunc(engine, q, overrides, userID), totalQueries, failedQueries),
QueryFunc: MetricsQueryFunc(EngineQueryFunc(engine, q, overrides, userID), totalQueries, failedQueries, queryTime),
Context: user.InjectOrgID(ctx, userID),
ExternalURL: cfg.ExternalURL.URL,
NotifyFunc: SendAlerts(notifier, cfg.ExternalURL.URL.String()),
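One design detail worth calling out in the factory change above: the `CounterVec` is registered only when `-ruler.query-stats-enabled` is set, and each tenant's rules manager gets its own child via `WithLabelValues(userID)`; when the feature is off the child stays nil and `MetricsQueryFunc` skips the timer. The sketch below (tenant name and registry are assumptions) shows the practical consequence: nothing at all is exported while the flag is disabled.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// buildRegistry mimics the conditional registration in DefaultTenantManagerFactory.
func buildRegistry(enableQueryStats bool) *prometheus.Registry {
	reg := prometheus.NewRegistry()
	if enableQueryStats {
		querySeconds := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
			Name: "cortex_ruler_query_seconds_total",
			Help: "Total amount of wall clock time spent processing queries by the ruler.",
		}, []string{"user"})
		querySeconds.WithLabelValues("tenant-a").Add(1.5)
	}
	return reg
}

func main() {
	for _, enabled := range []bool{false, true} {
		families, _ := buildRegistry(enabled).Gather()
		fmt.Println(enabled, "->", len(families), "metric families") // false -> 0, true -> 1
	}
}
```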
22 changes: 20 additions & 2 deletions pkg/ruler/compat_test.go
@@ -226,12 +226,12 @@ func TestMetricsQueryFuncErrors(t *testing.T) {
t.Run(name, func(t *testing.T) {
queries := prometheus.NewCounter(prometheus.CounterOpts{})
failures := prometheus.NewCounter(prometheus.CounterOpts{})
queryTime := prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user"})

mockFunc := func(ctx context.Context, q string, t time.Time) (promql.Vector, error) {
return promql.Vector{}, tc.returnedError
}

qf := MetricsQueryFunc(mockFunc, queries, failures)
qf := MetricsQueryFunc(mockFunc, queries, failures, queryTime.WithLabelValues("userID"))

_, err := qf(context.Background(), "test", time.Now())
require.Equal(t, tc.returnedError, err)
@@ -241,3 +241,21 @@
})
}
}

func TestMetricsQueryFuncMetrics(t *testing.T) {
queries := prometheus.NewCounter(prometheus.CounterOpts{})
failures := prometheus.NewCounter(prometheus.CounterOpts{})
queryTime := prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user"})

mockFunc := func(ctx context.Context, q string, t time.Time) (promql.Vector, error) {
time.Sleep(1 * time.Second)
return promql.Vector{}, nil
}
qf := MetricsQueryFunc(mockFunc, queries, failures, queryTime.WithLabelValues("userID"))

_, _ = qf(context.Background(), "test", time.Now())

require.Equal(t, 1, int(testutil.ToFloat64(queries)))
require.Equal(t, 0, int(testutil.ToFloat64(failures)))
require.LessOrEqual(t, float64(1), testutil.ToFloat64(queryTime.WithLabelValues("userID")))
}
4 changes: 4 additions & 0 deletions pkg/ruler/ruler.go
@@ -115,6 +115,8 @@ type Config struct {
DisabledTenants flagext.StringSliceCSV `yaml:"disabled_tenants"`

RingCheckPeriod time.Duration `yaml:"-"`

EnableQueryStats bool `yaml:"query_stats_enabled"`
}

// Validate config and returns error on failure
@@ -173,6 +175,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.Var(&cfg.EnabledTenants, "ruler.enabled-tenants", "Comma separated list of tenants whose rules this ruler can evaluate. If specified, only these tenants will be handled by ruler, otherwise this ruler can process rules from all tenants. Subject to sharding.")
f.Var(&cfg.DisabledTenants, "ruler.disabled-tenants", "Comma separated list of tenants whose rules this ruler cannot evaluate. If specified, a ruler that would normally pick the specified tenant(s) for processing will ignore them instead. Subject to sharding.")

f.BoolVar(&cfg.EnableQueryStats, "ruler.query-stats-enabled", false, "Report the wall time for ruler queries to complete as a metric.")

cfg.RingCheckPeriod = 5 * time.Second
}

1 change: 1 addition & 0 deletions pkg/ruler/ruler_test.go
@@ -60,6 +60,7 @@ func defaultRulerConfig(store rulestore.RuleStore) (Config, func()) {
cfg.Ring.ListenPort = 0
cfg.Ring.InstanceAddr = "localhost"
cfg.Ring.InstanceID = "localhost"
cfg.EnableQueryStats = false

// Create a cleanup function that will be called at the end of the test
cleanup := func() {