From 5e41c6b3c8c3e0460012824c47e43093d49d5439 Mon Sep 17 00:00:00 2001 From: Dominik Rosiek <58699848+sumo-drosiek@users.noreply.github.com> Date: Mon, 13 May 2024 21:44:58 +0200 Subject: [PATCH 01/53] [exporter/sumologic]: add sticky session (#33011) **Description:** Adds support for sticky session in order to better support AWS LB. This code is moved from Sumo Logic repository **Link to tracking Issue:** #32315 **Testing:** Tested manually **Documentation:** N/A --------- Signed-off-by: Dominik Rosiek --- .chloggen/drosiek-sumo-sticky-session.yaml | 27 ++++++++ exporter/sumologicexporter/README.md | 4 ++ exporter/sumologicexporter/config.go | 10 ++- exporter/sumologicexporter/exporter.go | 28 ++++++++ exporter/sumologicexporter/factory.go | 7 +- exporter/sumologicexporter/sender.go | 80 ++++++++++++++++------ exporter/sumologicexporter/sender_test.go | 2 + 7 files changed, 133 insertions(+), 25 deletions(-) create mode 100644 .chloggen/drosiek-sumo-sticky-session.yaml diff --git a/.chloggen/drosiek-sumo-sticky-session.yaml b/.chloggen/drosiek-sumo-sticky-session.yaml new file mode 100644 index 000000000000..25bd0643920d --- /dev/null +++ b/.chloggen/drosiek-sumo-sticky-session.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: sumologicexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: add sticky session support + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [32315] + +# (Optional) One or more lines of additional information to render under the primary note. 
+# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/exporter/sumologicexporter/README.md b/exporter/sumologicexporter/README.md index 39267e26e61e..1e354760c4e0 100644 --- a/exporter/sumologicexporter/README.md +++ b/exporter/sumologicexporter/README.md @@ -147,6 +147,10 @@ exporters: # maximum connection timeout is 55s, default = 5s timeout: + # defines if sticky session support is enable. + # default=false + sticky_session_enabled: {true, false} + # for below described queueing and retry related configuration please refer to: # https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md#configuration diff --git a/exporter/sumologicexporter/config.go b/exporter/sumologicexporter/config.go index c8746534969c..32457da54418 100644 --- a/exporter/sumologicexporter/config.go +++ b/exporter/sumologicexporter/config.go @@ -64,6 +64,10 @@ type Config struct { SourceHost string `mapstructure:"source_host"` // Name of the client Client string `mapstructure:"client"` + + // StickySessionEnabled defines if sticky session support is enable. + // By default this is false. 
+ StickySessionEnabled bool `mapstructure:"sticky_session_enabled"` } // createDefaultClientConfig returns default http client settings @@ -132,8 +136,10 @@ const ( DefaultClient string = "otelcol" // DefaultLogKey defines default LogKey value DefaultLogKey string = "log" - // DefaultGraphiteTemplate defines default template for Graphite - DefaultGraphiteTemplate string = "%{_metric_}" + // DefaultDropRoutingAttribute defines default DropRoutingAttribute + DefaultDropRoutingAttribute string = "" + // DefaultStickySessionEnabled defines default StickySessionEnabled value + DefaultStickySessionEnabled bool = false ) func (cfg *Config) Validate() error { diff --git a/exporter/sumologicexporter/exporter.go b/exporter/sumologicexporter/exporter.go index 9eca025ecd7c..ccff5f6f2512 100644 --- a/exporter/sumologicexporter/exporter.go +++ b/exporter/sumologicexporter/exporter.go @@ -54,6 +54,9 @@ type sumologicexporter struct { foundSumologicExtension bool sumologicExtension *sumologicextension.SumologicExtension + stickySessionCookieLock sync.RWMutex + stickySessionCookie string + id component.ID } @@ -289,6 +292,8 @@ func (se *sumologicexporter) pushLogsData(ctx context.Context, ld plog.Logs) err metricsURL, logsURL, tracesURL, + se.StickySessionCookie, + se.SetStickySessionCookie, se.id, ) @@ -366,6 +371,8 @@ func (se *sumologicexporter) pushMetricsData(ctx context.Context, md pmetric.Met metricsURL, logsURL, tracesURL, + se.StickySessionCookie, + se.SetStickySessionCookie, se.id, ) @@ -406,6 +413,27 @@ func (se *sumologicexporter) handleUnauthorizedErrors(ctx context.Context, errs } } +func (se *sumologicexporter) StickySessionCookie() string { + if se.foundSumologicExtension { + return se.sumologicExtension.StickySessionCookie() + } + + se.stickySessionCookieLock.RLock() + defer se.stickySessionCookieLock.RUnlock() + return se.stickySessionCookie +} + +func (se *sumologicexporter) SetStickySessionCookie(stickySessionCookie string) { + if se.foundSumologicExtension { + 
se.sumologicExtension.SetStickySessionCookie(stickySessionCookie) + return + } + + se.stickySessionCookieLock.Lock() + se.stickySessionCookie = stickySessionCookie + se.stickySessionCookieLock.Unlock() +} + // get the destination url for a given signal type // this mostly adds signal-specific suffixes if the format is otlp func getSignalURL(oCfg *Config, endpointURL string, signal component.DataType) (string, error) { diff --git a/exporter/sumologicexporter/factory.go b/exporter/sumologicexporter/factory.go index e1cad8f3d4d4..9a5fbbf7087b 100644 --- a/exporter/sumologicexporter/factory.go +++ b/exporter/sumologicexporter/factory.go @@ -40,9 +40,10 @@ func createDefaultConfig() component.Config { SourceHost: DefaultSourceHost, Client: DefaultClient, - ClientConfig: createDefaultClientConfig(), - BackOffConfig: configretry.NewDefaultBackOffConfig(), - QueueSettings: qs, + ClientConfig: createDefaultClientConfig(), + BackOffConfig: configretry.NewDefaultBackOffConfig(), + QueueSettings: qs, + StickySessionEnabled: DefaultStickySessionEnabled, } } diff --git a/exporter/sumologicexporter/sender.go b/exporter/sumologicexporter/sender.go index e47ee0ac10bb..1430bbf788ae 100644 --- a/exporter/sumologicexporter/sender.go +++ b/exporter/sumologicexporter/sender.go @@ -110,16 +110,18 @@ func (b *bodyBuilder) toCountingReader() *countingReader { } type sender struct { - logger *zap.Logger - config *Config - client *http.Client - filter filter - sources sourceFormats - prometheusFormatter prometheusFormatter - dataURLMetrics string - dataURLLogs string - dataURLTraces string - id component.ID + logger *zap.Logger + config *Config + client *http.Client + filter filter + sources sourceFormats + prometheusFormatter prometheusFormatter + dataURLMetrics string + dataURLLogs string + dataURLTraces string + stickySessionCookieFunc func() string + setStickySessionCookieFunc func(string) + id component.ID } const ( @@ -140,6 +142,7 @@ const ( contentTypeLogs string = 
"application/x-www-form-urlencoded" contentTypePrometheus string = "application/vnd.sumologic.prometheus" contentTypeOTLP string = "application/x-protobuf" + stickySessionKey string = "AWSALB" ) func newSender( @@ -152,19 +155,23 @@ func newSender( metricsURL string, logsURL string, tracesURL string, + stickySessionCookieFunc func() string, + setStickySessionCookieFunc func(string), id component.ID, ) *sender { return &sender{ - logger: logger, - config: cfg, - client: cl, - filter: f, - sources: s, - prometheusFormatter: pf, - dataURLMetrics: metricsURL, - dataURLLogs: logsURL, - dataURLTraces: tracesURL, - id: id, + logger: logger, + config: cfg, + client: cl, + filter: f, + sources: s, + prometheusFormatter: pf, + dataURLMetrics: metricsURL, + dataURLLogs: logsURL, + dataURLTraces: tracesURL, + stickySessionCookieFunc: stickySessionCookieFunc, + setStickySessionCookieFunc: setStickySessionCookieFunc, + id: id, } } @@ -181,6 +188,10 @@ func (s *sender) send(ctx context.Context, pipeline PipelineType, reader *counti return err } + if s.config.StickySessionEnabled { + s.addStickySessionCookie(req) + } + s.logger.Debug("Sending data", zap.String("pipeline", string(pipeline)), zap.Any("headers", req.Header), @@ -200,6 +211,10 @@ func (s *sender) send(ctx context.Context, pipeline PipelineType, reader *counti } func (s *sender) handleReceiverResponse(resp *http.Response) error { + if s.config.StickySessionEnabled { + s.updateStickySessionCookie(resp) + } + // API responds with a 200 or 204 with ConentLength set to 0 when all data // has been successfully ingested. 
if resp.ContentLength == 0 && (resp.StatusCode == 200 || resp.StatusCode == 204) { @@ -684,3 +699,28 @@ func (s *sender) recordMetrics(duration time.Duration, count int64, req *http.Re s.logger.Debug("error for recording metric for sent request", zap.Error(err)) } } + +func (s *sender) addStickySessionCookie(req *http.Request) { + currectCookieValue := s.stickySessionCookieFunc() + if currectCookieValue != "" { + cookie := &http.Cookie{ + Name: stickySessionKey, + Value: currectCookieValue, + } + req.AddCookie(cookie) + } +} + +func (s *sender) updateStickySessionCookie(resp *http.Response) { + cookies := resp.Cookies() + if len(cookies) > 0 { + for _, cookie := range cookies { + if cookie.Name == stickySessionKey { + if cookie.Value != s.stickySessionCookieFunc() { + s.setStickySessionCookieFunc(cookie.Value) + } + return + } + } + } +} diff --git a/exporter/sumologicexporter/sender_test.go b/exporter/sumologicexporter/sender_test.go index ceca87dede06..5cd06ff0dd85 100644 --- a/exporter/sumologicexporter/sender_test.go +++ b/exporter/sumologicexporter/sender_test.go @@ -114,6 +114,8 @@ func prepareSenderTest(t *testing.T, compression configcompression.Type, cb []fu testServer.URL, testServer.URL, testServer.URL, + func() string { return "" }, + func(string) {}, component.ID{}, ), } From d07e7b9f063822ed48abf3de3f2a283def9d1cc0 Mon Sep 17 00:00:00 2001 From: Curtis Robert Date: Mon, 13 May 2024 13:50:31 -0700 Subject: [PATCH 02/53] [receiver/sqlserver] Add more metrics (#32932) **Description:** This change adds a query, scraper, and some more metrics for data from the performance counter SQL server table. The query itself is mostly taken from Telegraf's SQL server plugin. This also reuses the existing metric `sqlserver.lock.wait.rate`, so now it will be available both from Windows performance counters, as well as other OSs when directly connecting to the SQL server instance. 
**Naming and format feedback on new metrics would be greatly appreciated.** **Link to tracking Issue:** #29865 **Testing:** Added tests for the query itself, as well as the new scraper. Co-authored-by: Daniel Jaglowski --- .../sqlserver_add_perf_counter_query.yaml | 32 + receiver/sqlserverreceiver/documentation.md | 30 + receiver/sqlserverreceiver/factory.go | 8 + .../internal/metadata/generated_config.go | 54 +- .../metadata/generated_config_test.go | 90 +- .../internal/metadata/generated_metrics.go | 298 +- .../metadata/generated_metrics_test.go | 45 + .../internal/metadata/testdata/config.yaml | 12 + receiver/sqlserverreceiver/metadata.yaml | 27 + receiver/sqlserverreceiver/queries.go | 179 ++ receiver/sqlserverreceiver/queries_test.go | 63 +- receiver/sqlserverreceiver/scraper.go | 52 +- receiver/sqlserverreceiver/scraper_test.go | 46 +- ...tabase_io.yaml => expectedDatabaseIO.yaml} | 0 .../testdata/expectedPerfCounters.yaml | 49 + .../testdata/perfCounterQueryData.txt | 2603 +++++++++++++++++ .../perfCounterQueryWithInstanceName.txt | 165 ++ .../perfCounterQueryWithoutInstanceName.txt | 165 ++ 18 files changed, 3769 insertions(+), 149 deletions(-) create mode 100644 .chloggen/sqlserver_add_perf_counter_query.yaml rename receiver/sqlserverreceiver/testdata/{expected_database_io.yaml => expectedDatabaseIO.yaml} (100%) create mode 100644 receiver/sqlserverreceiver/testdata/expectedPerfCounters.yaml create mode 100644 receiver/sqlserverreceiver/testdata/perfCounterQueryData.txt create mode 100644 receiver/sqlserverreceiver/testdata/perfCounterQueryWithInstanceName.txt create mode 100644 receiver/sqlserverreceiver/testdata/perfCounterQueryWithoutInstanceName.txt diff --git a/.chloggen/sqlserver_add_perf_counter_query.yaml b/.chloggen/sqlserver_add_perf_counter_query.yaml new file mode 100644 index 000000000000..103fb1071687 --- /dev/null +++ b/.chloggen/sqlserver_add_perf_counter_query.yaml @@ -0,0 +1,32 @@ +# Use this changelog template to create an entry for 
release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: sqlserverreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add more metrics + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [29865] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + Added metrics are: + - sqlserver.resource_pool.disk.throttled.read.rate + - sqlserver.resource_pool.disk.throttled.write.rate + - sqlserver.processes.blocked + These metrics are only available when directly connecting to the SQL server instance + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/receiver/sqlserverreceiver/documentation.md b/receiver/sqlserverreceiver/documentation.md index fc8f55a6ec6f..c20bc2f8ef27 100644 --- a/receiver/sqlserverreceiver/documentation.md +++ b/receiver/sqlserverreceiver/documentation.md @@ -246,6 +246,36 @@ This metric is only available when the receiver is configured to directly connec | logical_filename | The logical filename of the file being monitored. | Any Str | | file_type | The type of file being monitored. 
| Any Str | +### sqlserver.processes.blocked + +The number of processes that are currently blocked + +This metric is only available when the receiver is configured to directly connect to SQL Server. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {processes} | Gauge | Int | + +### sqlserver.resource_pool.disk.throttled.read.rate + +The number of read operations that were throttled in the last second + +This metric is only available when the receiver is configured to directly connect to SQL Server. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {reads}/s | Gauge | Int | + +### sqlserver.resource_pool.disk.throttled.write.rate + +The number of write operations that were throttled in the last second + +This metric is only available when the receiver is configured to directly connect to SQL Server. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {writes}/s | Gauge | Double | + ## Resource Attributes | Name | Description | Values | Enabled | diff --git a/receiver/sqlserverreceiver/factory.go b/receiver/sqlserverreceiver/factory.go index 90e20ead901b..c2eb63752e5f 100644 --- a/receiver/sqlserverreceiver/factory.go +++ b/receiver/sqlserverreceiver/factory.go @@ -43,6 +43,14 @@ func setupQueries(cfg *Config) []string { queries = append(queries, getSQLServerDatabaseIOQuery(cfg.InstanceName)) } + if cfg.MetricsBuilderConfig.Metrics.SqlserverResourcePoolDiskThrottledReadRate.Enabled || + cfg.MetricsBuilderConfig.Metrics.SqlserverResourcePoolDiskThrottledWriteRate.Enabled || + cfg.MetricsBuilderConfig.Metrics.SqlserverLockWaitRate.Enabled || + cfg.MetricsBuilderConfig.Metrics.SqlserverProcessesBlocked.Enabled { + + queries = append(queries, getSQLServerPerformanceCounterQuery(cfg.InstanceName)) + } + return queries } diff --git a/receiver/sqlserverreceiver/internal/metadata/generated_config.go b/receiver/sqlserverreceiver/internal/metadata/generated_config.go index 
1b915f45f047..df10d88f214e 100644 --- a/receiver/sqlserverreceiver/internal/metadata/generated_config.go +++ b/receiver/sqlserverreceiver/internal/metadata/generated_config.go @@ -28,27 +28,30 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { // MetricsConfig provides config for sqlserver metrics. type MetricsConfig struct { - SqlserverBatchRequestRate MetricConfig `mapstructure:"sqlserver.batch.request.rate"` - SqlserverBatchSQLCompilationRate MetricConfig `mapstructure:"sqlserver.batch.sql_compilation.rate"` - SqlserverBatchSQLRecompilationRate MetricConfig `mapstructure:"sqlserver.batch.sql_recompilation.rate"` - SqlserverDatabaseIoReadLatency MetricConfig `mapstructure:"sqlserver.database.io.read_latency"` - SqlserverLockWaitRate MetricConfig `mapstructure:"sqlserver.lock.wait.rate"` - SqlserverLockWaitTimeAvg MetricConfig `mapstructure:"sqlserver.lock.wait_time.avg"` - SqlserverPageBufferCacheHitRatio MetricConfig `mapstructure:"sqlserver.page.buffer_cache.hit_ratio"` - SqlserverPageCheckpointFlushRate MetricConfig `mapstructure:"sqlserver.page.checkpoint.flush.rate"` - SqlserverPageLazyWriteRate MetricConfig `mapstructure:"sqlserver.page.lazy_write.rate"` - SqlserverPageLifeExpectancy MetricConfig `mapstructure:"sqlserver.page.life_expectancy"` - SqlserverPageOperationRate MetricConfig `mapstructure:"sqlserver.page.operation.rate"` - SqlserverPageSplitRate MetricConfig `mapstructure:"sqlserver.page.split.rate"` - SqlserverTransactionRate MetricConfig `mapstructure:"sqlserver.transaction.rate"` - SqlserverTransactionWriteRate MetricConfig `mapstructure:"sqlserver.transaction.write.rate"` - SqlserverTransactionLogFlushDataRate MetricConfig `mapstructure:"sqlserver.transaction_log.flush.data.rate"` - SqlserverTransactionLogFlushRate MetricConfig `mapstructure:"sqlserver.transaction_log.flush.rate"` - SqlserverTransactionLogFlushWaitRate MetricConfig `mapstructure:"sqlserver.transaction_log.flush.wait.rate"` - SqlserverTransactionLogGrowthCount 
MetricConfig `mapstructure:"sqlserver.transaction_log.growth.count"` - SqlserverTransactionLogShrinkCount MetricConfig `mapstructure:"sqlserver.transaction_log.shrink.count"` - SqlserverTransactionLogUsage MetricConfig `mapstructure:"sqlserver.transaction_log.usage"` - SqlserverUserConnectionCount MetricConfig `mapstructure:"sqlserver.user.connection.count"` + SqlserverBatchRequestRate MetricConfig `mapstructure:"sqlserver.batch.request.rate"` + SqlserverBatchSQLCompilationRate MetricConfig `mapstructure:"sqlserver.batch.sql_compilation.rate"` + SqlserverBatchSQLRecompilationRate MetricConfig `mapstructure:"sqlserver.batch.sql_recompilation.rate"` + SqlserverDatabaseIoReadLatency MetricConfig `mapstructure:"sqlserver.database.io.read_latency"` + SqlserverLockWaitRate MetricConfig `mapstructure:"sqlserver.lock.wait.rate"` + SqlserverLockWaitTimeAvg MetricConfig `mapstructure:"sqlserver.lock.wait_time.avg"` + SqlserverPageBufferCacheHitRatio MetricConfig `mapstructure:"sqlserver.page.buffer_cache.hit_ratio"` + SqlserverPageCheckpointFlushRate MetricConfig `mapstructure:"sqlserver.page.checkpoint.flush.rate"` + SqlserverPageLazyWriteRate MetricConfig `mapstructure:"sqlserver.page.lazy_write.rate"` + SqlserverPageLifeExpectancy MetricConfig `mapstructure:"sqlserver.page.life_expectancy"` + SqlserverPageOperationRate MetricConfig `mapstructure:"sqlserver.page.operation.rate"` + SqlserverPageSplitRate MetricConfig `mapstructure:"sqlserver.page.split.rate"` + SqlserverProcessesBlocked MetricConfig `mapstructure:"sqlserver.processes.blocked"` + SqlserverResourcePoolDiskThrottledReadRate MetricConfig `mapstructure:"sqlserver.resource_pool.disk.throttled.read.rate"` + SqlserverResourcePoolDiskThrottledWriteRate MetricConfig `mapstructure:"sqlserver.resource_pool.disk.throttled.write.rate"` + SqlserverTransactionRate MetricConfig `mapstructure:"sqlserver.transaction.rate"` + SqlserverTransactionWriteRate MetricConfig `mapstructure:"sqlserver.transaction.write.rate"` + 
SqlserverTransactionLogFlushDataRate MetricConfig `mapstructure:"sqlserver.transaction_log.flush.data.rate"` + SqlserverTransactionLogFlushRate MetricConfig `mapstructure:"sqlserver.transaction_log.flush.rate"` + SqlserverTransactionLogFlushWaitRate MetricConfig `mapstructure:"sqlserver.transaction_log.flush.wait.rate"` + SqlserverTransactionLogGrowthCount MetricConfig `mapstructure:"sqlserver.transaction_log.growth.count"` + SqlserverTransactionLogShrinkCount MetricConfig `mapstructure:"sqlserver.transaction_log.shrink.count"` + SqlserverTransactionLogUsage MetricConfig `mapstructure:"sqlserver.transaction_log.usage"` + SqlserverUserConnectionCount MetricConfig `mapstructure:"sqlserver.user.connection.count"` } func DefaultMetricsConfig() MetricsConfig { @@ -89,6 +92,15 @@ func DefaultMetricsConfig() MetricsConfig { SqlserverPageSplitRate: MetricConfig{ Enabled: true, }, + SqlserverProcessesBlocked: MetricConfig{ + Enabled: false, + }, + SqlserverResourcePoolDiskThrottledReadRate: MetricConfig{ + Enabled: false, + }, + SqlserverResourcePoolDiskThrottledWriteRate: MetricConfig{ + Enabled: false, + }, SqlserverTransactionRate: MetricConfig{ Enabled: true, }, diff --git a/receiver/sqlserverreceiver/internal/metadata/generated_config_test.go b/receiver/sqlserverreceiver/internal/metadata/generated_config_test.go index b1c2ca91094f..b8b02ebd9c5e 100644 --- a/receiver/sqlserverreceiver/internal/metadata/generated_config_test.go +++ b/receiver/sqlserverreceiver/internal/metadata/generated_config_test.go @@ -26,27 +26,30 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "all_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ - SqlserverBatchRequestRate: MetricConfig{Enabled: true}, - SqlserverBatchSQLCompilationRate: MetricConfig{Enabled: true}, - SqlserverBatchSQLRecompilationRate: MetricConfig{Enabled: true}, - SqlserverDatabaseIoReadLatency: MetricConfig{Enabled: true}, - SqlserverLockWaitRate: MetricConfig{Enabled: true}, - SqlserverLockWaitTimeAvg: 
MetricConfig{Enabled: true}, - SqlserverPageBufferCacheHitRatio: MetricConfig{Enabled: true}, - SqlserverPageCheckpointFlushRate: MetricConfig{Enabled: true}, - SqlserverPageLazyWriteRate: MetricConfig{Enabled: true}, - SqlserverPageLifeExpectancy: MetricConfig{Enabled: true}, - SqlserverPageOperationRate: MetricConfig{Enabled: true}, - SqlserverPageSplitRate: MetricConfig{Enabled: true}, - SqlserverTransactionRate: MetricConfig{Enabled: true}, - SqlserverTransactionWriteRate: MetricConfig{Enabled: true}, - SqlserverTransactionLogFlushDataRate: MetricConfig{Enabled: true}, - SqlserverTransactionLogFlushRate: MetricConfig{Enabled: true}, - SqlserverTransactionLogFlushWaitRate: MetricConfig{Enabled: true}, - SqlserverTransactionLogGrowthCount: MetricConfig{Enabled: true}, - SqlserverTransactionLogShrinkCount: MetricConfig{Enabled: true}, - SqlserverTransactionLogUsage: MetricConfig{Enabled: true}, - SqlserverUserConnectionCount: MetricConfig{Enabled: true}, + SqlserverBatchRequestRate: MetricConfig{Enabled: true}, + SqlserverBatchSQLCompilationRate: MetricConfig{Enabled: true}, + SqlserverBatchSQLRecompilationRate: MetricConfig{Enabled: true}, + SqlserverDatabaseIoReadLatency: MetricConfig{Enabled: true}, + SqlserverLockWaitRate: MetricConfig{Enabled: true}, + SqlserverLockWaitTimeAvg: MetricConfig{Enabled: true}, + SqlserverPageBufferCacheHitRatio: MetricConfig{Enabled: true}, + SqlserverPageCheckpointFlushRate: MetricConfig{Enabled: true}, + SqlserverPageLazyWriteRate: MetricConfig{Enabled: true}, + SqlserverPageLifeExpectancy: MetricConfig{Enabled: true}, + SqlserverPageOperationRate: MetricConfig{Enabled: true}, + SqlserverPageSplitRate: MetricConfig{Enabled: true}, + SqlserverProcessesBlocked: MetricConfig{Enabled: true}, + SqlserverResourcePoolDiskThrottledReadRate: MetricConfig{Enabled: true}, + SqlserverResourcePoolDiskThrottledWriteRate: MetricConfig{Enabled: true}, + SqlserverTransactionRate: MetricConfig{Enabled: true}, + SqlserverTransactionWriteRate: 
MetricConfig{Enabled: true}, + SqlserverTransactionLogFlushDataRate: MetricConfig{Enabled: true}, + SqlserverTransactionLogFlushRate: MetricConfig{Enabled: true}, + SqlserverTransactionLogFlushWaitRate: MetricConfig{Enabled: true}, + SqlserverTransactionLogGrowthCount: MetricConfig{Enabled: true}, + SqlserverTransactionLogShrinkCount: MetricConfig{Enabled: true}, + SqlserverTransactionLogUsage: MetricConfig{Enabled: true}, + SqlserverUserConnectionCount: MetricConfig{Enabled: true}, }, ResourceAttributes: ResourceAttributesConfig{ SqlserverComputerName: ResourceAttributeConfig{Enabled: true}, @@ -59,27 +62,30 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "none_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ - SqlserverBatchRequestRate: MetricConfig{Enabled: false}, - SqlserverBatchSQLCompilationRate: MetricConfig{Enabled: false}, - SqlserverBatchSQLRecompilationRate: MetricConfig{Enabled: false}, - SqlserverDatabaseIoReadLatency: MetricConfig{Enabled: false}, - SqlserverLockWaitRate: MetricConfig{Enabled: false}, - SqlserverLockWaitTimeAvg: MetricConfig{Enabled: false}, - SqlserverPageBufferCacheHitRatio: MetricConfig{Enabled: false}, - SqlserverPageCheckpointFlushRate: MetricConfig{Enabled: false}, - SqlserverPageLazyWriteRate: MetricConfig{Enabled: false}, - SqlserverPageLifeExpectancy: MetricConfig{Enabled: false}, - SqlserverPageOperationRate: MetricConfig{Enabled: false}, - SqlserverPageSplitRate: MetricConfig{Enabled: false}, - SqlserverTransactionRate: MetricConfig{Enabled: false}, - SqlserverTransactionWriteRate: MetricConfig{Enabled: false}, - SqlserverTransactionLogFlushDataRate: MetricConfig{Enabled: false}, - SqlserverTransactionLogFlushRate: MetricConfig{Enabled: false}, - SqlserverTransactionLogFlushWaitRate: MetricConfig{Enabled: false}, - SqlserverTransactionLogGrowthCount: MetricConfig{Enabled: false}, - SqlserverTransactionLogShrinkCount: MetricConfig{Enabled: false}, - SqlserverTransactionLogUsage: MetricConfig{Enabled: false}, 
- SqlserverUserConnectionCount: MetricConfig{Enabled: false}, + SqlserverBatchRequestRate: MetricConfig{Enabled: false}, + SqlserverBatchSQLCompilationRate: MetricConfig{Enabled: false}, + SqlserverBatchSQLRecompilationRate: MetricConfig{Enabled: false}, + SqlserverDatabaseIoReadLatency: MetricConfig{Enabled: false}, + SqlserverLockWaitRate: MetricConfig{Enabled: false}, + SqlserverLockWaitTimeAvg: MetricConfig{Enabled: false}, + SqlserverPageBufferCacheHitRatio: MetricConfig{Enabled: false}, + SqlserverPageCheckpointFlushRate: MetricConfig{Enabled: false}, + SqlserverPageLazyWriteRate: MetricConfig{Enabled: false}, + SqlserverPageLifeExpectancy: MetricConfig{Enabled: false}, + SqlserverPageOperationRate: MetricConfig{Enabled: false}, + SqlserverPageSplitRate: MetricConfig{Enabled: false}, + SqlserverProcessesBlocked: MetricConfig{Enabled: false}, + SqlserverResourcePoolDiskThrottledReadRate: MetricConfig{Enabled: false}, + SqlserverResourcePoolDiskThrottledWriteRate: MetricConfig{Enabled: false}, + SqlserverTransactionRate: MetricConfig{Enabled: false}, + SqlserverTransactionWriteRate: MetricConfig{Enabled: false}, + SqlserverTransactionLogFlushDataRate: MetricConfig{Enabled: false}, + SqlserverTransactionLogFlushRate: MetricConfig{Enabled: false}, + SqlserverTransactionLogFlushWaitRate: MetricConfig{Enabled: false}, + SqlserverTransactionLogGrowthCount: MetricConfig{Enabled: false}, + SqlserverTransactionLogShrinkCount: MetricConfig{Enabled: false}, + SqlserverTransactionLogUsage: MetricConfig{Enabled: false}, + SqlserverUserConnectionCount: MetricConfig{Enabled: false}, }, ResourceAttributes: ResourceAttributesConfig{ SqlserverComputerName: ResourceAttributeConfig{Enabled: false}, diff --git a/receiver/sqlserverreceiver/internal/metadata/generated_metrics.go b/receiver/sqlserverreceiver/internal/metadata/generated_metrics.go index 71475f25e04f..ceb4f1797d8e 100644 --- a/receiver/sqlserverreceiver/internal/metadata/generated_metrics.go +++ 
b/receiver/sqlserverreceiver/internal/metadata/generated_metrics.go @@ -3,6 +3,8 @@ package metadata import ( + "fmt" + "strconv" "time" "go.opentelemetry.io/collector/component" @@ -634,6 +636,153 @@ func newMetricSqlserverPageSplitRate(cfg MetricConfig) metricSqlserverPageSplitR return m } +type metricSqlserverProcessesBlocked struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills sqlserver.processes.blocked metric with initial data. +func (m *metricSqlserverProcessesBlocked) init() { + m.data.SetName("sqlserver.processes.blocked") + m.data.SetDescription("The number of processes that are currently blocked") + m.data.SetUnit("{processes}") + m.data.SetEmptyGauge() +} + +func (m *metricSqlserverProcessesBlocked) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSqlserverProcessesBlocked) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricSqlserverProcessesBlocked) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSqlserverProcessesBlocked(cfg MetricConfig) metricSqlserverProcessesBlocked { + m := metricSqlserverProcessesBlocked{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSqlserverResourcePoolDiskThrottledReadRate struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills sqlserver.resource_pool.disk.throttled.read.rate metric with initial data. +func (m *metricSqlserverResourcePoolDiskThrottledReadRate) init() { + m.data.SetName("sqlserver.resource_pool.disk.throttled.read.rate") + m.data.SetDescription("The number of read operations that were throttled in the last second") + m.data.SetUnit("{reads}/s") + m.data.SetEmptyGauge() +} + +func (m *metricSqlserverResourcePoolDiskThrottledReadRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSqlserverResourcePoolDiskThrottledReadRate) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricSqlserverResourcePoolDiskThrottledReadRate) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSqlserverResourcePoolDiskThrottledReadRate(cfg MetricConfig) metricSqlserverResourcePoolDiskThrottledReadRate { + m := metricSqlserverResourcePoolDiskThrottledReadRate{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSqlserverResourcePoolDiskThrottledWriteRate struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills sqlserver.resource_pool.disk.throttled.write.rate metric with initial data. +func (m *metricSqlserverResourcePoolDiskThrottledWriteRate) init() { + m.data.SetName("sqlserver.resource_pool.disk.throttled.write.rate") + m.data.SetDescription("The number of write operations that were throttled in the last second") + m.data.SetUnit("{writes}/s") + m.data.SetEmptyGauge() +} + +func (m *metricSqlserverResourcePoolDiskThrottledWriteRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSqlserverResourcePoolDiskThrottledWriteRate) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricSqlserverResourcePoolDiskThrottledWriteRate) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSqlserverResourcePoolDiskThrottledWriteRate(cfg MetricConfig) metricSqlserverResourcePoolDiskThrottledWriteRate { + m := metricSqlserverResourcePoolDiskThrottledWriteRate{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricSqlserverTransactionRate struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -1082,34 +1231,37 @@ func newMetricSqlserverUserConnectionCount(cfg MetricConfig) metricSqlserverUser // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user config. type MetricsBuilder struct { - config MetricsBuilderConfig // config of the metrics builder. - startTime pcommon.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. - buildInfo component.BuildInfo // contains version information. 
- resourceAttributeIncludeFilter map[string]filter.Filter - resourceAttributeExcludeFilter map[string]filter.Filter - metricSqlserverBatchRequestRate metricSqlserverBatchRequestRate - metricSqlserverBatchSQLCompilationRate metricSqlserverBatchSQLCompilationRate - metricSqlserverBatchSQLRecompilationRate metricSqlserverBatchSQLRecompilationRate - metricSqlserverDatabaseIoReadLatency metricSqlserverDatabaseIoReadLatency - metricSqlserverLockWaitRate metricSqlserverLockWaitRate - metricSqlserverLockWaitTimeAvg metricSqlserverLockWaitTimeAvg - metricSqlserverPageBufferCacheHitRatio metricSqlserverPageBufferCacheHitRatio - metricSqlserverPageCheckpointFlushRate metricSqlserverPageCheckpointFlushRate - metricSqlserverPageLazyWriteRate metricSqlserverPageLazyWriteRate - metricSqlserverPageLifeExpectancy metricSqlserverPageLifeExpectancy - metricSqlserverPageOperationRate metricSqlserverPageOperationRate - metricSqlserverPageSplitRate metricSqlserverPageSplitRate - metricSqlserverTransactionRate metricSqlserverTransactionRate - metricSqlserverTransactionWriteRate metricSqlserverTransactionWriteRate - metricSqlserverTransactionLogFlushDataRate metricSqlserverTransactionLogFlushDataRate - metricSqlserverTransactionLogFlushRate metricSqlserverTransactionLogFlushRate - metricSqlserverTransactionLogFlushWaitRate metricSqlserverTransactionLogFlushWaitRate - metricSqlserverTransactionLogGrowthCount metricSqlserverTransactionLogGrowthCount - metricSqlserverTransactionLogShrinkCount metricSqlserverTransactionLogShrinkCount - metricSqlserverTransactionLogUsage metricSqlserverTransactionLogUsage - metricSqlserverUserConnectionCount metricSqlserverUserConnectionCount + config MetricsBuilderConfig // config of the metrics builder. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. 
+ buildInfo component.BuildInfo // contains version information. + resourceAttributeIncludeFilter map[string]filter.Filter + resourceAttributeExcludeFilter map[string]filter.Filter + metricSqlserverBatchRequestRate metricSqlserverBatchRequestRate + metricSqlserverBatchSQLCompilationRate metricSqlserverBatchSQLCompilationRate + metricSqlserverBatchSQLRecompilationRate metricSqlserverBatchSQLRecompilationRate + metricSqlserverDatabaseIoReadLatency metricSqlserverDatabaseIoReadLatency + metricSqlserverLockWaitRate metricSqlserverLockWaitRate + metricSqlserverLockWaitTimeAvg metricSqlserverLockWaitTimeAvg + metricSqlserverPageBufferCacheHitRatio metricSqlserverPageBufferCacheHitRatio + metricSqlserverPageCheckpointFlushRate metricSqlserverPageCheckpointFlushRate + metricSqlserverPageLazyWriteRate metricSqlserverPageLazyWriteRate + metricSqlserverPageLifeExpectancy metricSqlserverPageLifeExpectancy + metricSqlserverPageOperationRate metricSqlserverPageOperationRate + metricSqlserverPageSplitRate metricSqlserverPageSplitRate + metricSqlserverProcessesBlocked metricSqlserverProcessesBlocked + metricSqlserverResourcePoolDiskThrottledReadRate metricSqlserverResourcePoolDiskThrottledReadRate + metricSqlserverResourcePoolDiskThrottledWriteRate metricSqlserverResourcePoolDiskThrottledWriteRate + metricSqlserverTransactionRate metricSqlserverTransactionRate + metricSqlserverTransactionWriteRate metricSqlserverTransactionWriteRate + metricSqlserverTransactionLogFlushDataRate metricSqlserverTransactionLogFlushDataRate + metricSqlserverTransactionLogFlushRate metricSqlserverTransactionLogFlushRate + metricSqlserverTransactionLogFlushWaitRate metricSqlserverTransactionLogFlushWaitRate + metricSqlserverTransactionLogGrowthCount metricSqlserverTransactionLogGrowthCount + metricSqlserverTransactionLogShrinkCount metricSqlserverTransactionLogShrinkCount + metricSqlserverTransactionLogUsage metricSqlserverTransactionLogUsage + metricSqlserverUserConnectionCount 
metricSqlserverUserConnectionCount } // metricBuilderOption applies changes to default metrics builder. @@ -1124,33 +1276,36 @@ func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - config: mbc, - startTime: pcommon.NewTimestampFromTime(time.Now()), - metricsBuffer: pmetric.NewMetrics(), - buildInfo: settings.BuildInfo, - metricSqlserverBatchRequestRate: newMetricSqlserverBatchRequestRate(mbc.Metrics.SqlserverBatchRequestRate), - metricSqlserverBatchSQLCompilationRate: newMetricSqlserverBatchSQLCompilationRate(mbc.Metrics.SqlserverBatchSQLCompilationRate), - metricSqlserverBatchSQLRecompilationRate: newMetricSqlserverBatchSQLRecompilationRate(mbc.Metrics.SqlserverBatchSQLRecompilationRate), - metricSqlserverDatabaseIoReadLatency: newMetricSqlserverDatabaseIoReadLatency(mbc.Metrics.SqlserverDatabaseIoReadLatency), - metricSqlserverLockWaitRate: newMetricSqlserverLockWaitRate(mbc.Metrics.SqlserverLockWaitRate), - metricSqlserverLockWaitTimeAvg: newMetricSqlserverLockWaitTimeAvg(mbc.Metrics.SqlserverLockWaitTimeAvg), - metricSqlserverPageBufferCacheHitRatio: newMetricSqlserverPageBufferCacheHitRatio(mbc.Metrics.SqlserverPageBufferCacheHitRatio), - metricSqlserverPageCheckpointFlushRate: newMetricSqlserverPageCheckpointFlushRate(mbc.Metrics.SqlserverPageCheckpointFlushRate), - metricSqlserverPageLazyWriteRate: newMetricSqlserverPageLazyWriteRate(mbc.Metrics.SqlserverPageLazyWriteRate), - metricSqlserverPageLifeExpectancy: newMetricSqlserverPageLifeExpectancy(mbc.Metrics.SqlserverPageLifeExpectancy), - metricSqlserverPageOperationRate: newMetricSqlserverPageOperationRate(mbc.Metrics.SqlserverPageOperationRate), - metricSqlserverPageSplitRate: newMetricSqlserverPageSplitRate(mbc.Metrics.SqlserverPageSplitRate), - metricSqlserverTransactionRate: 
newMetricSqlserverTransactionRate(mbc.Metrics.SqlserverTransactionRate), - metricSqlserverTransactionWriteRate: newMetricSqlserverTransactionWriteRate(mbc.Metrics.SqlserverTransactionWriteRate), - metricSqlserverTransactionLogFlushDataRate: newMetricSqlserverTransactionLogFlushDataRate(mbc.Metrics.SqlserverTransactionLogFlushDataRate), - metricSqlserverTransactionLogFlushRate: newMetricSqlserverTransactionLogFlushRate(mbc.Metrics.SqlserverTransactionLogFlushRate), - metricSqlserverTransactionLogFlushWaitRate: newMetricSqlserverTransactionLogFlushWaitRate(mbc.Metrics.SqlserverTransactionLogFlushWaitRate), - metricSqlserverTransactionLogGrowthCount: newMetricSqlserverTransactionLogGrowthCount(mbc.Metrics.SqlserverTransactionLogGrowthCount), - metricSqlserverTransactionLogShrinkCount: newMetricSqlserverTransactionLogShrinkCount(mbc.Metrics.SqlserverTransactionLogShrinkCount), - metricSqlserverTransactionLogUsage: newMetricSqlserverTransactionLogUsage(mbc.Metrics.SqlserverTransactionLogUsage), - metricSqlserverUserConnectionCount: newMetricSqlserverUserConnectionCount(mbc.Metrics.SqlserverUserConnectionCount), - resourceAttributeIncludeFilter: make(map[string]filter.Filter), - resourceAttributeExcludeFilter: make(map[string]filter.Filter), + config: mbc, + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), + buildInfo: settings.BuildInfo, + metricSqlserverBatchRequestRate: newMetricSqlserverBatchRequestRate(mbc.Metrics.SqlserverBatchRequestRate), + metricSqlserverBatchSQLCompilationRate: newMetricSqlserverBatchSQLCompilationRate(mbc.Metrics.SqlserverBatchSQLCompilationRate), + metricSqlserverBatchSQLRecompilationRate: newMetricSqlserverBatchSQLRecompilationRate(mbc.Metrics.SqlserverBatchSQLRecompilationRate), + metricSqlserverDatabaseIoReadLatency: newMetricSqlserverDatabaseIoReadLatency(mbc.Metrics.SqlserverDatabaseIoReadLatency), + metricSqlserverLockWaitRate: newMetricSqlserverLockWaitRate(mbc.Metrics.SqlserverLockWaitRate), 
+ metricSqlserverLockWaitTimeAvg: newMetricSqlserverLockWaitTimeAvg(mbc.Metrics.SqlserverLockWaitTimeAvg), + metricSqlserverPageBufferCacheHitRatio: newMetricSqlserverPageBufferCacheHitRatio(mbc.Metrics.SqlserverPageBufferCacheHitRatio), + metricSqlserverPageCheckpointFlushRate: newMetricSqlserverPageCheckpointFlushRate(mbc.Metrics.SqlserverPageCheckpointFlushRate), + metricSqlserverPageLazyWriteRate: newMetricSqlserverPageLazyWriteRate(mbc.Metrics.SqlserverPageLazyWriteRate), + metricSqlserverPageLifeExpectancy: newMetricSqlserverPageLifeExpectancy(mbc.Metrics.SqlserverPageLifeExpectancy), + metricSqlserverPageOperationRate: newMetricSqlserverPageOperationRate(mbc.Metrics.SqlserverPageOperationRate), + metricSqlserverPageSplitRate: newMetricSqlserverPageSplitRate(mbc.Metrics.SqlserverPageSplitRate), + metricSqlserverProcessesBlocked: newMetricSqlserverProcessesBlocked(mbc.Metrics.SqlserverProcessesBlocked), + metricSqlserverResourcePoolDiskThrottledReadRate: newMetricSqlserverResourcePoolDiskThrottledReadRate(mbc.Metrics.SqlserverResourcePoolDiskThrottledReadRate), + metricSqlserverResourcePoolDiskThrottledWriteRate: newMetricSqlserverResourcePoolDiskThrottledWriteRate(mbc.Metrics.SqlserverResourcePoolDiskThrottledWriteRate), + metricSqlserverTransactionRate: newMetricSqlserverTransactionRate(mbc.Metrics.SqlserverTransactionRate), + metricSqlserverTransactionWriteRate: newMetricSqlserverTransactionWriteRate(mbc.Metrics.SqlserverTransactionWriteRate), + metricSqlserverTransactionLogFlushDataRate: newMetricSqlserverTransactionLogFlushDataRate(mbc.Metrics.SqlserverTransactionLogFlushDataRate), + metricSqlserverTransactionLogFlushRate: newMetricSqlserverTransactionLogFlushRate(mbc.Metrics.SqlserverTransactionLogFlushRate), + metricSqlserverTransactionLogFlushWaitRate: newMetricSqlserverTransactionLogFlushWaitRate(mbc.Metrics.SqlserverTransactionLogFlushWaitRate), + metricSqlserverTransactionLogGrowthCount: 
newMetricSqlserverTransactionLogGrowthCount(mbc.Metrics.SqlserverTransactionLogGrowthCount), + metricSqlserverTransactionLogShrinkCount: newMetricSqlserverTransactionLogShrinkCount(mbc.Metrics.SqlserverTransactionLogShrinkCount), + metricSqlserverTransactionLogUsage: newMetricSqlserverTransactionLogUsage(mbc.Metrics.SqlserverTransactionLogUsage), + metricSqlserverUserConnectionCount: newMetricSqlserverUserConnectionCount(mbc.Metrics.SqlserverUserConnectionCount), + resourceAttributeIncludeFilter: make(map[string]filter.Filter), + resourceAttributeExcludeFilter: make(map[string]filter.Filter), } if mbc.ResourceAttributes.SqlserverComputerName.MetricsInclude != nil { mb.resourceAttributeIncludeFilter["sqlserver.computer.name"] = filter.CreateFilter(mbc.ResourceAttributes.SqlserverComputerName.MetricsInclude) @@ -1243,6 +1398,9 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricSqlserverPageLifeExpectancy.emit(ils.Metrics()) mb.metricSqlserverPageOperationRate.emit(ils.Metrics()) mb.metricSqlserverPageSplitRate.emit(ils.Metrics()) + mb.metricSqlserverProcessesBlocked.emit(ils.Metrics()) + mb.metricSqlserverResourcePoolDiskThrottledReadRate.emit(ils.Metrics()) + mb.metricSqlserverResourcePoolDiskThrottledWriteRate.emit(ils.Metrics()) mb.metricSqlserverTransactionRate.emit(ils.Metrics()) mb.metricSqlserverTransactionWriteRate.emit(ils.Metrics()) mb.metricSqlserverTransactionLogFlushDataRate.emit(ils.Metrics()) @@ -1343,6 +1501,36 @@ func (mb *MetricsBuilder) RecordSqlserverPageSplitRateDataPoint(ts pcommon.Times mb.metricSqlserverPageSplitRate.recordDataPoint(mb.startTime, ts, val) } +// RecordSqlserverProcessesBlockedDataPoint adds a data point to sqlserver.processes.blocked metric. 
+func (mb *MetricsBuilder) RecordSqlserverProcessesBlockedDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for SqlserverProcessesBlocked, value was %s: %w", inputVal, err) + } + mb.metricSqlserverProcessesBlocked.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordSqlserverResourcePoolDiskThrottledReadRateDataPoint adds a data point to sqlserver.resource_pool.disk.throttled.read.rate metric. +func (mb *MetricsBuilder) RecordSqlserverResourcePoolDiskThrottledReadRateDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseInt(inputVal, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse int64 for SqlserverResourcePoolDiskThrottledReadRate, value was %s: %w", inputVal, err) + } + mb.metricSqlserverResourcePoolDiskThrottledReadRate.recordDataPoint(mb.startTime, ts, val) + return nil +} + +// RecordSqlserverResourcePoolDiskThrottledWriteRateDataPoint adds a data point to sqlserver.resource_pool.disk.throttled.write.rate metric. +func (mb *MetricsBuilder) RecordSqlserverResourcePoolDiskThrottledWriteRateDataPoint(ts pcommon.Timestamp, inputVal string) error { + val, err := strconv.ParseFloat(inputVal, 64) + if err != nil { + return fmt.Errorf("failed to parse float64 for SqlserverResourcePoolDiskThrottledWriteRate, value was %s: %w", inputVal, err) + } + mb.metricSqlserverResourcePoolDiskThrottledWriteRate.recordDataPoint(mb.startTime, ts, val) + return nil +} + // RecordSqlserverTransactionRateDataPoint adds a data point to sqlserver.transaction.rate metric. 
func (mb *MetricsBuilder) RecordSqlserverTransactionRateDataPoint(ts pcommon.Timestamp, val float64) { mb.metricSqlserverTransactionRate.recordDataPoint(mb.startTime, ts, val) diff --git a/receiver/sqlserverreceiver/internal/metadata/generated_metrics_test.go b/receiver/sqlserverreceiver/internal/metadata/generated_metrics_test.go index 11fbcc4b5303..b8778f9b26fd 100644 --- a/receiver/sqlserverreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/sqlserverreceiver/internal/metadata/generated_metrics_test.go @@ -115,6 +115,15 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordSqlserverPageSplitRateDataPoint(ts, 1) + allMetricsCount++ + mb.RecordSqlserverProcessesBlockedDataPoint(ts, "1") + + allMetricsCount++ + mb.RecordSqlserverResourcePoolDiskThrottledReadRateDataPoint(ts, "1") + + allMetricsCount++ + mb.RecordSqlserverResourcePoolDiskThrottledWriteRateDataPoint(ts, "1") + defaultMetricsCount++ allMetricsCount++ mb.RecordSqlserverTransactionRateDataPoint(ts, 1) @@ -335,6 +344,42 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) assert.Equal(t, float64(1), dp.DoubleValue()) + case "sqlserver.processes.blocked": + assert.False(t, validatedMetrics["sqlserver.processes.blocked"], "Found a duplicate in the metrics slice: sqlserver.processes.blocked") + validatedMetrics["sqlserver.processes.blocked"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of processes that are currently blocked", ms.At(i).Description()) + assert.Equal(t, "{processes}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case 
"sqlserver.resource_pool.disk.throttled.read.rate": + assert.False(t, validatedMetrics["sqlserver.resource_pool.disk.throttled.read.rate"], "Found a duplicate in the metrics slice: sqlserver.resource_pool.disk.throttled.read.rate") + validatedMetrics["sqlserver.resource_pool.disk.throttled.read.rate"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of read operations that were throttled in the last second", ms.At(i).Description()) + assert.Equal(t, "{reads}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "sqlserver.resource_pool.disk.throttled.write.rate": + assert.False(t, validatedMetrics["sqlserver.resource_pool.disk.throttled.write.rate"], "Found a duplicate in the metrics slice: sqlserver.resource_pool.disk.throttled.write.rate") + validatedMetrics["sqlserver.resource_pool.disk.throttled.write.rate"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of write operations that were throttled in the last second", ms.At(i).Description()) + assert.Equal(t, "{writes}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) case "sqlserver.transaction.rate": assert.False(t, validatedMetrics["sqlserver.transaction.rate"], "Found a duplicate in the metrics slice: sqlserver.transaction.rate") validatedMetrics["sqlserver.transaction.rate"] = true diff --git a/receiver/sqlserverreceiver/internal/metadata/testdata/config.yaml 
b/receiver/sqlserverreceiver/internal/metadata/testdata/config.yaml index e7470fd6536f..e12c1eb88bf7 100644 --- a/receiver/sqlserverreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/sqlserverreceiver/internal/metadata/testdata/config.yaml @@ -25,6 +25,12 @@ all_set: enabled: true sqlserver.page.split.rate: enabled: true + sqlserver.processes.blocked: + enabled: true + sqlserver.resource_pool.disk.throttled.read.rate: + enabled: true + sqlserver.resource_pool.disk.throttled.write.rate: + enabled: true sqlserver.transaction.rate: enabled: true sqlserver.transaction.write.rate: @@ -76,6 +82,12 @@ none_set: enabled: false sqlserver.page.split.rate: enabled: false + sqlserver.processes.blocked: + enabled: false + sqlserver.resource_pool.disk.throttled.read.rate: + enabled: false + sqlserver.resource_pool.disk.throttled.write.rate: + enabled: false sqlserver.transaction.rate: enabled: false sqlserver.transaction.write.rate: diff --git a/receiver/sqlserverreceiver/metadata.yaml b/receiver/sqlserverreceiver/metadata.yaml index 27be11e86c25..c52bca3723c9 100644 --- a/receiver/sqlserverreceiver/metadata.yaml +++ b/receiver/sqlserverreceiver/metadata.yaml @@ -196,6 +196,33 @@ metrics: value_type: double attributes: [physical_filename, logical_filename, file_type] extended_documentation: This metric is only available when the receiver is configured to directly connect to SQL Server. + sqlserver.resource_pool.disk.throttled.read.rate: + enabled: false + description: The number of read operations that were throttled in the last second + unit: "{reads}/s" + gauge: + value_type: int + input_type: string + attributes: [] + extended_documentation: This metric is only available when the receiver is configured to directly connect to SQL Server. 
+ sqlserver.resource_pool.disk.throttled.write.rate: + enabled: false + description: The number of write operations that were throttled in the last second + unit: "{writes}/s" + gauge: + value_type: double + input_type: string + attributes: [] + extended_documentation: This metric is only available when the receiver is configured to directly connect to SQL Server. + sqlserver.processes.blocked: + enabled: false + description: The number of processes that are currently blocked + unit: "{processes}" + gauge: + value_type: int + input_type: string + attributes: [] + extended_documentation: This metric is only available when the receiver is configured to directly connect to SQL Server. tests: config: diff --git a/receiver/sqlserverreceiver/queries.go b/receiver/sqlserverreceiver/queries.go index 1fcb6dce1017..97d277d1a0e6 100644 --- a/receiver/sqlserverreceiver/queries.go +++ b/receiver/sqlserverreceiver/queries.go @@ -5,6 +5,7 @@ package sqlserverreceiver // import "github.com/open-telemetry/opentelemetry-col import ( "fmt" + "strings" ) // Direct access to queries is not recommended: The receiver allows filtering based on @@ -62,3 +63,181 @@ func getSQLServerDatabaseIOQuery(instanceName string) string { return fmt.Sprintf(sqlServerDatabaseIOQuery, "") } + +const sqlServerPerformanceCountersQuery string = ` +SET DEADLOCK_PRIORITY -10; +IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterprise,Express*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Connection string Server:'+ @@ServerName + ',Database:' + DB_NAME() +' is not a SQL Server Standard, Enterprise or Express. 
This query is only supported on these editions.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +DECLARE + @SqlStatement AS nvarchar(max) + ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),3) AS int) + +DECLARE @PCounters TABLE +( + [object_name] nvarchar(128) + ,[counter_name] nvarchar(128) + ,[instance_name] nvarchar(128) + ,[cntr_value] bigint + ,[cntr_type] int + PRIMARY KEY([object_name], [counter_name], [instance_name]) +); + +WITH PerfCounters AS ( +SELECT DISTINCT + RTRIM(spi.[object_name]) [object_name] + ,RTRIM(spi.[counter_name]) [counter_name] + ,RTRIM(spi.[instance_name]) AS [instance_name] + ,CAST(spi.[cntr_value] AS bigint) AS [cntr_value] + ,spi.[cntr_type] + FROM sys.dm_os_performance_counters AS spi + WHERE + counter_name IN ( + 'SQL Compilations/sec' + ,'SQL Re-Compilations/sec' + ,'User Connections' + ,'Batch Requests/sec' + ,'Logouts/sec' + ,'Logins/sec' + ,'Processes blocked' + ,'Latch Waits/sec' + ,'Average Latch Wait Time (ms)' + ,'Full Scans/sec' + ,'Index Searches/sec' + ,'Page Splits/sec' + ,'Page lookups/sec' + ,'Page reads/sec' + ,'Page writes/sec' + ,'Readahead pages/sec' + ,'Lazy writes/sec' + ,'Checkpoint pages/sec' + ,'Table Lock Escalations/sec' + ,'Page life expectancy' + ,'Log File(s) Size (KB)' + ,'Log File(s) Used Size (KB)' + ,'Data File(s) Size (KB)' + ,'Transactions/sec' + ,'Write Transactions/sec' + ,'Active Transactions' + ,'Log Growths' + ,'Active Temp Tables' + ,'Logical Connections' + ,'Temp Tables Creation Rate' + ,'Temp Tables For Destruction' + ,'Free Space in tempdb (KB)' + ,'Version Store Size (KB)' + ,'Memory Grants Pending' + ,'Memory Grants Outstanding' + ,'Free list stalls/sec' + ,'Buffer cache hit ratio' + ,'Buffer cache hit ratio base' + ,'Database Pages' + ,'Backup/Restore Throughput/sec' + ,'Total Server Memory (KB)' + ,'Target Server Memory (KB)' + ,'Log Flushes/sec' + ,'Log 
Flush Wait Time' + ,'Memory broker clerk size' + ,'Log Bytes Flushed/sec' + ,'Bytes Sent to Replica/sec' + ,'Log Send Queue' + ,'Bytes Sent to Transport/sec' + ,'Sends to Replica/sec' + ,'Bytes Sent to Transport/sec' + ,'Sends to Transport/sec' + ,'Bytes Received from Replica/sec' + ,'Receives from Replica/sec' + ,'Flow Control Time (ms/sec)' + ,'Flow Control/sec' + ,'Resent Messages/sec' + ,'Redone Bytes/sec' + ,'XTP Memory Used (KB)' + ,'Transaction Delay' + ,'Log Bytes Received/sec' + ,'Log Apply Pending Queue' + ,'Redone Bytes/sec' + ,'Recovery Queue' + ,'Log Apply Ready Queue' + ,'CPU usage %' + ,'CPU usage % base' + ,'Queued requests' + ,'Requests completed/sec' + ,'Blocked tasks' + ,'Active memory grant amount (KB)' + ,'Disk Read Bytes/sec' + ,'Disk Read IO Throttled/sec' + ,'Disk Read IO/sec' + ,'Disk Write Bytes/sec' + ,'Disk Write IO Throttled/sec' + ,'Disk Write IO/sec' + ,'Used memory (KB)' + ,'Forwarded Records/sec' + ,'Background Writer pages/sec' + ,'Percent Log Used' + ,'Log Send Queue KB' + ,'Redo Queue KB' + ,'Mirrored Write Transactions/sec' + ,'Group Commit Time' + ,'Group Commits/Sec' + ,'Workfiles Created/sec' + ,'Worktables Created/sec' + ,'Distributed Query' + ,'DTC calls' + ,'Query Store CPU usage' + ,'Query Store physical reads' + ,'Query Store logical reads' + ,'Query Store logical writes' + ) OR ( + spi.[object_name] LIKE '%User Settable%' + OR spi.[object_name] LIKE '%SQL Errors%' + OR spi.[object_name] LIKE '%Batch Resp Statistics%' + ) OR ( + spi.[instance_name] IN ('_Total') + AND spi.[counter_name] IN ( + 'Lock Timeouts/sec' + ,'Lock Timeouts (timeout > 0)/sec' + ,'Number of Deadlocks/sec' + ,'Lock Waits/sec' + ,'Latch Waits/sec' + ) + ) +) + +INSERT INTO @PCounters SELECT * FROM PerfCounters; + +SELECT + 'sqlserver_performance' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,pc.[object_name] AS [object] + ,pc.[counter_name] AS [counter] + ,CASE pc.[instance_name] WHEN '_Total' THEN 'Total' ELSE 
ISNULL(pc.[instance_name],'') END AS [instance] + ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value] + ,CAST(pc.[cntr_type] AS varchar(25)) AS [counter_type] +FROM @PCounters AS pc +LEFT OUTER JOIN @PCounters AS pc1 + ON ( + pc.[counter_name] = REPLACE(pc1.[counter_name],' base','') + OR pc.[counter_name] = REPLACE(pc1.[counter_name],' base',' (ms)') + ) + AND pc.[object_name] = pc1.[object_name] + AND pc.[instance_name] = pc1.[instance_name] + AND pc1.[counter_name] LIKE '%base' +WHERE + pc.[counter_name] NOT LIKE '% base' +{filter_instance_name} +OPTION(RECOMPILE) +` + +func getSQLServerPerformanceCounterQuery(instanceName string) string { + if instanceName != "" { + whereClause := fmt.Sprintf("\tAND @@SERVERNAME = '%s'", instanceName) + r := strings.NewReplacer("{filter_instance_name}", whereClause) + return r.Replace(sqlServerPerformanceCountersQuery) + } + + r := strings.NewReplacer("{filter_instance_name}", "") + return r.Replace(sqlServerPerformanceCountersQuery) +} diff --git a/receiver/sqlserverreceiver/queries_test.go b/receiver/sqlserverreceiver/queries_test.go index 139dc410e039..c23a90f74b40 100644 --- a/receiver/sqlserverreceiver/queries_test.go +++ b/receiver/sqlserverreceiver/queries_test.go @@ -12,24 +12,49 @@ import ( "github.com/stretchr/testify/require" ) -func TestQueryIODBWithoutInstanceName(t *testing.T) { - expectedBytes, err := os.ReadFile(path.Join("./testdata", "databaseIOQueryWithoutInstanceName.txt")) - require.NoError(t, err) - // Replace all will fix newlines when testing on Windows - expected := strings.ReplaceAll(string(expectedBytes), "\r\n", "\n") +func TestQueryContents(t *testing.T) { + queryTests := []struct { + name string + instanceName string + getQuery func(string) string + expectedQueryValFilename string + }{ + { + name: "Test database IO query without instance name", + instanceName: "", + 
getQuery: getSQLServerDatabaseIOQuery, + expectedQueryValFilename: "databaseIOQueryWithoutInstanceName.txt", + }, + { + name: "Test database IO query with instance name", + instanceName: "instanceName", + getQuery: getSQLServerDatabaseIOQuery, + expectedQueryValFilename: "databaseIOQueryWithInstanceName.txt", + }, + { + name: "Test perf counter query without instance name", + instanceName: "", + getQuery: getSQLServerPerformanceCounterQuery, + expectedQueryValFilename: "perfCounterQueryWithoutInstanceName.txt", + }, + { + name: "Test perf counter query with instance name", + instanceName: "instanceName", + getQuery: getSQLServerPerformanceCounterQuery, + expectedQueryValFilename: "perfCounterQueryWithInstanceName.txt", + }, + } + + for _, tt := range queryTests { + t.Run(tt.name, func(t *testing.T) { + expectedBytes, err := os.ReadFile(path.Join("./testdata", tt.expectedQueryValFilename)) + require.NoError(t, err) + // Replace all will fix newlines when testing on Windows + expected := strings.ReplaceAll(string(expectedBytes), "\r\n", "\n") + + actual := tt.getQuery(tt.instanceName) + require.Equal(t, expected, actual) + }) + } - actual := getSQLServerDatabaseIOQuery("") - - require.Equal(t, expected, actual) -} - -func TestQueryIODBWithInstanceName(t *testing.T) { - expectedBytes, err := os.ReadFile(path.Join("./testdata", "databaseIOQueryWithInstanceName.txt")) - require.NoError(t, err) - // Replace all will fix newlines when testing on Windows - expected := strings.ReplaceAll(string(expectedBytes), "\r\n", "\n") - - actual := getSQLServerDatabaseIOQuery("instanceName") - - require.Equal(t, expected, actual) } diff --git a/receiver/sqlserverreceiver/scraper.go b/receiver/sqlserverreceiver/scraper.go index 16f65d27bad4..1f65384bb8f7 100644 --- a/receiver/sqlserverreceiver/scraper.go +++ b/receiver/sqlserverreceiver/scraper.go @@ -21,6 +21,8 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sqlserverreceiver/internal/metadata" ) +const 
instanceNameKey = "sql_instance" + type sqlServerScraperHelper struct { id component.ID sqlQuery string @@ -82,6 +84,8 @@ func (s *sqlServerScraperHelper) Scrape(ctx context.Context) (pmetric.Metrics, e switch s.sqlQuery { case getSQLServerDatabaseIOQuery(s.instanceName): err = s.recordDatabaseIOMetrics(ctx, rb) + case getSQLServerPerformanceCounterQuery(s.instanceName): + err = s.recordDatabasePerfCounterMetrics(ctx, rb) default: return pmetric.Metrics{}, fmt.Errorf("Attempted to get metrics from unsupported query: %s", s.sqlQuery) } @@ -104,7 +108,6 @@ func (s *sqlServerScraperHelper) recordDatabaseIOMetrics(ctx context.Context, rb // TODO: Move constants out to the package level when other queries are added. const computerNameKey = "computer_name" const databaseNameKey = "database_name" - const instanceNameKey = "sql_instance" const physicalFilenameKey = "physical_filename" const logicalFilenameKey = "logical_filename" const fileTypeKey = "file_type" @@ -144,3 +147,50 @@ func (s *sqlServerScraperHelper) recordDatabaseIOMetrics(ctx context.Context, rb return errors.Join(errs...) 
} + +func (s *sqlServerScraperHelper) recordDatabasePerfCounterMetrics(ctx context.Context, rb *metadata.ResourceBuilder) error { + const counterKey = "counter" + const valueKey = "value" + // Constants are the columns for metrics from query + const diskReadIOThrottled = "Disk Read IO Throttled/sec" + const diskWriteIOThrottled = "Disk Write IO Throttled/sec" + const lockWaits = "Lock Waits/sec" + const processesBlocked = "Processes blocked" + + rows, err := s.client.QueryRows(ctx) + + if err != nil { + if errors.Is(err, sqlquery.ErrNullValueWarning) { + s.logger.Warn("problems encountered getting metric rows", zap.Error(err)) + } else { + return fmt.Errorf("sqlServerScraperHelper: %w", err) + } + } + + var errs []error + now := pcommon.NewTimestampFromTime(time.Now()) + for i, row := range rows { + if i == 0 { + rb.SetSqlserverInstanceName(row[instanceNameKey]) + } + + switch row[counterKey] { + case diskReadIOThrottled: + errs = append(errs, s.mb.RecordSqlserverResourcePoolDiskThrottledReadRateDataPoint(now, row[valueKey])) + case diskWriteIOThrottled: + errs = append(errs, s.mb.RecordSqlserverResourcePoolDiskThrottledWriteRateDataPoint(now, row[valueKey])) + case lockWaits: + val, err := strconv.ParseFloat(row[valueKey], 64) + if err != nil { + err = fmt.Errorf("row %d: %w", i, err) + errs = append(errs, err) + } else { + s.mb.RecordSqlserverLockWaitRateDataPoint(now, val) + } + case processesBlocked: + errs = append(errs, s.mb.RecordSqlserverProcessesBlockedDataPoint(now, row[valueKey])) + } + } + + return errors.Join(errs...) +} diff --git a/receiver/sqlserverreceiver/scraper_test.go b/receiver/sqlserverreceiver/scraper_test.go index 3538be3d1093..61124e7c9137 100644 --- a/receiver/sqlserverreceiver/scraper_test.go +++ b/receiver/sqlserverreceiver/scraper_test.go @@ -31,7 +31,10 @@ func TestEmptyScrape(t *testing.T) { assert.NoError(t, cfg.Validate()) // Ensure there aren't any scrapers when all metrics are disabled. 
- cfg.MetricsBuilderConfig.Metrics.SqlserverDatabaseIoReadLatency.Enabled = false + // The locks metric is the only scraper metric enabled by default, as it is reusing + // a performance counter metric, and can be gathered either by perf counters, or + // by scraping. + cfg.MetricsBuilderConfig.Metrics.SqlserverLockWaitRate.Enabled = false scrapers := setupSQLServerScrapers(receivertest.NewNopCreateSettings(), cfg) assert.Empty(t, scrapers) } @@ -47,13 +50,17 @@ func TestSuccessfulScrape(t *testing.T) { // Ensure all metrics are received when all are enabled. cfg.MetricsBuilderConfig.Metrics.SqlserverDatabaseIoReadLatency.Enabled = true + cfg.MetricsBuilderConfig.Metrics.SqlserverResourcePoolDiskThrottledReadRate.Enabled = true + cfg.MetricsBuilderConfig.Metrics.SqlserverResourcePoolDiskThrottledWriteRate.Enabled = true + cfg.MetricsBuilderConfig.Metrics.SqlserverProcessesBlocked.Enabled = true + scrapers := setupSQLServerScrapers(receivertest.NewNopCreateSettings(), cfg) assert.NotNil(t, scrapers) for _, scraper := range scrapers { err := scraper.Start(context.Background(), componenttest.NewNopHost()) assert.NoError(t, err) - defer func() { assert.NoError(t, scraper.Shutdown(context.Background())) }() + defer assert.NoError(t, scraper.Shutdown(context.Background())) scraper.client = mockClient{ instanceName: scraper.instanceName, @@ -63,7 +70,14 @@ func TestSuccessfulScrape(t *testing.T) { actualMetrics, err := scraper.Scrape(context.Background()) assert.NoError(t, err) - expectedFile := filepath.Join("testdata", "expected_database_io.yaml") + var expectedFile string + switch scraper.sqlQuery { + case getSQLServerDatabaseIOQuery(scraper.instanceName): + expectedFile = filepath.Join("testdata", "expectedDatabaseIO.yaml") + case getSQLServerPerformanceCounterQuery(scraper.instanceName): + expectedFile = filepath.Join("testdata", "expectedPerfCounters.yaml") + } + // Uncomment line below to re-generate expected metrics.
// golden.WriteMetrics(t, expectedFile, actualMetrics) expectedMetrics, err := golden.ReadMetrics(expectedFile) @@ -88,13 +102,16 @@ func TestScrapeInvalidQuery(t *testing.T) { // Ensure all metrics are received when all are enabled. cfg.MetricsBuilderConfig.Metrics.SqlserverDatabaseIoReadLatency.Enabled = true + cfg.MetricsBuilderConfig.Metrics.SqlserverResourcePoolDiskThrottledReadRate.Enabled = true + cfg.MetricsBuilderConfig.Metrics.SqlserverResourcePoolDiskThrottledWriteRate.Enabled = true + cfg.MetricsBuilderConfig.Metrics.SqlserverProcessesBlocked.Enabled = true scrapers := setupSQLServerScrapers(receivertest.NewNopCreateSettings(), cfg) assert.NotNil(t, scrapers) for _, scraper := range scrapers { err := scraper.Start(context.Background(), componenttest.NewNopHost()) assert.NoError(t, err) - defer func() { assert.NoError(t, scraper.Shutdown(context.Background())) }() + defer assert.NoError(t, scraper.Shutdown(context.Background())) scraper.client = mockClient{ instanceName: scraper.instanceName, @@ -131,13 +148,20 @@ func readFile(fname string) ([]sqlquery.StringMap, error) { } func (mc mockClient) QueryRows(context.Context, ...any) ([]sqlquery.StringMap, error) { - if mc.SQL == getSQLServerDatabaseIOQuery(mc.instanceName) { - queryResults, err := readFile("database_io_scraped_data.txt") - if err != nil { - return nil, err - } - return queryResults, nil + var queryResults []sqlquery.StringMap + var err error + + switch mc.SQL { + case getSQLServerDatabaseIOQuery(mc.instanceName): + queryResults, err = readFile("database_io_scraped_data.txt") + case getSQLServerPerformanceCounterQuery(mc.instanceName): + queryResults, err = readFile("perfCounterQueryData.txt") + default: + return nil, fmt.Errorf("No valid query found") } - return nil, fmt.Errorf("No valid query found") + if err != nil { + return nil, err + } + return queryResults, nil } diff --git a/receiver/sqlserverreceiver/testdata/expected_database_io.yaml 
b/receiver/sqlserverreceiver/testdata/expectedDatabaseIO.yaml similarity index 100% rename from receiver/sqlserverreceiver/testdata/expected_database_io.yaml rename to receiver/sqlserverreceiver/testdata/expectedDatabaseIO.yaml diff --git a/receiver/sqlserverreceiver/testdata/expectedPerfCounters.yaml b/receiver/sqlserverreceiver/testdata/expectedPerfCounters.yaml new file mode 100644 index 000000000000..edb17de9fcfb --- /dev/null +++ b/receiver/sqlserverreceiver/testdata/expectedPerfCounters.yaml @@ -0,0 +1,49 @@ +resourceMetrics: + - resource: + attributes: + - key: sqlserver.instance.name + value: + stringValue: 8cac97ac9b8f + scopeMetrics: + - metrics: + - description: Number of lock requests resulting in a wait. + gauge: + dataPoints: + - asDouble: 17 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: sqlserver.lock.wait.rate + unit: '{requests}/s' + - description: The number of processes that are currently blocked + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: sqlserver.processes.blocked + unit: '{processes}' + - description: The number of read operations that were throttled in the last second + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: sqlserver.resource_pool.disk.throttled.read.rate + unit: '{reads}/s' + - description: The number of write operations that were throttled in the last second + gauge: + dataPoints: + - asDouble: 0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: sqlserver.resource_pool.disk.throttled.write.rate + unit: '{writes}/s' + scope: + name: otelcol/sqlserverreceiver + version: latest diff --git a/receiver/sqlserverreceiver/testdata/perfCounterQueryData.txt b/receiver/sqlserverreceiver/testdata/perfCounterQueryData.txt new file mode 100644 index 
000000000000..4b9ec5411d96 --- /dev/null +++ b/receiver/sqlserverreceiver/testdata/perfCounterQueryData.txt @@ -0,0 +1,2603 @@ +[ + { + "counter":"Forwarded Records/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Access Methods", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Full Scans/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Access Methods", + "sql_instance":"8cac97ac9b8f", + "value":"14458" + }, + { + "counter":"Index Searches/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Access Methods", + "sql_instance":"8cac97ac9b8f", + "value":"1.619118e+06" + }, + { + "counter":"Page Splits/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Access Methods", + "sql_instance":"8cac97ac9b8f", + "value":"871" + }, + { + "counter":"Table Lock Escalations/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Access Methods", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Workfiles Created/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Access Methods", + "sql_instance":"8cac97ac9b8f", + "value":"816" + }, + { + "counter":"Worktables Created/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Access Methods", + "sql_instance":"8cac97ac9b8f", + "value":"895" + }, + { + "counter":"Bytes Received from Replica/sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Availability Replica", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Bytes Sent to Replica/sec", + "counter_type":"272696576", + 
"instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Availability Replica", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Bytes Sent to Transport/sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Availability Replica", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Flow Control Time (ms/sec)", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Availability Replica", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Flow Control/sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Availability Replica", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Receives from Replica/sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Availability Replica", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Resent Messages/sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Availability Replica", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Sends to Replica/sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Availability Replica", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Sends to Transport/sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Availability Replica", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=000000ms \u0026 \u003c000001ms", + "counter_type":"65792", + "instance":"CPU Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + 
"sql_instance":"8cac97ac9b8f", + "value":"1952" + }, + { + "counter":"Batches \u003e=000001ms \u0026 \u003c000002ms", + "counter_type":"65792", + "instance":"CPU Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"205" + }, + { + "counter":"Batches \u003e=000002ms \u0026 \u003c000005ms", + "counter_type":"65792", + "instance":"CPU Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"160" + }, + { + "counter":"Batches \u003e=000005ms \u0026 \u003c000010ms", + "counter_type":"65792", + "instance":"CPU Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"93" + }, + { + "counter":"Batches \u003e=000010ms \u0026 \u003c000020ms", + "counter_type":"65792", + "instance":"CPU Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"220" + }, + { + "counter":"Batches \u003e=000020ms \u0026 \u003c000050ms", + "counter_type":"65792", + "instance":"CPU Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"437" + }, + { + "counter":"Batches \u003e=000050ms \u0026 \u003c000100ms", + "counter_type":"65792", + "instance":"CPU Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"158" + }, + { + "counter":"Batches \u003e=000100ms \u0026 \u003c000200ms", + "counter_type":"65792", + "instance":"CPU Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"130" + }, + { + "counter":"Batches \u003e=000200ms \u0026 \u003c000500ms", + 
"counter_type":"65792", + "instance":"CPU Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"18" + }, + { + "counter":"Batches \u003e=000500ms \u0026 \u003c001000ms", + "counter_type":"65792", + "instance":"CPU Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"1" + }, + { + "counter":"Batches \u003e=001000ms \u0026 \u003c002000ms", + "counter_type":"65792", + "instance":"CPU Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=002000ms \u0026 \u003c005000ms", + "counter_type":"65792", + "instance":"CPU Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=005000ms \u0026 \u003c010000ms", + "counter_type":"65792", + "instance":"CPU Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=100000ms", + "counter_type":"65792", + "instance":"CPU Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=010000ms \u0026 \u003c020000ms", + "counter_type":"65792", + "instance":"CPU Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=020000ms \u0026 \u003c050000ms", + "counter_type":"65792", + "instance":"CPU Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + 
"sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=050000ms \u0026 \u003c100000ms", + "counter_type":"65792", + "instance":"CPU Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=050000ms \u0026 \u003c100000ms", + "counter_type":"65792", + "instance":"CPU Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=020000ms \u0026 \u003c050000ms", + "counter_type":"65792", + "instance":"CPU Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=010000ms \u0026 \u003c020000ms", + "counter_type":"65792", + "instance":"CPU Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=100000ms", + "counter_type":"65792", + "instance":"CPU Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=005000ms \u0026 \u003c010000ms", + "counter_type":"65792", + "instance":"CPU Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=002000ms \u0026 \u003c005000ms", + "counter_type":"65792", + "instance":"CPU Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=001000ms \u0026 \u003c002000ms", + "counter_type":"65792", + "instance":"CPU 
Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=000500ms \u0026 \u003c001000ms", + "counter_type":"65792", + "instance":"CPU Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"531" + }, + { + "counter":"Batches \u003e=000200ms \u0026 \u003c000500ms", + "counter_type":"65792", + "instance":"CPU Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"5576" + }, + { + "counter":"Batches \u003e=000100ms \u0026 \u003c000200ms", + "counter_type":"65792", + "instance":"CPU Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"16932" + }, + { + "counter":"Batches \u003e=000050ms \u0026 \u003c000100ms", + "counter_type":"65792", + "instance":"CPU Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"10879" + }, + { + "counter":"Batches \u003e=000020ms \u0026 \u003c000050ms", + "counter_type":"65792", + "instance":"CPU Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"13253" + }, + { + "counter":"Batches \u003e=000010ms \u0026 \u003c000020ms", + "counter_type":"65792", + "instance":"CPU Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"3032" + }, + { + "counter":"Batches \u003e=000005ms \u0026 \u003c000010ms", + "counter_type":"65792", + "instance":"CPU Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + 
"sql_instance":"8cac97ac9b8f", + "value":"613" + }, + { + "counter":"Batches \u003e=000002ms \u0026 \u003c000005ms", + "counter_type":"65792", + "instance":"CPU Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"457" + }, + { + "counter":"Batches \u003e=000001ms \u0026 \u003c000002ms", + "counter_type":"65792", + "instance":"CPU Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"205" + }, + { + "counter":"Batches \u003e=000000ms \u0026 \u003c000001ms", + "counter_type":"65792", + "instance":"CPU Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=000000ms \u0026 \u003c000001ms", + "counter_type":"65792", + "instance":"Elapsed Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"1884" + }, + { + "counter":"Batches \u003e=000001ms \u0026 \u003c000002ms", + "counter_type":"65792", + "instance":"Elapsed Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"197" + }, + { + "counter":"Batches \u003e=000002ms \u0026 \u003c000005ms", + "counter_type":"65792", + "instance":"Elapsed Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"175" + }, + { + "counter":"Batches \u003e=000005ms \u0026 \u003c000010ms", + "counter_type":"65792", + "instance":"Elapsed Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"95" + }, + { + "counter":"Batches \u003e=000010ms \u0026 \u003c000020ms", + 
"counter_type":"65792", + "instance":"Elapsed Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"204" + }, + { + "counter":"Batches \u003e=000020ms \u0026 \u003c000050ms", + "counter_type":"65792", + "instance":"Elapsed Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"316" + }, + { + "counter":"Batches \u003e=000050ms \u0026 \u003c000100ms", + "counter_type":"65792", + "instance":"Elapsed Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"150" + }, + { + "counter":"Batches \u003e=000100ms \u0026 \u003c000200ms", + "counter_type":"65792", + "instance":"Elapsed Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"135" + }, + { + "counter":"Batches \u003e=000200ms \u0026 \u003c000500ms", + "counter_type":"65792", + "instance":"Elapsed Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"189" + }, + { + "counter":"Batches \u003e=000500ms \u0026 \u003c001000ms", + "counter_type":"65792", + "instance":"Elapsed Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"5" + }, + { + "counter":"Batches \u003e=001000ms \u0026 \u003c002000ms", + "counter_type":"65792", + "instance":"Elapsed Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=002000ms \u0026 \u003c005000ms", + "counter_type":"65792", + "instance":"Elapsed Time:Requests", + "measurement":"sqlserver_performance", + 
"object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=005000ms \u0026 \u003c010000ms", + "counter_type":"65792", + "instance":"Elapsed Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=100000ms", + "counter_type":"65792", + "instance":"Elapsed Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=010000ms \u0026 \u003c020000ms", + "counter_type":"65792", + "instance":"Elapsed Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=020000ms \u0026 \u003c050000ms", + "counter_type":"65792", + "instance":"Elapsed Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=050000ms \u0026 \u003c100000ms", + "counter_type":"65792", + "instance":"Elapsed Time:Requests", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=050000ms \u0026 \u003c100000ms", + "counter_type":"65792", + "instance":"Elapsed Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=020000ms \u0026 \u003c050000ms", + "counter_type":"65792", + "instance":"Elapsed Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=010000ms \u0026 
\u003c020000ms", + "counter_type":"65792", + "instance":"Elapsed Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=100000ms", + "counter_type":"65792", + "instance":"Elapsed Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=005000ms \u0026 \u003c010000ms", + "counter_type":"65792", + "instance":"Elapsed Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=002000ms \u0026 \u003c005000ms", + "counter_type":"65792", + "instance":"Elapsed Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=001000ms \u0026 \u003c002000ms", + "counter_type":"65792", + "instance":"Elapsed Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Batches \u003e=000500ms \u0026 \u003c001000ms", + "counter_type":"65792", + "instance":"Elapsed Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"2777" + }, + { + "counter":"Batches \u003e=000200ms \u0026 \u003c000500ms", + "counter_type":"65792", + "instance":"Elapsed Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"68267" + }, + { + "counter":"Batches \u003e=000100ms \u0026 \u003c000200ms", + "counter_type":"65792", + "instance":"Elapsed Time:Total(ms)", + "measurement":"sqlserver_performance", + 
"object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"19282" + }, + { + "counter":"Batches \u003e=000050ms \u0026 \u003c000100ms", + "counter_type":"65792", + "instance":"Elapsed Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"10344" + }, + { + "counter":"Batches \u003e=000020ms \u0026 \u003c000050ms", + "counter_type":"65792", + "instance":"Elapsed Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"8733" + }, + { + "counter":"Batches \u003e=000010ms \u0026 \u003c000020ms", + "counter_type":"65792", + "instance":"Elapsed Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"2882" + }, + { + "counter":"Batches \u003e=000005ms \u0026 \u003c000010ms", + "counter_type":"65792", + "instance":"Elapsed Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"636" + }, + { + "counter":"Batches \u003e=000002ms \u0026 \u003c000005ms", + "counter_type":"65792", + "instance":"Elapsed Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"501" + }, + { + "counter":"Batches \u003e=000001ms \u0026 \u003c000002ms", + "counter_type":"65792", + "instance":"Elapsed Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"197" + }, + { + "counter":"Batches \u003e=000000ms \u0026 \u003c000001ms", + "counter_type":"65792", + "instance":"Elapsed Time:Total(ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Batch Resp Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, 
+ { + "counter":"Background writer pages/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Buffer Manager", + "sql_instance":"8cac97ac9b8f", + "value":"248" + }, + { + "counter":"Buffer cache hit ratio", + "counter_type":"537003264", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Buffer Manager", + "sql_instance":"8cac97ac9b8f", + "value":"100" + }, + { + "counter":"Checkpoint pages/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Buffer Manager", + "sql_instance":"8cac97ac9b8f", + "value":"504" + }, + { + "counter":"Database pages", + "counter_type":"65792", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Buffer Manager", + "sql_instance":"8cac97ac9b8f", + "value":"4844" + }, + { + "counter":"Free list stalls/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Buffer Manager", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Lazy writes/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Buffer Manager", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Page life expectancy", + "counter_type":"65792", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Buffer Manager", + "sql_instance":"8cac97ac9b8f", + "value":"433208" + }, + { + "counter":"Page lookups/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Buffer Manager", + "sql_instance":"8cac97ac9b8f", + "value":"3.294704e+06" + }, + { + "counter":"Page reads/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Buffer Manager", + "sql_instance":"8cac97ac9b8f", + "value":"4410" + }, + { + 
"counter":"Page writes/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Buffer Manager", + "sql_instance":"8cac97ac9b8f", + "value":"1114" + }, + { + "counter":"Readahead pages/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Buffer Manager", + "sql_instance":"8cac97ac9b8f", + "value":"379" + }, + { + "counter":"Database pages", + "counter_type":"65792", + "instance":"000", + "measurement":"sqlserver_performance", + "object":"SQLServer:Buffer Node", + "sql_instance":"8cac97ac9b8f", + "value":"4844" + }, + { + "counter":"Page life expectancy", + "counter_type":"65792", + "instance":"000", + "measurement":"sqlserver_performance", + "object":"SQLServer:Buffer Node", + "sql_instance":"8cac97ac9b8f", + "value":"433208" + }, + { + "counter":"Group Commit Time", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Database Replica", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Group Commits/Sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Database Replica", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Log Apply Pending Queue", + "counter_type":"65792", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Database Replica", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Log Apply Ready Queue", + "counter_type":"65792", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Database Replica", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Log Bytes Received/sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Database Replica", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + 
"counter":"Log Send Queue", + "counter_type":"65792", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Database Replica", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Mirrored Write Transactions/sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Database Replica", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Recovery Queue", + "counter_type":"65792", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Database Replica", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Redone Bytes/sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Database Replica", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Transaction Delay", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Database Replica", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Active Transactions", + "counter_type":"65792", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Backup/Restore Throughput/sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Data File(s) Size (KB)", + "counter_type":"65792", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"155648" + }, + { + "counter":"Log Bytes Flushed/sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"1.1055104e+07" 
+ }, + { + "counter":"XTP Memory Used (KB)", + "counter_type":"65792", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Log File(s) Size (KB)", + "counter_type":"65792", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"23752" + }, + { + "counter":"Log File(s) Used Size (KB)", + "counter_type":"65792", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"5476" + }, + { + "counter":"Log Flush Wait Time", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"2736" + }, + { + "counter":"Log Flushes/sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"775" + }, + { + "counter":"Log Growths", + "counter_type":"65792", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"16" + }, + { + "counter":"Percent Log Used", + "counter_type":"65792", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"23" + }, + { + "counter":"Transactions/sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"16622" + }, + { + "counter":"Write Transactions/sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"718" + }, + { + "counter":"Write 
Transactions/sec", + "counter_type":"272696576", + "instance":"master", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"338" + }, + { + "counter":"Transactions/sec", + "counter_type":"272696576", + "instance":"master", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"2372" + }, + { + "counter":"Percent Log Used", + "counter_type":"65792", + "instance":"master", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"67" + }, + { + "counter":"Log Growths", + "counter_type":"65792", + "instance":"master", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"6" + }, + { + "counter":"Log Flushes/sec", + "counter_type":"272696576", + "instance":"master", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"412" + }, + { + "counter":"Log Flush Wait Time", + "counter_type":"272696576", + "instance":"master", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"2247" + }, + { + "counter":"Log File(s) Used Size (KB)", + "counter_type":"65792", + "instance":"master", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"1378" + }, + { + "counter":"Log File(s) Size (KB)", + "counter_type":"65792", + "instance":"master", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"2040" + }, + { + "counter":"XTP Memory Used (KB)", + "counter_type":"65792", + "instance":"master", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Log Bytes Flushed/sec", + 
"counter_type":"272696576", + "instance":"master", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"5.873664e+06" + }, + { + "counter":"Data File(s) Size (KB)", + "counter_type":"65792", + "instance":"master", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"4800" + }, + { + "counter":"Backup/Restore Throughput/sec", + "counter_type":"272696576", + "instance":"master", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Active Transactions", + "counter_type":"65792", + "instance":"master", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Active Transactions", + "counter_type":"65792", + "instance":"model", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Backup/Restore Throughput/sec", + "counter_type":"272696576", + "instance":"model", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Data File(s) Size (KB)", + "counter_type":"65792", + "instance":"model", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"8192" + }, + { + "counter":"Log Bytes Flushed/sec", + "counter_type":"272696576", + "instance":"model", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"675840" + }, + { + "counter":"XTP Memory Used (KB)", + "counter_type":"65792", + "instance":"model", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Log File(s) Size (KB)", + 
"counter_type":"65792", + "instance":"model", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"8184" + }, + { + "counter":"Log File(s) Used Size (KB)", + "counter_type":"65792", + "instance":"model", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"853" + }, + { + "counter":"Log Flush Wait Time", + "counter_type":"272696576", + "instance":"model", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"113" + }, + { + "counter":"Log Flushes/sec", + "counter_type":"272696576", + "instance":"model", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"73" + }, + { + "counter":"Log Growths", + "counter_type":"65792", + "instance":"model", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Percent Log Used", + "counter_type":"65792", + "instance":"model", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"10" + }, + { + "counter":"Transactions/sec", + "counter_type":"272696576", + "instance":"model", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"4395" + }, + { + "counter":"Write Transactions/sec", + "counter_type":"272696576", + "instance":"model", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"33" + }, + { + "counter":"Write Transactions/sec", + "counter_type":"272696576", + "instance":"model_msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"34" + }, + { + "counter":"Transactions/sec", + "counter_type":"272696576", + 
"instance":"model_msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"1449" + }, + { + "counter":"Percent Log Used", + "counter_type":"65792", + "instance":"model_msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"61" + }, + { + "counter":"Log Growths", + "counter_type":"65792", + "instance":"model_msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"2" + }, + { + "counter":"Log Flushes/sec", + "counter_type":"272696576", + "instance":"model_msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"78" + }, + { + "counter":"Log Flush Wait Time", + "counter_type":"272696576", + "instance":"model_msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"120" + }, + { + "counter":"Log File(s) Used Size (KB)", + "counter_type":"65792", + "instance":"model_msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"629" + }, + { + "counter":"Log File(s) Size (KB)", + "counter_type":"65792", + "instance":"model_msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"1016" + }, + { + "counter":"XTP Memory Used (KB)", + "counter_type":"65792", + "instance":"model_msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Log Bytes Flushed/sec", + "counter_type":"272696576", + "instance":"model_msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"692224" + }, + { + "counter":"Data File(s) Size (KB)", + "counter_type":"65792", + 
"instance":"model_msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"15680" + }, + { + "counter":"Backup/Restore Throughput/sec", + "counter_type":"272696576", + "instance":"model_msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Active Transactions", + "counter_type":"65792", + "instance":"model_msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Active Transactions", + "counter_type":"65792", + "instance":"model_replicatedmaster", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Backup/Restore Throughput/sec", + "counter_type":"272696576", + "instance":"model_replicatedmaster", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Data File(s) Size (KB)", + "counter_type":"65792", + "instance":"model_replicatedmaster", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"4800" + }, + { + "counter":"Log Bytes Flushed/sec", + "counter_type":"272696576", + "instance":"model_replicatedmaster", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"970752" + }, + { + "counter":"XTP Memory Used (KB)", + "counter_type":"65792", + "instance":"model_replicatedmaster", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Log File(s) Size (KB)", + "counter_type":"65792", + "instance":"model_replicatedmaster", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + 
"sql_instance":"8cac97ac9b8f", + "value":"1784" + }, + { + "counter":"Log File(s) Used Size (KB)", + "counter_type":"65792", + "instance":"model_replicatedmaster", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"774" + }, + { + "counter":"Log Flush Wait Time", + "counter_type":"272696576", + "instance":"model_replicatedmaster", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"138" + }, + { + "counter":"Log Flushes/sec", + "counter_type":"272696576", + "instance":"model_replicatedmaster", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"87" + }, + { + "counter":"Log Growths", + "counter_type":"65792", + "instance":"model_replicatedmaster", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"5" + }, + { + "counter":"Percent Log Used", + "counter_type":"65792", + "instance":"model_replicatedmaster", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"43" + }, + { + "counter":"Transactions/sec", + "counter_type":"272696576", + "instance":"model_replicatedmaster", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"1452" + }, + { + "counter":"Write Transactions/sec", + "counter_type":"272696576", + "instance":"model_replicatedmaster", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"36" + }, + { + "counter":"Write Transactions/sec", + "counter_type":"272696576", + "instance":"msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"33" + }, + { + "counter":"Transactions/sec", + "counter_type":"272696576", + "instance":"msdb", + 
"measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"4614" + }, + { + "counter":"Percent Log Used", + "counter_type":"65792", + "instance":"msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"53" + }, + { + "counter":"Log Growths", + "counter_type":"65792", + "instance":"msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"3" + }, + { + "counter":"Log Flushes/sec", + "counter_type":"272696576", + "instance":"msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"76" + }, + { + "counter":"Log Flush Wait Time", + "counter_type":"272696576", + "instance":"msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"118" + }, + { + "counter":"Log File(s) Used Size (KB)", + "counter_type":"65792", + "instance":"msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"678" + }, + { + "counter":"Log File(s) Size (KB)", + "counter_type":"65792", + "instance":"msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"1272" + }, + { + "counter":"XTP Memory Used (KB)", + "counter_type":"65792", + "instance":"msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Log Bytes Flushed/sec", + "counter_type":"272696576", + "instance":"msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"708608" + }, + { + "counter":"Data File(s) Size (KB)", + "counter_type":"65792", + "instance":"msdb", + "measurement":"sqlserver_performance", + 
"object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"15680" + }, + { + "counter":"Backup/Restore Throughput/sec", + "counter_type":"272696576", + "instance":"msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Active Transactions", + "counter_type":"65792", + "instance":"msdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Active Transactions", + "counter_type":"65792", + "instance":"mssqlsystemresource", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Backup/Restore Throughput/sec", + "counter_type":"272696576", + "instance":"mssqlsystemresource", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Data File(s) Size (KB)", + "counter_type":"65792", + "instance":"mssqlsystemresource", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"40960" + }, + { + "counter":"Log Bytes Flushed/sec", + "counter_type":"272696576", + "instance":"mssqlsystemresource", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"XTP Memory Used (KB)", + "counter_type":"65792", + "instance":"mssqlsystemresource", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Log File(s) Size (KB)", + "counter_type":"65792", + "instance":"mssqlsystemresource", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"1272" + }, + { + "counter":"Log File(s) Used Size (KB)", + 
"counter_type":"65792", + "instance":"mssqlsystemresource", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"648" + }, + { + "counter":"Log Flush Wait Time", + "counter_type":"272696576", + "instance":"mssqlsystemresource", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Log Flushes/sec", + "counter_type":"272696576", + "instance":"mssqlsystemresource", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Log Growths", + "counter_type":"65792", + "instance":"mssqlsystemresource", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Percent Log Used", + "counter_type":"65792", + "instance":"mssqlsystemresource", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"50" + }, + { + "counter":"Transactions/sec", + "counter_type":"272696576", + "instance":"mssqlsystemresource", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"5" + }, + { + "counter":"Write Transactions/sec", + "counter_type":"272696576", + "instance":"mssqlsystemresource", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Write Transactions/sec", + "counter_type":"272696576", + "instance":"tempdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"244" + }, + { + "counter":"Transactions/sec", + "counter_type":"272696576", + "instance":"tempdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"2335" + 
}, + { + "counter":"Percent Log Used", + "counter_type":"65792", + "instance":"tempdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"30" + }, + { + "counter":"Log Growths", + "counter_type":"65792", + "instance":"tempdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Log Flushes/sec", + "counter_type":"272696576", + "instance":"tempdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"49" + }, + { + "counter":"Log Flush Wait Time", + "counter_type":"272696576", + "instance":"tempdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Log File(s) Used Size (KB)", + "counter_type":"65792", + "instance":"tempdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"2468" + }, + { + "counter":"Log File(s) Size (KB)", + "counter_type":"65792", + "instance":"tempdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"8184" + }, + { + "counter":"XTP Memory Used (KB)", + "counter_type":"65792", + "instance":"tempdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Log Bytes Flushed/sec", + "counter_type":"272696576", + "instance":"tempdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"2.134016e+06" + }, + { + "counter":"Data File(s) Size (KB)", + "counter_type":"65792", + "instance":"tempdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"65536" + }, + { + 
"counter":"Backup/Restore Throughput/sec", + "counter_type":"272696576", + "instance":"tempdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Active Transactions", + "counter_type":"65792", + "instance":"tempdb", + "measurement":"sqlserver_performance", + "object":"SQLServer:Databases", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Distributed Query", + "counter_type":"65792", + "instance":"Average execution time (ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Exec Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"DTC calls", + "counter_type":"65792", + "instance":"Average execution time (ms)", + "measurement":"sqlserver_performance", + "object":"SQLServer:Exec Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"DTC calls", + "counter_type":"65792", + "instance":"Cumulative execution time (ms) per second", + "measurement":"sqlserver_performance", + "object":"SQLServer:Exec Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Distributed Query", + "counter_type":"65792", + "instance":"Cumulative execution time (ms) per second", + "measurement":"sqlserver_performance", + "object":"SQLServer:Exec Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Distributed Query", + "counter_type":"65792", + "instance":"Execs in progress", + "measurement":"sqlserver_performance", + "object":"SQLServer:Exec Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"DTC calls", + "counter_type":"65792", + "instance":"Execs in progress", + "measurement":"sqlserver_performance", + "object":"SQLServer:Exec Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"DTC calls", + "counter_type":"65792", + "instance":"Execs started per second", + "measurement":"sqlserver_performance", 
+ "object":"SQLServer:Exec Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Distributed Query", + "counter_type":"65792", + "instance":"Execs started per second", + "measurement":"sqlserver_performance", + "object":"SQLServer:Exec Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Active Temp Tables", + "counter_type":"65792", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:General Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"1" + }, + { + "counter":"Logical Connections", + "counter_type":"65792", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:General Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"3" + }, + { + "counter":"Logins/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:General Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"333" + }, + { + "counter":"Logouts/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:General Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"330" + }, + { + "counter":"Processes blocked", + "counter_type":"65792", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:General Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Temp Tables Creation Rate", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:General Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"62" + }, + { + "counter":"Temp Tables For Destruction", + "counter_type":"65792", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:General Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"User Connections", + "counter_type":"65792", + "instance":"", + "measurement":"sqlserver_performance", + 
"object":"SQLServer:General Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"3" + }, + { + "counter":"Average Latch Wait Time (ms)", + "counter_type":"1073874176", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Latches", + "sql_instance":"8cac97ac9b8f", + "value":"423" + }, + { + "counter":"Latch Waits/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Latches", + "sql_instance":"8cac97ac9b8f", + "value":"1036" + }, + { + "counter":"Lock Timeouts (timeout \u003e 0)/sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Locks", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Lock Timeouts/sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Locks", + "sql_instance":"8cac97ac9b8f", + "value":"1" + }, + { + "counter":"Lock Waits/sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Locks", + "sql_instance":"8cac97ac9b8f", + "value":"17" + }, + { + "counter":"Number of Deadlocks/sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Locks", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Memory broker clerk size", + "counter_type":"65792", + "instance":"Buffer Pool", + "measurement":"sqlserver_performance", + "object":"SQLServer:Memory Broker Clerks", + "sql_instance":"8cac97ac9b8f", + "value":"4844" + }, + { + "counter":"Memory broker clerk size", + "counter_type":"65792", + "instance":"Column store object pool", + "measurement":"sqlserver_performance", + "object":"SQLServer:Memory Broker Clerks", + "sql_instance":"8cac97ac9b8f", + "value":"16" + }, + { + "counter":"Memory Grants Outstanding", + "counter_type":"65792", + "instance":"", + 
"measurement":"sqlserver_performance", + "object":"SQLServer:Memory Manager", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Memory Grants Pending", + "counter_type":"65792", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Memory Manager", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Target Server Memory (KB)", + "counter_type":"65792", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Memory Manager", + "sql_instance":"8cac97ac9b8f", + "value":"4.588888e+06" + }, + { + "counter":"Total Server Memory (KB)", + "counter_type":"65792", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Memory Manager", + "sql_instance":"8cac97ac9b8f", + "value":"376968" + }, + { + "counter":"Query Store CPU usage", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Query Store", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Query Store logical reads", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Query Store", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Query Store logical writes", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Query Store", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Query Store physical reads", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + "object":"SQLServer:Query Store", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Active memory grant amount (KB)", + "counter_type":"65792", + "instance":"default", + "measurement":"sqlserver_performance", + "object":"SQLServer:Resource Pool Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"CPU usage %", + 
"counter_type":"537003264", + "instance":"default", + "measurement":"sqlserver_performance", + "object":"SQLServer:Resource Pool Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Disk Read Bytes/sec", + "counter_type":"65792", + "instance":"default", + "measurement":"sqlserver_performance", + "object":"SQLServer:Resource Pool Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Used memory (KB)", + "counter_type":"65792", + "instance":"default", + "measurement":"sqlserver_performance", + "object":"SQLServer:Resource Pool Stats", + "sql_instance":"8cac97ac9b8f", + "value":"30960" + }, + { + "counter":"Disk Read IO Throttled/sec", + "counter_type":"65792", + "instance":"default", + "measurement":"sqlserver_performance", + "object":"SQLServer:Resource Pool Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Disk Read IO/sec", + "counter_type":"65792", + "instance":"default", + "measurement":"sqlserver_performance", + "object":"SQLServer:Resource Pool Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Disk Write Bytes/sec", + "counter_type":"65792", + "instance":"default", + "measurement":"sqlserver_performance", + "object":"SQLServer:Resource Pool Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Disk Write IO Throttled/sec", + "counter_type":"65792", + "instance":"default", + "measurement":"sqlserver_performance", + "object":"SQLServer:Resource Pool Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Disk Write IO/sec", + "counter_type":"65792", + "instance":"default", + "measurement":"sqlserver_performance", + "object":"SQLServer:Resource Pool Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Disk Write IO/sec", + "counter_type":"65792", + "instance":"internal", + "measurement":"sqlserver_performance", + "object":"SQLServer:Resource Pool Stats", + "sql_instance":"8cac97ac9b8f", + 
"value":"0" + }, + { + "counter":"Disk Write IO Throttled/sec", + "counter_type":"65792", + "instance":"internal", + "measurement":"sqlserver_performance", + "object":"SQLServer:Resource Pool Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Disk Write Bytes/sec", + "counter_type":"65792", + "instance":"internal", + "measurement":"sqlserver_performance", + "object":"SQLServer:Resource Pool Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Disk Read IO/sec", + "counter_type":"65792", + "instance":"internal", + "measurement":"sqlserver_performance", + "object":"SQLServer:Resource Pool Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Disk Read IO Throttled/sec", + "counter_type":"65792", + "instance":"internal", + "measurement":"sqlserver_performance", + "object":"SQLServer:Resource Pool Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Used memory (KB)", + "counter_type":"65792", + "instance":"internal", + "measurement":"sqlserver_performance", + "object":"SQLServer:Resource Pool Stats", + "sql_instance":"8cac97ac9b8f", + "value":"277608" + }, + { + "counter":"Disk Read Bytes/sec", + "counter_type":"65792", + "instance":"internal", + "measurement":"sqlserver_performance", + "object":"SQLServer:Resource Pool Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"CPU usage %", + "counter_type":"537003264", + "instance":"internal", + "measurement":"sqlserver_performance", + "object":"SQLServer:Resource Pool Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Active memory grant amount (KB)", + "counter_type":"65792", + "instance":"internal", + "measurement":"sqlserver_performance", + "object":"SQLServer:Resource Pool Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Errors/sec", + "counter_type":"272696576", + "instance":"Total", + "measurement":"sqlserver_performance", + 
"object":"SQLServer:SQL Errors", + "sql_instance":"8cac97ac9b8f", + "value":"1027" + }, + { + "counter":"Errors/sec", + "counter_type":"272696576", + "instance":"DB Offline Errors", + "measurement":"sqlserver_performance", + "object":"SQLServer:SQL Errors", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Errors/sec", + "counter_type":"272696576", + "instance":"Info Errors", + "measurement":"sqlserver_performance", + "object":"SQLServer:SQL Errors", + "sql_instance":"8cac97ac9b8f", + "value":"825" + }, + { + "counter":"Errors/sec", + "counter_type":"272696576", + "instance":"Kill Connection Errors", + "measurement":"sqlserver_performance", + "object":"SQLServer:SQL Errors", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Errors/sec", + "counter_type":"272696576", + "instance":"User Errors", + "measurement":"sqlserver_performance", + "object":"SQLServer:SQL Errors", + "sql_instance":"8cac97ac9b8f", + "value":"202" + }, + { + "counter":"Batch Requests/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:SQL Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"3375" + }, + { + "counter":"SQL Compilations/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:SQL Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"413" + }, + { + "counter":"SQL Re-Compilations/sec", + "counter_type":"272696576", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:SQL Statistics", + "sql_instance":"8cac97ac9b8f", + "value":"63" + }, + { + "counter":"Free Space in tempdb (KB)", + "counter_type":"65792", + "instance":"", + "measurement":"sqlserver_performance", + "object":"SQLServer:Transactions", + "sql_instance":"8cac97ac9b8f", + "value":"61824" + }, + { + "counter":"Version Store Size (KB)", + "counter_type":"65792", + "instance":"", + "measurement":"sqlserver_performance", + 
"object":"SQLServer:Transactions", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Query", + "counter_type":"65792", + "instance":"User counter 1", + "measurement":"sqlserver_performance", + "object":"SQLServer:User Settable", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Query", + "counter_type":"65792", + "instance":"User counter 10", + "measurement":"sqlserver_performance", + "object":"SQLServer:User Settable", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Query", + "counter_type":"65792", + "instance":"User counter 2", + "measurement":"sqlserver_performance", + "object":"SQLServer:User Settable", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Query", + "counter_type":"65792", + "instance":"User counter 3", + "measurement":"sqlserver_performance", + "object":"SQLServer:User Settable", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Query", + "counter_type":"65792", + "instance":"User counter 4", + "measurement":"sqlserver_performance", + "object":"SQLServer:User Settable", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Query", + "counter_type":"65792", + "instance":"User counter 5", + "measurement":"sqlserver_performance", + "object":"SQLServer:User Settable", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Query", + "counter_type":"65792", + "instance":"User counter 6", + "measurement":"sqlserver_performance", + "object":"SQLServer:User Settable", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Query", + "counter_type":"65792", + "instance":"User counter 7", + "measurement":"sqlserver_performance", + "object":"SQLServer:User Settable", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Query", + "counter_type":"65792", + "instance":"User counter 8", + "measurement":"sqlserver_performance", + "object":"SQLServer:User Settable", + "sql_instance":"8cac97ac9b8f", + "value":"0" 
+ }, + { + "counter":"Query", + "counter_type":"65792", + "instance":"User counter 9", + "measurement":"sqlserver_performance", + "object":"SQLServer:User Settable", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Blocked tasks", + "counter_type":"65792", + "instance":"default", + "measurement":"sqlserver_performance", + "object":"SQLServer:Workload Group Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"CPU usage %", + "counter_type":"537003264", + "instance":"default", + "measurement":"sqlserver_performance", + "object":"SQLServer:Workload Group Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Queued requests", + "counter_type":"65792", + "instance":"default", + "measurement":"sqlserver_performance", + "object":"SQLServer:Workload Group Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Requests completed/sec", + "counter_type":"65792", + "instance":"default", + "measurement":"sqlserver_performance", + "object":"SQLServer:Workload Group Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Requests completed/sec", + "counter_type":"65792", + "instance":"internal", + "measurement":"sqlserver_performance", + "object":"SQLServer:Workload Group Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Queued requests", + "counter_type":"65792", + "instance":"internal", + "measurement":"sqlserver_performance", + "object":"SQLServer:Workload Group Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"CPU usage %", + "counter_type":"537003264", + "instance":"internal", + "measurement":"sqlserver_performance", + "object":"SQLServer:Workload Group Stats", + "sql_instance":"8cac97ac9b8f", + "value":"0" + }, + { + "counter":"Blocked tasks", + "counter_type":"65792", + "instance":"internal", + "measurement":"sqlserver_performance", + "object":"SQLServer:Workload Group Stats", + "sql_instance":"8cac97ac9b8f", + 
"value":"0" + } +] diff --git a/receiver/sqlserverreceiver/testdata/perfCounterQueryWithInstanceName.txt b/receiver/sqlserverreceiver/testdata/perfCounterQueryWithInstanceName.txt new file mode 100644 index 000000000000..7e03e7319dd8 --- /dev/null +++ b/receiver/sqlserverreceiver/testdata/perfCounterQueryWithInstanceName.txt @@ -0,0 +1,165 @@ + +SET DEADLOCK_PRIORITY -10; +IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterprise,Express*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Connection string Server:'+ @@ServerName + ',Database:' + DB_NAME() +' is not a SQL Server Standard, Enterprise or Express. This query is only supported on these editions.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +DECLARE + @SqlStatement AS nvarchar(max) + ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),3) AS int) + +DECLARE @PCounters TABLE +( + [object_name] nvarchar(128) + ,[counter_name] nvarchar(128) + ,[instance_name] nvarchar(128) + ,[cntr_value] bigint + ,[cntr_type] int + PRIMARY KEY([object_name], [counter_name], [instance_name]) +); + +WITH PerfCounters AS ( +SELECT DISTINCT + RTRIM(spi.[object_name]) [object_name] + ,RTRIM(spi.[counter_name]) [counter_name] + ,RTRIM(spi.[instance_name]) AS [instance_name] + ,CAST(spi.[cntr_value] AS bigint) AS [cntr_value] + ,spi.[cntr_type] + FROM sys.dm_os_performance_counters AS spi + WHERE + counter_name IN ( + 'SQL Compilations/sec' + ,'SQL Re-Compilations/sec' + ,'User Connections' + ,'Batch Requests/sec' + ,'Logouts/sec' + ,'Logins/sec' + ,'Processes blocked' + ,'Latch Waits/sec' + ,'Average Latch Wait Time (ms)' + ,'Full Scans/sec' + ,'Index Searches/sec' + ,'Page Splits/sec' + ,'Page lookups/sec' + ,'Page reads/sec' + ,'Page writes/sec' + ,'Readahead pages/sec' + ,'Lazy writes/sec' + ,'Checkpoint pages/sec' + ,'Table Lock Escalations/sec' + ,'Page life expectancy' + 
,'Log File(s) Size (KB)' + ,'Log File(s) Used Size (KB)' + ,'Data File(s) Size (KB)' + ,'Transactions/sec' + ,'Write Transactions/sec' + ,'Active Transactions' + ,'Log Growths' + ,'Active Temp Tables' + ,'Logical Connections' + ,'Temp Tables Creation Rate' + ,'Temp Tables For Destruction' + ,'Free Space in tempdb (KB)' + ,'Version Store Size (KB)' + ,'Memory Grants Pending' + ,'Memory Grants Outstanding' + ,'Free list stalls/sec' + ,'Buffer cache hit ratio' + ,'Buffer cache hit ratio base' + ,'Database Pages' + ,'Backup/Restore Throughput/sec' + ,'Total Server Memory (KB)' + ,'Target Server Memory (KB)' + ,'Log Flushes/sec' + ,'Log Flush Wait Time' + ,'Memory broker clerk size' + ,'Log Bytes Flushed/sec' + ,'Bytes Sent to Replica/sec' + ,'Log Send Queue' + ,'Bytes Sent to Transport/sec' + ,'Sends to Replica/sec' + ,'Bytes Sent to Transport/sec' + ,'Sends to Transport/sec' + ,'Bytes Received from Replica/sec' + ,'Receives from Replica/sec' + ,'Flow Control Time (ms/sec)' + ,'Flow Control/sec' + ,'Resent Messages/sec' + ,'Redone Bytes/sec' + ,'XTP Memory Used (KB)' + ,'Transaction Delay' + ,'Log Bytes Received/sec' + ,'Log Apply Pending Queue' + ,'Redone Bytes/sec' + ,'Recovery Queue' + ,'Log Apply Ready Queue' + ,'CPU usage %' + ,'CPU usage % base' + ,'Queued requests' + ,'Requests completed/sec' + ,'Blocked tasks' + ,'Active memory grant amount (KB)' + ,'Disk Read Bytes/sec' + ,'Disk Read IO Throttled/sec' + ,'Disk Read IO/sec' + ,'Disk Write Bytes/sec' + ,'Disk Write IO Throttled/sec' + ,'Disk Write IO/sec' + ,'Used memory (KB)' + ,'Forwarded Records/sec' + ,'Background Writer pages/sec' + ,'Percent Log Used' + ,'Log Send Queue KB' + ,'Redo Queue KB' + ,'Mirrored Write Transactions/sec' + ,'Group Commit Time' + ,'Group Commits/Sec' + ,'Workfiles Created/sec' + ,'Worktables Created/sec' + ,'Distributed Query' + ,'DTC calls' + ,'Query Store CPU usage' + ,'Query Store physical reads' + ,'Query Store logical reads' + ,'Query Store logical writes' + ) OR ( + 
spi.[object_name] LIKE '%User Settable%' + OR spi.[object_name] LIKE '%SQL Errors%' + OR spi.[object_name] LIKE '%Batch Resp Statistics%' + ) OR ( + spi.[instance_name] IN ('_Total') + AND spi.[counter_name] IN ( + 'Lock Timeouts/sec' + ,'Lock Timeouts (timeout > 0)/sec' + ,'Number of Deadlocks/sec' + ,'Lock Waits/sec' + ,'Latch Waits/sec' + ) + ) +) + +INSERT INTO @PCounters SELECT * FROM PerfCounters; + +SELECT + 'sqlserver_performance' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,pc.[object_name] AS [object] + ,pc.[counter_name] AS [counter] + ,CASE pc.[instance_name] WHEN '_Total' THEN 'Total' ELSE ISNULL(pc.[instance_name],'') END AS [instance] + ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value] + ,CAST(pc.[cntr_type] AS varchar(25)) AS [counter_type] +FROM @PCounters AS pc +LEFT OUTER JOIN @PCounters AS pc1 + ON ( + pc.[counter_name] = REPLACE(pc1.[counter_name],' base','') + OR pc.[counter_name] = REPLACE(pc1.[counter_name],' base',' (ms)') + ) + AND pc.[object_name] = pc1.[object_name] + AND pc.[instance_name] = pc1.[instance_name] + AND pc1.[counter_name] LIKE '%base' +WHERE + pc.[counter_name] NOT LIKE '% base' + AND @@SERVERNAME = 'instanceName' +OPTION(RECOMPILE) diff --git a/receiver/sqlserverreceiver/testdata/perfCounterQueryWithoutInstanceName.txt b/receiver/sqlserverreceiver/testdata/perfCounterQueryWithoutInstanceName.txt new file mode 100644 index 000000000000..0f8e4f7da9d5 --- /dev/null +++ b/receiver/sqlserverreceiver/testdata/perfCounterQueryWithoutInstanceName.txt @@ -0,0 +1,165 @@ + +SET DEADLOCK_PRIORITY -10; +IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterprise,Express*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Connection string Server:'+ @@ServerName + ',Database:' + DB_NAME() +' is not a SQL Server Standard, Enterprise or Express. 
This query is only supported on these editions.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +DECLARE + @SqlStatement AS nvarchar(max) + ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),3) AS int) + +DECLARE @PCounters TABLE +( + [object_name] nvarchar(128) + ,[counter_name] nvarchar(128) + ,[instance_name] nvarchar(128) + ,[cntr_value] bigint + ,[cntr_type] int + PRIMARY KEY([object_name], [counter_name], [instance_name]) +); + +WITH PerfCounters AS ( +SELECT DISTINCT + RTRIM(spi.[object_name]) [object_name] + ,RTRIM(spi.[counter_name]) [counter_name] + ,RTRIM(spi.[instance_name]) AS [instance_name] + ,CAST(spi.[cntr_value] AS bigint) AS [cntr_value] + ,spi.[cntr_type] + FROM sys.dm_os_performance_counters AS spi + WHERE + counter_name IN ( + 'SQL Compilations/sec' + ,'SQL Re-Compilations/sec' + ,'User Connections' + ,'Batch Requests/sec' + ,'Logouts/sec' + ,'Logins/sec' + ,'Processes blocked' + ,'Latch Waits/sec' + ,'Average Latch Wait Time (ms)' + ,'Full Scans/sec' + ,'Index Searches/sec' + ,'Page Splits/sec' + ,'Page lookups/sec' + ,'Page reads/sec' + ,'Page writes/sec' + ,'Readahead pages/sec' + ,'Lazy writes/sec' + ,'Checkpoint pages/sec' + ,'Table Lock Escalations/sec' + ,'Page life expectancy' + ,'Log File(s) Size (KB)' + ,'Log File(s) Used Size (KB)' + ,'Data File(s) Size (KB)' + ,'Transactions/sec' + ,'Write Transactions/sec' + ,'Active Transactions' + ,'Log Growths' + ,'Active Temp Tables' + ,'Logical Connections' + ,'Temp Tables Creation Rate' + ,'Temp Tables For Destruction' + ,'Free Space in tempdb (KB)' + ,'Version Store Size (KB)' + ,'Memory Grants Pending' + ,'Memory Grants Outstanding' + ,'Free list stalls/sec' + ,'Buffer cache hit ratio' + ,'Buffer cache hit ratio base' + ,'Database Pages' + ,'Backup/Restore Throughput/sec' + ,'Total Server Memory (KB)' + ,'Target Server Memory (KB)' + ,'Log Flushes/sec' + ,'Log 
Flush Wait Time' + ,'Memory broker clerk size' + ,'Log Bytes Flushed/sec' + ,'Bytes Sent to Replica/sec' + ,'Log Send Queue' + ,'Bytes Sent to Transport/sec' + ,'Sends to Replica/sec' + ,'Bytes Sent to Transport/sec' + ,'Sends to Transport/sec' + ,'Bytes Received from Replica/sec' + ,'Receives from Replica/sec' + ,'Flow Control Time (ms/sec)' + ,'Flow Control/sec' + ,'Resent Messages/sec' + ,'Redone Bytes/sec' + ,'XTP Memory Used (KB)' + ,'Transaction Delay' + ,'Log Bytes Received/sec' + ,'Log Apply Pending Queue' + ,'Redone Bytes/sec' + ,'Recovery Queue' + ,'Log Apply Ready Queue' + ,'CPU usage %' + ,'CPU usage % base' + ,'Queued requests' + ,'Requests completed/sec' + ,'Blocked tasks' + ,'Active memory grant amount (KB)' + ,'Disk Read Bytes/sec' + ,'Disk Read IO Throttled/sec' + ,'Disk Read IO/sec' + ,'Disk Write Bytes/sec' + ,'Disk Write IO Throttled/sec' + ,'Disk Write IO/sec' + ,'Used memory (KB)' + ,'Forwarded Records/sec' + ,'Background Writer pages/sec' + ,'Percent Log Used' + ,'Log Send Queue KB' + ,'Redo Queue KB' + ,'Mirrored Write Transactions/sec' + ,'Group Commit Time' + ,'Group Commits/Sec' + ,'Workfiles Created/sec' + ,'Worktables Created/sec' + ,'Distributed Query' + ,'DTC calls' + ,'Query Store CPU usage' + ,'Query Store physical reads' + ,'Query Store logical reads' + ,'Query Store logical writes' + ) OR ( + spi.[object_name] LIKE '%User Settable%' + OR spi.[object_name] LIKE '%SQL Errors%' + OR spi.[object_name] LIKE '%Batch Resp Statistics%' + ) OR ( + spi.[instance_name] IN ('_Total') + AND spi.[counter_name] IN ( + 'Lock Timeouts/sec' + ,'Lock Timeouts (timeout > 0)/sec' + ,'Number of Deadlocks/sec' + ,'Lock Waits/sec' + ,'Latch Waits/sec' + ) + ) +) + +INSERT INTO @PCounters SELECT * FROM PerfCounters; + +SELECT + 'sqlserver_performance' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,pc.[object_name] AS [object] + ,pc.[counter_name] AS [counter] + ,CASE pc.[instance_name] WHEN '_Total' THEN 'Total' ELSE 
ISNULL(pc.[instance_name],'') END AS [instance] + ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value] + ,CAST(pc.[cntr_type] AS varchar(25)) AS [counter_type] +FROM @PCounters AS pc +LEFT OUTER JOIN @PCounters AS pc1 + ON ( + pc.[counter_name] = REPLACE(pc1.[counter_name],' base','') + OR pc.[counter_name] = REPLACE(pc1.[counter_name],' base',' (ms)') + ) + AND pc.[object_name] = pc1.[object_name] + AND pc.[instance_name] = pc1.[instance_name] + AND pc1.[counter_name] LIKE '%base' +WHERE + pc.[counter_name] NOT LIKE '% base' + +OPTION(RECOMPILE) From c6a6bd43875769d7400e8462e63730fe1a49ab11 Mon Sep 17 00:00:00 2001 From: sh0rez Date: Tue, 14 May 2024 13:20:50 +0200 Subject: [PATCH 03/53] deltatocumulative: exponential histograms (#32030) **Description:** Implements accumulation of exponential histograms by adding bucket-per-bucket. - [x] Align bucket offset to the smaller one - [x] Merge buckets by adding up each buckets count - [x] Widen zero buckets so they are the same - [x] Adjust scale to the lowest one **Link to tracking Issue:** https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/30705 **Testing:** Extensive tests have been added to the `internal/data` package **Documentation:** not needed --- .chloggen/deltatocumulative-exphist.yaml | 29 ++++ processor/deltatocumulativeprocessor/go.mod | 5 + .../internal/data/add.go | 51 ++++++- .../internal/data/data.go | 10 +- .../internal/data/expo/expo.go | 52 +++++++ .../internal/data/expo/expo_test.go | 63 +++++++++ .../internal/data/expo/expotest/bins.go | 81 +++++++++++ .../internal/data/expo/expotest/equal.go | 115 +++++++++++++++ .../internal/data/expo/expotest/equal_test.go | 73 ++++++++++ .../internal/data/expo/expotest/histogram.go | 65 +++++++++ .../internal/data/expo/merge.go | 37 +++++ .../internal/data/expo/merge_test.go | 53 +++++++ 
.../internal/data/expo/ord.go | 16 +++ .../internal/data/expo/ord_test.go | 40 ++++++ .../internal/data/expo/scale.go | 115 +++++++++++++++ .../internal/data/expo/scale_test.go | 90 ++++++++++++ .../internal/data/expo/zero.go | 68 +++++++++ .../internal/data/expo/zero_test.go | 125 +++++++++++++++++ .../internal/data/expo_test.go | 131 ++++++++++++++++++ .../internal/metrics/data.go | 2 +- .../internal/telemetry/metrics.go | 15 +- .../deltatocumulativeprocessor/processor.go | 59 +++++--- 22 files changed, 1263 insertions(+), 32 deletions(-) create mode 100644 .chloggen/deltatocumulative-exphist.yaml create mode 100644 processor/deltatocumulativeprocessor/internal/data/expo/expo.go create mode 100644 processor/deltatocumulativeprocessor/internal/data/expo/expo_test.go create mode 100644 processor/deltatocumulativeprocessor/internal/data/expo/expotest/bins.go create mode 100644 processor/deltatocumulativeprocessor/internal/data/expo/expotest/equal.go create mode 100644 processor/deltatocumulativeprocessor/internal/data/expo/expotest/equal_test.go create mode 100644 processor/deltatocumulativeprocessor/internal/data/expo/expotest/histogram.go create mode 100644 processor/deltatocumulativeprocessor/internal/data/expo/merge.go create mode 100644 processor/deltatocumulativeprocessor/internal/data/expo/merge_test.go create mode 100644 processor/deltatocumulativeprocessor/internal/data/expo/ord.go create mode 100644 processor/deltatocumulativeprocessor/internal/data/expo/ord_test.go create mode 100644 processor/deltatocumulativeprocessor/internal/data/expo/scale.go create mode 100644 processor/deltatocumulativeprocessor/internal/data/expo/scale_test.go create mode 100644 processor/deltatocumulativeprocessor/internal/data/expo/zero.go create mode 100644 processor/deltatocumulativeprocessor/internal/data/expo/zero_test.go create mode 100644 processor/deltatocumulativeprocessor/internal/data/expo_test.go diff --git a/.chloggen/deltatocumulative-exphist.yaml 
b/.chloggen/deltatocumulative-exphist.yaml new file mode 100644 index 000000000000..7dfa30bf54e4 --- /dev/null +++ b/.chloggen/deltatocumulative-exphist.yaml @@ -0,0 +1,29 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: "enhancement" + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: deltatocumulativeprocessor + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: exponential histogram accumulation + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [31340] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + accumulates exponential histogram datapoints by adding respective bucket counts. + also handles downscaling, changing zero-counts, offset adaptions and optional fields + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [user] diff --git a/processor/deltatocumulativeprocessor/go.mod b/processor/deltatocumulativeprocessor/go.mod index 859bfc1eb028..a49a75e803b6 100644 --- a/processor/deltatocumulativeprocessor/go.mod +++ b/processor/deltatocumulativeprocessor/go.mod @@ -4,6 +4,7 @@ go 1.21.0 require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.100.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.100.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.100.0 go.opentelemetry.io/collector/confmap v0.100.0 @@ -58,4 +59,8 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil => ../../pkg/pdatautil +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest => ../../pkg/pdatatest + replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics => ../../internal/exp/metrics + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden => ../../pkg/golden diff --git a/processor/deltatocumulativeprocessor/internal/data/add.go b/processor/deltatocumulativeprocessor/internal/data/add.go index b40bf05b916d..94a575b1bd9f 100644 --- a/processor/deltatocumulativeprocessor/internal/data/add.go +++ b/processor/deltatocumulativeprocessor/internal/data/add.go @@ -3,7 +3,13 @@ package data // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data" -import "go.opentelemetry.io/collector/pdata/pmetric" +import ( + "math" + + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" +) func (dp Number) Add(in Number) Number { switch in.ValueType() { @@ -23,7 +29,46 @@ func (dp Histogram) Add(in Histogram) Histogram { panic("todo") } -// nolint func (dp ExpHistogram) Add(in ExpHistogram) ExpHistogram { - 
panic("todo") + type H = ExpHistogram + + if dp.Scale() != in.Scale() { + hi, lo := expo.HiLo(dp, in, H.Scale) + from, to := expo.Scale(hi.Scale()), expo.Scale(lo.Scale()) + expo.Downscale(hi.Positive(), from, to) + expo.Downscale(hi.Negative(), from, to) + hi.SetScale(lo.Scale()) + } + + if dp.ZeroThreshold() != in.ZeroThreshold() { + hi, lo := expo.HiLo(dp, in, H.ZeroThreshold) + expo.WidenZero(lo.DataPoint, hi.ZeroThreshold()) + } + + expo.Merge(dp.Positive(), in.Positive()) + expo.Merge(dp.Negative(), in.Negative()) + + dp.SetTimestamp(in.Timestamp()) + dp.SetCount(dp.Count() + in.Count()) + dp.SetZeroCount(dp.ZeroCount() + in.ZeroCount()) + + if dp.HasSum() && in.HasSum() { + dp.SetSum(dp.Sum() + in.Sum()) + } else { + dp.RemoveSum() + } + + if dp.HasMin() && in.HasMin() { + dp.SetMin(math.Min(dp.Min(), in.Min())) + } else { + dp.RemoveMin() + } + + if dp.HasMax() && in.HasMax() { + dp.SetMax(math.Max(dp.Max(), in.Max())) + } else { + dp.RemoveMax() + } + + return dp } diff --git a/processor/deltatocumulativeprocessor/internal/data/data.go b/processor/deltatocumulativeprocessor/internal/data/data.go index 941b3cff904f..eade94eadf92 100644 --- a/processor/deltatocumulativeprocessor/internal/data/data.go +++ b/processor/deltatocumulativeprocessor/internal/data/data.go @@ -6,6 +6,8 @@ package data // import "github.com/open-telemetry/opentelemetry-collector-contri import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" ) type Point[Self any] interface { @@ -52,19 +54,19 @@ func (dp Histogram) CopyTo(dst Histogram) { } type ExpHistogram struct { - pmetric.ExponentialHistogramDataPoint + expo.DataPoint } func (dp ExpHistogram) Clone() ExpHistogram { - clone := ExpHistogram{ExponentialHistogramDataPoint: pmetric.NewExponentialHistogramDataPoint()} - if dp.ExponentialHistogramDataPoint != 
(pmetric.ExponentialHistogramDataPoint{}) { + clone := ExpHistogram{DataPoint: pmetric.NewExponentialHistogramDataPoint()} + if dp.DataPoint != (expo.DataPoint{}) { dp.CopyTo(clone) } return clone } func (dp ExpHistogram) CopyTo(dst ExpHistogram) { - dp.ExponentialHistogramDataPoint.CopyTo(dst.ExponentialHistogramDataPoint) + dp.DataPoint.CopyTo(dst.DataPoint) } type mustPoint[D Point[D]] struct{ _ D } diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/expo.go b/processor/deltatocumulativeprocessor/internal/data/expo/expo.go new file mode 100644 index 000000000000..2011e3cd811e --- /dev/null +++ b/processor/deltatocumulativeprocessor/internal/data/expo/expo.go @@ -0,0 +1,52 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package expo implements various operations on exponential histograms and their bucket counts +package expo // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" + +import "go.opentelemetry.io/collector/pdata/pmetric" + +type ( + DataPoint = pmetric.ExponentialHistogramDataPoint + Buckets = pmetric.ExponentialHistogramDataPointBuckets +) + +// Abs returns a view into the buckets using an absolute scale +func Abs(bs Buckets) Absolute { + return Absolute{buckets: bs} +} + +type buckets = Buckets + +// Absolute addresses bucket counts using an absolute scale, such that it is +// interoperable with [Scale]. 
+// +// It spans from [[Absolute.Lower]:[Absolute.Upper]] +// +// NOTE: The zero-value is unusable, use [Abs] to construct +type Absolute struct { + buckets +} + +// Abs returns the value at absolute index 'at' +func (a Absolute) Abs(at int) uint64 { + if i, ok := a.idx(at); ok { + return a.BucketCounts().At(i) + } + return 0 +} + +// Upper returns the minimal index outside the set, such that every i < Upper +func (a Absolute) Upper() int { + return a.BucketCounts().Len() + int(a.Offset()) +} + +// Lower returns the minimal index inside the set, such that every i >= Lower +func (a Absolute) Lower() int { + return int(a.Offset()) +} + +func (a Absolute) idx(at int) (int, bool) { + idx := at - a.Lower() + return idx, idx >= 0 && idx < a.BucketCounts().Len() +} diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/expo_test.go b/processor/deltatocumulativeprocessor/internal/data/expo/expo_test.go new file mode 100644 index 000000000000..d7eb0cb2e9b3 --- /dev/null +++ b/processor/deltatocumulativeprocessor/internal/data/expo/expo_test.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package expo_test + +import ( + "fmt" + "testing" + + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/expotest" +) + +func TestAbsolute(t *testing.T) { + is := expotest.Is(t) + + bs := expotest.Bins{ø, 1, 2, 3, 4, 5, ø, ø}.Into() + abs := expo.Abs(bs) + + lo, up := abs.Lower(), abs.Upper() + is.Equalf(-2, lo, "lower-bound") + is.Equalf(3, up, "upper-bound") + + for i := lo; i < up; i++ { + got := abs.Abs(i) + is.Equal(bs.BucketCounts().At(i+2), got) + } +} + +func ExampleAbsolute() { + nums := []float64{0.4, 2.3, 2.4, 4.5} + + bs := expotest.Observe0(nums...) 
+ abs := expo.Abs(bs) + + s := expo.Scale(0) + for _, n := range nums { + fmt.Printf("%.1f belongs to bucket %+d\n", n, s.Idx(n)) + } + + fmt.Printf("\n index:") + for i := 0; i < bs.BucketCounts().Len(); i++ { + fmt.Printf(" %d", i) + } + fmt.Printf("\n abs:") + for i := abs.Lower(); i < abs.Upper(); i++ { + fmt.Printf(" %+d", i) + } + fmt.Printf("\ncounts:") + for i := abs.Lower(); i < abs.Upper(); i++ { + fmt.Printf(" %d", abs.Abs(i)) + } + + // Output: + // 0.4 belongs to bucket -2 + // 2.3 belongs to bucket +1 + // 2.4 belongs to bucket +1 + // 4.5 belongs to bucket +2 + // + // index: 0 1 2 3 4 + // abs: -2 -1 +0 +1 +2 + // counts: 1 0 0 2 1 +} diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/expotest/bins.go b/processor/deltatocumulativeprocessor/internal/data/expo/expotest/bins.go new file mode 100644 index 000000000000..13b4ce74c928 --- /dev/null +++ b/processor/deltatocumulativeprocessor/internal/data/expo/expotest/bins.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package expotest // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/expotest" + +import ( + "fmt" + "math" + + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" +) + +const ( + Empty = math.MaxUint64 + ø = Empty +) + +// index: 0 1 2 3 4 5 6 7 +// bucket: -3 -2 -1 0 1 2 3 4 +// bounds: (0.125,0.25], (0.25,0.5], (0.5,1], (1,2], (2,4], (4,8], (8,16], (16,32] +type Bins [8]uint64 + +func (bins Bins) Into() expo.Buckets { + start := 0 + for i := 0; i < len(bins); i++ { + if bins[i] != ø { + start = i + break + } + } + + end := len(bins) + for i := start; i < len(bins); i++ { + if bins[i] == ø { + end = i + break + } + } + + counts := bins[start:end] + + buckets := pmetric.NewExponentialHistogramDataPointBuckets() + 
buckets.SetOffset(int32(start - 3)) + buckets.BucketCounts().FromRaw(counts) + return buckets +} + +func ObserveInto(bs expo.Buckets, scale expo.Scale, pts ...float64) { + counts := bs.BucketCounts() + + for _, pt := range pts { + pt = math.Abs(pt) + if pt <= 0.125 || pt > 32 { + panic(fmt.Sprintf("out of bounds: 0.125 < %f <= 32", pt)) + } + + idx := scale.Idx(pt) - int(bs.Offset()) + switch { + case idx < 0: + bs.SetOffset(bs.Offset() + int32(idx)) + counts.FromRaw(append(make([]uint64, -idx), counts.AsRaw()...)) + idx = 0 + case idx >= counts.Len(): + counts.Append(make([]uint64, idx-counts.Len()+1)...) + } + + counts.SetAt(idx, counts.At(idx)+1) + } +} + +func Observe(scale expo.Scale, pts ...float64) expo.Buckets { + bs := pmetric.NewExponentialHistogramDataPointBuckets() + ObserveInto(bs, scale, pts...) + return bs +} + +func Observe0(pts ...float64) expo.Buckets { + return Observe(0, pts...) +} diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/expotest/equal.go b/processor/deltatocumulativeprocessor/internal/data/expo/expotest/equal.go new file mode 100644 index 000000000000..c34e7c1665bc --- /dev/null +++ b/processor/deltatocumulativeprocessor/internal/data/expo/expotest/equal.go @@ -0,0 +1,115 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package expotest // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/expotest" + +import ( + "reflect" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" +) + +// T is the testing helper. 
Most notably it provides [T.Equal] +type T struct { + testing.TB +} + +func Is(t testing.TB) T { + return T{TB: t} +} + +// Equal reports whether want and got are deeply equal. +// +// Unlike [reflect.DeepEqual] it first recursively checks exported fields +// and "getters", which are defined as an exported method with: +// - exactly zero input arguments +// - exactly one return value +// - does not start with 'Append' +// +// If this yields differences, those are reported and the test fails. +// If the compared values are [pmetric.ExponentialHistogramDataPoint], then +// [pmetrictest.CompareExponentialHistogramDataPoint] is also called. +// +// If no differences are found, it falls back to [assert.Equal]. +// +// This was done to aid readability when comparing deeply nested [pmetric]/[pcommon] types, +// because in many cases [assert.Equal] output was found to be barely understandable. +func (is T) Equal(want, got any) { + is.Helper() + equal(is.TB, want, got, "") +} + +func (is T) Equalf(want, got any, name string) { + is.Helper() + equal(is.TB, want, got, name) +} + +func equal(t testing.TB, want, got any, name string) bool { + t.Helper() + require.IsType(t, want, got) + + vw := reflect.ValueOf(want) + vg := reflect.ValueOf(got) + + if vw.Kind() != reflect.Struct { + ok := reflect.DeepEqual(want, got) + if !ok { + t.Errorf("%s: %+v != %+v", name, want, got) + } + return ok + } + + ok := true + // compare all "getters" of the struct + for i := 0; i < vw.NumMethod(); i++ { + mname := vw.Type().Method(i).Name + fname := strings.TrimPrefix(name+"."+mname+"()", ".") + + mw := vw.Method(i) + mg := vg.Method(i) + + // only compare "getters" + if mw.Type().NumIn() != 0 || mw.Type().NumOut() != 1 { + continue + } + // Append(Empty) fails above heuristic, exclude it + if strings.HasPrefix(mname, "Append") { + continue + } + + rw := mw.Call(nil)[0].Interface() + rg := mg.Call(nil)[0].Interface() + + ok = equal(t, rw, rg, fname) && ok + } + + // compare all exported fields 
of the struct + for i := 0; i < vw.NumField(); i++ { + if !vw.Type().Field(i).IsExported() { + continue + } + fname := name + "." + vw.Type().Field(i).Name + fw := vw.Field(i).Interface() + fg := vg.Field(i).Interface() + ok = equal(t, fw, fg, fname) && ok + } + if !ok { + return false + } + + if _, ok := want.(expo.DataPoint); ok { + err := pmetrictest.CompareExponentialHistogramDataPoint(want.(expo.DataPoint), got.(expo.DataPoint)) + if err != nil { + t.Error(err) + } + } + + // fallback to a full deep-equal for rare cases (unexported fields, etc) + return assert.Equal(t, want, got) +} diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/expotest/equal_test.go b/processor/deltatocumulativeprocessor/internal/data/expo/expotest/equal_test.go new file mode 100644 index 000000000000..7fb7c42b586e --- /dev/null +++ b/processor/deltatocumulativeprocessor/internal/data/expo/expotest/equal_test.go @@ -0,0 +1,73 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package expotest + +import ( + "fmt" + "path/filepath" + "runtime" + "strconv" + "strings" + "testing" + + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" +) + +var t testing.TB = fakeT{} + +var expotest = struct { + Is func(t testing.TB) T + Observe func(expo.Scale, ...float64) expo.Buckets +}{ + Is: Is, + Observe: Observe, +} + +func ExampleT_Equal() { + is := expotest.Is(t) + + want := Histogram{ + PosNeg: expotest.Observe(expo.Scale(0), 1, 2, 3, 4), + Scale: 0, + }.Into() + + got := Histogram{ + PosNeg: expotest.Observe(expo.Scale(1), 1, 1, 1, 1), + Scale: 1, + }.Into() + + is.Equal(want, got) + + // Output: + // equal_test.go:40: Negative().BucketCounts().AsRaw(): [1 1 2] != [4] + // equal_test.go:40: Negative().BucketCounts().Len(): 3 != 1 + // equal_test.go:40: Positive().BucketCounts().AsRaw(): [1 1 2] != [4] + // equal_test.go:40: Positive().BucketCounts().Len(): 3 != 1 + // 
equal_test.go:40: Scale(): 0 != 1 +} + +func TestNone(*testing.T) {} + +type fakeT struct { + testing.TB +} + +func (t fakeT) Helper() {} + +func (t fakeT) Errorf(format string, args ...any) { + var from string + for i := 0; ; i++ { + pc, file, line, ok := runtime.Caller(i) + if !ok { + break + } + fn := runtime.FuncForPC(pc) + if strings.HasSuffix(fn.Name(), ".ExampleT_Equal") { + from = filepath.Base(file) + ":" + strconv.Itoa(line) + break + } + } + + fmt.Printf("%s: %s\n", from, fmt.Sprintf(format, args...)) +} diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/expotest/histogram.go b/processor/deltatocumulativeprocessor/internal/data/expo/expotest/histogram.go new file mode 100644 index 000000000000..141dad724d82 --- /dev/null +++ b/processor/deltatocumulativeprocessor/internal/data/expo/expotest/histogram.go @@ -0,0 +1,65 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package expotest // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/expotest" + +import ( + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" +) + +type Histogram struct { + Ts pcommon.Timestamp + + Pos, Neg expo.Buckets + PosNeg expo.Buckets + + Scale int + Count uint64 + Sum *float64 + + Min, Max *float64 + + Zt float64 + Zc uint64 +} + +func (hist Histogram) Into() expo.DataPoint { + dp := pmetric.NewExponentialHistogramDataPoint() + dp.SetTimestamp(hist.Ts) + + if !zero(hist.PosNeg) { + hist.PosNeg.CopyTo(dp.Positive()) + hist.PosNeg.CopyTo(dp.Negative()) + } + + if !zero(hist.Pos) { + hist.Pos.MoveTo(dp.Positive()) + } + if !zero(hist.Neg) { + hist.Neg.MoveTo(dp.Negative()) + } + + dp.SetCount(hist.Count) + if hist.Sum != nil { + dp.SetSum(*hist.Sum) + } + + if hist.Min != nil { + dp.SetMin(*hist.Min) 
+ } + if hist.Max != nil { + dp.SetMax(*hist.Max) + } + + dp.SetScale(int32(hist.Scale)) + dp.SetZeroThreshold(hist.Zt) + dp.SetZeroCount(hist.Zc) + return dp +} + +func zero[T comparable](v T) bool { + return v == *new(T) +} diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/merge.go b/processor/deltatocumulativeprocessor/internal/data/expo/merge.go new file mode 100644 index 000000000000..150e29a65819 --- /dev/null +++ b/processor/deltatocumulativeprocessor/internal/data/expo/merge.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package expo // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" + +import ( + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// Merge combines the counts of buckets a and b into a. +// Both buckets MUST be of same scale +func Merge(arel, brel Buckets) { + if brel.BucketCounts().Len() == 0 { + return + } + if arel.BucketCounts().Len() == 0 { + brel.CopyTo(arel) + return + } + + a, b := Abs(arel), Abs(brel) + + lo := min(a.Lower(), b.Lower()) + up := max(a.Upper(), b.Upper()) + + size := up - lo + + counts := pcommon.NewUInt64Slice() + counts.Append(make([]uint64, size-counts.Len())...) 
+ + for i := 0; i < counts.Len(); i++ { + counts.SetAt(i, a.Abs(lo+i)+b.Abs(lo+i)) + } + + a.SetOffset(int32(lo)) + counts.MoveTo(a.BucketCounts()) +} diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/merge_test.go b/processor/deltatocumulativeprocessor/internal/data/expo/merge_test.go new file mode 100644 index 000000000000..4d3791721bcd --- /dev/null +++ b/processor/deltatocumulativeprocessor/internal/data/expo/merge_test.go @@ -0,0 +1,53 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package expo_test + +import ( + "fmt" + "testing" + + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/expotest" +) + +const ø = expotest.Empty + +type bins = expotest.Bins + +func TestMerge(t *testing.T) { + cases := []struct { + a, b bins + want bins + }{{ + // -3 -2 -1 0 1 2 3 4 + a: bins{ø, ø, ø, ø, ø, ø, ø, ø}, + b: bins{ø, ø, ø, ø, ø, ø, ø, ø}, + want: bins{ø, ø, ø, ø, ø, ø, ø, ø}, + }, { + a: bins{ø, ø, 1, 1, 1, ø, ø, ø}, + b: bins{ø, 1, 1, ø, ø, ø, ø, ø}, + want: bins{ø, 1, 2, 1, 1, ø, ø, ø}, + }, { + a: bins{ø, ø, ø, ø, 1, 1, 1, ø}, + b: bins{ø, ø, ø, ø, 1, 1, 1, ø}, + want: bins{ø, ø, ø, ø, 2, 2, 2, ø}, + }, { + a: bins{ø, 1, 1, ø, ø, ø, ø, ø}, + b: bins{ø, ø, ø, ø, 1, 1, ø, ø}, + want: bins{ø, 1, 1, 0, 1, 1, ø, ø}, + }} + + for _, cs := range cases { + a := cs.a.Into() + b := cs.b.Into() + want := cs.want.Into() + + name := fmt.Sprintf("(%+d,%d)+(%+d,%d)=(%+d,%d)", a.Offset(), a.BucketCounts().Len(), b.Offset(), b.BucketCounts().Len(), want.Offset(), want.BucketCounts().Len()) + t.Run(name, func(t *testing.T) { + expo.Merge(a, b) + is := expotest.Is(t) + is.Equal(want, a) + }) + } +} diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/ord.go b/processor/deltatocumulativeprocessor/internal/data/expo/ord.go new file 
mode 100644 index 000000000000..34d177be1795 --- /dev/null +++ b/processor/deltatocumulativeprocessor/internal/data/expo/ord.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package expo // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" + +import "cmp" + +// HiLo returns the greater of a and b by comparing the result of applying fn to +// each. If equal, returns operands as passed +func HiLo[T any, N cmp.Ordered](a, b T, fn func(T) N) (hi, lo T) { + an, bn := fn(a), fn(b) + if cmp.Less(an, bn) { + return b, a + } + return a, b +} diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/ord_test.go b/processor/deltatocumulativeprocessor/internal/data/expo/ord_test.go new file mode 100644 index 000000000000..dedc60b50f27 --- /dev/null +++ b/processor/deltatocumulativeprocessor/internal/data/expo/ord_test.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package expo_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" +) + +func TestHiLo(t *testing.T) { + type T struct { + int int + str string + } + + a := T{int: 0, str: "foo"} + b := T{int: 1, str: "bar"} + + { + hi, lo := expo.HiLo(a, b, func(v T) int { return v.int }) + assert.Equal(t, a, lo) + assert.Equal(t, b, hi) + } + + { + hi, lo := expo.HiLo(a, b, func(v T) string { return v.str }) + assert.Equal(t, b, lo) + assert.Equal(t, a, hi) + } + + { + hi, lo := expo.HiLo(a, b, func(T) int { return 0 }) + assert.Equal(t, a, hi) + assert.Equal(t, b, lo) + } +} diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/scale.go b/processor/deltatocumulativeprocessor/internal/data/expo/scale.go new file mode 100644 index 000000000000..ac075158dc3c --- /dev/null +++ 
b/processor/deltatocumulativeprocessor/internal/data/expo/scale.go @@ -0,0 +1,115 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package expo // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" + +import ( + "fmt" + "math" +) + +type Scale int32 + +// Idx gives the bucket index v belongs into +func (scale Scale) Idx(v float64) int { + // from: https://opentelemetry.io/docs/specs/otel/metrics/data-model/#all-scales-use-the-logarithm-function + + // Special case for power-of-two values. + if frac, exp := math.Frexp(v); frac == 0.5 { + return ((exp - 1) << scale) - 1 + } + + scaleFactor := math.Ldexp(math.Log2E, int(scale)) + // Note: math.Floor(value) equals math.Ceil(value)-1 when value + // is not a power of two, which is checked above. + return int(math.Floor(math.Log(v) * scaleFactor)) +} + +// Bounds returns the half-open interval (min,max] of the bucket at index. +// This means a value min < v <= max belongs to this bucket. +// +// NOTE: this is different from Go slice intervals, which are [a,b) +func (scale Scale) Bounds(index int) (min, max float64) { + // from: https://opentelemetry.io/docs/specs/otel/metrics/data-model/#all-scales-use-the-logarithm-function + lower := func(index int) float64 { + inverseFactor := math.Ldexp(math.Ln2, int(-scale)) + return math.Exp(float64(index) * inverseFactor) + } + + return lower(index), lower(index + 1) +} + +// Downscale collapses the buckets of bs until scale 'to' is reached +func Downscale(bs Buckets, from, to Scale) { + switch { + case from == to: + return + case from < to: + // because even distribution within the buckets cannot be assumed, it is + // not possible to correctly upscale (split) buckets. + // any attempt to do so would yield erronous data. 
+ panic(fmt.Sprintf("cannot upscale without introducing error (%d -> %d)", from, to)) + } + + for at := from; at > to; at-- { + Collapse(bs) + } +} + +// Collapse merges adjacent buckets and zeros the remaining area: +// +// before: 1 1 1 1 1 1 1 1 1 1 1 1 +// after: 2 2 2 2 2 2 0 0 0 0 0 0 +// +// Due to the "perfect subsetting" property of exponential histograms, this +// gives the same observation as before, but recorded at scale-1. See +// https://opentelemetry.io/docs/specs/otel/metrics/data-model/#exponential-scale. +// +// Because every bucket now spans twice as much range, half of the allocated +// counts slice is technically no longer required. It is zeroed but left in +// place to avoid future allocations, because observations may happen in that +// area at a later time. +func Collapse(bs Buckets) { + counts := bs.BucketCounts() + size := counts.Len() / 2 + if counts.Len()%2 != 0 { + size++ + } + + // merging needs to happen in pairs aligned to i=0. if offset is non-even, + // we need to shift the whole merging by one to make above condition true. + shift := 0 + if bs.Offset()%2 != 0 { + bs.SetOffset(bs.Offset() - 1) + shift-- + } + bs.SetOffset(bs.Offset() / 2) + + for i := 0; i < size; i++ { + // size is ~half of len. we add two buckets per iteration. + // k jumps in steps of 2, shifted if offset makes this necessary. + k := i*2 + shift + + // special case: we just started and had to shift. the left half of the + // new bucket is not actually stored, so only use counts[0]. + if i == 0 && k == -1 { + counts.SetAt(i, counts.At(k+1)) + continue + } + + // new[k] = old[k]+old[k+1] + counts.SetAt(i, counts.At(k)) + if k+1 < counts.Len() { + counts.SetAt(i, counts.At(k)+counts.At(k+1)) + } + } + + // zero the excess area. its not needed to represent the observation + // anymore, but kept for two reasons: + // 1. future observations may need it, no need to re-alloc then if kept + // 2. 
[pcommon.Uint64Slice] can not, in fact, be sliced, so getting rid + // of it would alloc ¯\_(ツ)_/¯ + for i := size; i < counts.Len(); i++ { + counts.SetAt(i, 0) + } +} diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/scale_test.go b/processor/deltatocumulativeprocessor/internal/data/expo/scale_test.go new file mode 100644 index 000000000000..ceb76eb1d44d --- /dev/null +++ b/processor/deltatocumulativeprocessor/internal/data/expo/scale_test.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package expo_test + +import ( + "fmt" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/expotest" +) + +func TestDownscale(t *testing.T) { + type Repr[T any] struct { + scale expo.Scale + bkt T + } + + cases := [][]Repr[string]{{ + {scale: 2, bkt: "1 1 1 1 1 1 1 1 1 1 1 1"}, + {scale: 1, bkt: " 2 2 2 2 2 2 "}, + {scale: 0, bkt: " 4 4 4 "}, + }, { + {scale: 2, bkt: "ø 1 1 1 1 1 1 1 1 1 1 1"}, + {scale: 1, bkt: " 1 2 2 2 2 2 "}, + {scale: 0, bkt: " 3 4 4 "}, + }, { + {scale: 2, bkt: "ø ø 1 1 1 1 1 1 1 1 1 1"}, + {scale: 1, bkt: " ø 2 2 2 2 2 "}, + {scale: 0, bkt: " 2 4 4 "}, + }, { + {scale: 2, bkt: "ø ø ø ø 1 1 1 1 1 1 1 1"}, + {scale: 1, bkt: " ø ø 2 2 2 2 "}, + {scale: 0, bkt: " ø 4 4 "}, + }, { + {scale: 2, bkt: "1 1 1 1 1 1 1 1 1 "}, + {scale: 1, bkt: " 2 2 2 2 1 "}, + {scale: 0, bkt: " 4 4 1 "}, + }, { + {scale: 2, bkt: "1 1 1 1 1 1 1 1 1 1 1 1"}, + {scale: 0, bkt: " 4 4 4 "}, + }} + + type B = expo.Buckets + for i, reprs := range cases { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + buckets := make([]Repr[B], len(reprs)) + for i, r := range reprs { + bkt := 
pmetric.NewExponentialHistogramDataPointBuckets() + for _, elem := range strings.Fields(r.bkt) { + if elem == "ø" { + bkt.SetOffset(bkt.Offset() + 1) + continue + } + n, err := strconv.Atoi(elem) + if err != nil { + panic(err) + } + bkt.BucketCounts().Append(uint64(n)) + } + buckets[i] = Repr[B]{scale: r.scale, bkt: bkt} + } + + is := expotest.Is(t) + for i := 0; i < len(buckets)-1; i++ { + expo.Downscale(buckets[i].bkt, buckets[i].scale, buckets[i+1].scale) + + is.Equalf(buckets[i+1].bkt.Offset(), buckets[i].bkt.Offset(), "offset") + + want := buckets[i+1].bkt.BucketCounts().AsRaw() + got := buckets[i].bkt.BucketCounts().AsRaw() + + is.Equalf(want, got[:len(want)], "counts") + is.Equalf(make([]uint64, len(got)-len(want)), got[len(want):], "extra-space") + } + }) + } + + t.Run("panics", func(t *testing.T) { + assert.PanicsWithValue(t, "cannot upscale without introducing error (8 -> 12)", func() { + expo.Downscale(bins{}.Into(), 8, 12) + }) + }) +} diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/zero.go b/processor/deltatocumulativeprocessor/internal/data/expo/zero.go new file mode 100644 index 000000000000..2d5401b39f5c --- /dev/null +++ b/processor/deltatocumulativeprocessor/internal/data/expo/zero.go @@ -0,0 +1,68 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package expo // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" + +import ( + "cmp" + "fmt" +) + +// WidenZero widens the zero-bucket to span at least [-width,width], possibly wider +// if min falls in the middle of a bucket. +// +// Both buckets counts MUST be of same scale. 
+func WidenZero(dp DataPoint, width float64) { + switch { + case width == dp.ZeroThreshold(): + return + case width < dp.ZeroThreshold(): + panic(fmt.Sprintf("min must be larger than current threshold (%f)", dp.ZeroThreshold())) + } + + scale := Scale(dp.Scale()) + zero := scale.Idx(width) // the largest bucket index inside the zero width + + widen := func(bs Buckets) { + abs := Abs(bs) + for i := abs.Lower(); i <= zero; i++ { + dp.SetZeroCount(dp.ZeroCount() + abs.Abs(i)) + } + + // right next to the new zero bucket, constrained to slice range + lo := clamp(zero+1, abs.Lower(), abs.Upper()) + abs.Slice(lo, abs.Upper()) + } + + widen(dp.Positive()) + widen(dp.Negative()) + + _, max := scale.Bounds(zero) + dp.SetZeroThreshold(max) +} + +// Slice drops data outside the range from <= i < to from the bucket counts. It behaves the same as Go's [a:b] +// +// Limitations: +// - due to a limitation of the pcommon package, slicing cannot happen in-place and allocates +// - in consequence, data outside the range is garbage collected +func (a Absolute) Slice(from, to int) { + lo, up := a.Lower(), a.Upper() + switch { + case from > to: + panic(fmt.Sprintf("bad bounds: must be from<=to (got %d<=%d)", from, to)) + case from < lo || to > up: + panic(fmt.Sprintf("%d:%d is out of bounds for %d:%d", from, to, lo, up)) + } + + first := from - lo + last := to - lo + + a.BucketCounts().FromRaw(a.BucketCounts().AsRaw()[first:last]) + a.SetOffset(int32(from)) +} + +// clamp constraints v to the range up..=lo +func clamp[N cmp.Ordered](v, lo, up N) N { + return max(lo, min(v, up)) +} diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/zero_test.go b/processor/deltatocumulativeprocessor/internal/data/expo/zero_test.go new file mode 100644 index 000000000000..92e9d88a38d1 --- /dev/null +++ b/processor/deltatocumulativeprocessor/internal/data/expo/zero_test.go @@ -0,0 +1,125 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package 
expo_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/expotest" +) + +type hist = expotest.Histogram + +func TestWidenZero(t *testing.T) { + cases := []struct { + name string + hist hist + want hist + min float64 + }{{ + // -3 -2 -1 0 1 2 3 4 + // (0.125,0.25], (0.25,0.5], (0.5,1], (1,2], (2,4], (4,8], (8,16], (16,32] + // + // -3 -2 -1 0 1 2 3 4 + hist: hist{PosNeg: bins{ø, ø, ø, ø, ø, ø, ø, ø}.Into(), Zt: 0, Zc: 0}, + want: hist{PosNeg: bins{ø, ø, ø, ø, ø, ø, ø, ø}.Into(), Zt: 0, Zc: 0}, + }, { + // zt=2 is upper boundary of bucket 0. keep buckets [1:n] + hist: hist{PosNeg: bins{ø, ø, 1, 2, 3, 4, 5, ø}.Into(), Zt: 0, Zc: 2}, + want: hist{PosNeg: bins{ø, ø, ø, ø, 3, 4, 5, ø}.Into(), Zt: 2, Zc: 2 + 2*(1+2)}, + }, { + // zt=3 is within bucket 1. 
keep buckets [2:n] + // set zt=4 because it must cover full buckets + hist: hist{PosNeg: bins{ø, ø, 1, 2, 3, 4, 5, ø}.Into(), Zt: 0, Zc: 2}, + min: 3, + want: hist{PosNeg: bins{ø, ø, ø, ø, ø, 4, 5, ø}.Into(), Zt: 4, Zc: 2 + 2*(1+2+3)}, + }, { + // zt=2 is higher, but no change expected as no buckets in this range are populated + hist: hist{PosNeg: bins{ø, ø, ø, ø, ø, ø, 1, 1}.Into(), Zt: 1.0, Zc: 2}, + want: hist{PosNeg: bins{ø, ø, ø, ø, ø, ø, 1, 1}.Into(), Zt: 2.0, Zc: 2}, + }} + + for _, cs := range cases { + name := fmt.Sprintf("%.2f->%.2f", cs.hist.Zt, cs.want.Zt) + t.Run(name, func(t *testing.T) { + hist := cs.hist.Into() + want := cs.want.Into() + + zt := cs.min + if zt == 0 { + zt = want.ZeroThreshold() + } + expo.WidenZero(hist, zt) + + is := expotest.Is(t) + is.Equal(want, hist) + }) + } + + t.Run("panics", func(t *testing.T) { + assert.PanicsWithValue(t, "min must be larger than current threshold (1.500000)", func() { + hist := hist{Zt: 1.5}.Into() + expo.WidenZero(hist, 0.5) + }) + }) +} + +func TestSlice(t *testing.T) { + cases := []struct { + bins bins + want bins + }{{ + // -3 -2 -1 0 1 2 3 4 + bins: bins{ø, ø, ø, ø, ø, ø, ø, ø}, + want: bins{ø, ø, ø, ø, ø, ø, ø, ø}, + }, { + bins: bins{1, 2, 3, 4, 5, 6, 7, 8}, + want: bins{1, 2, 3, 4, 5, 6, 7, 8}, + }, { + bins: bins{ø, 2, 3, 4, 5, 6, 7, ø}, + want: bins{ø, ø, 3, 4, 5, ø, ø, ø}, + }} + + for _, cs := range cases { + from, to := 0, len(cs.want) + for i := 0; i < len(cs.want); i++ { + if cs.want[i] != ø { + from += i + break + } + } + for i := from; i < len(cs.want); i++ { + if cs.want[i] == ø { + to = i + break + } + } + from -= 3 + to -= 3 + + t.Run(fmt.Sprintf("[%d:%d]", from, to), func(t *testing.T) { + bins := cs.bins.Into() + want := cs.want.Into() + + expo.Abs(bins).Slice(from, to) + + is := expotest.Is(t) + is.Equal(want, bins) + }) + } + + t.Run("panics", func(t *testing.T) { + data := expo.Abs(bins{1, 2, 3, 4, 5, 6, 7, 8}.Into()) + assert.PanicsWithValue(t, "bad bounds: must be from<=to (got 
8<=4)", func() { + data.Slice(8, 4) + }) + assert.PanicsWithValue(t, "-6:12 is out of bounds for -3:5", func() { + data.Slice(-6, 12) + }) + }) +} diff --git a/processor/deltatocumulativeprocessor/internal/data/expo_test.go b/processor/deltatocumulativeprocessor/internal/data/expo_test.go new file mode 100644 index 000000000000..b910b409cb55 --- /dev/null +++ b/processor/deltatocumulativeprocessor/internal/data/expo_test.go @@ -0,0 +1,131 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package data + +import ( + "math" + "testing" + + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/expotest" +) + +// represents none/absent/unset in several tests +const ø = math.MaxUint64 + +func TestAdd(t *testing.T) { + type expdp = expotest.Histogram + type bins = expotest.Bins + var obs0 = expotest.Observe0 + + cases := []struct { + name string + dp, in expdp + want expdp + flip bool + }{{ + name: "noop", + dp: expdp{PosNeg: bins{0, 0, 0, 0, 0, 0, 0, 0}.Into(), Count: 0}, + in: expdp{PosNeg: bins{0, 0, 0, 0, 0, 0, 0, 0}.Into(), Count: 0}, + want: expdp{PosNeg: bins{0, 0, 0, 0, 0, 0, 0, 0}.Into(), Count: 0}, + }, { + name: "simple", + dp: expdp{PosNeg: bins{0, 0, 0, 0, 0, 0, 0, 0}.Into(), Count: 0}, + in: expdp{PosNeg: bins{1, 2, 3, 4, 5, 6, 7, 8}.Into(), Count: 2 * (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8)}, + want: expdp{PosNeg: bins{1, 2, 3, 4, 5, 6, 7, 8}.Into(), Count: 2 * (0 + (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8))}, + }, { + name: "lower+shorter", + dp: expdp{PosNeg: bins{ø, ø, ø, ø, ø, 1, 1, 1}.Into(), Count: 2 * 3}, + in: expdp{PosNeg: bins{ø, ø, 1, 1, 1, 1, 1, ø}.Into(), Count: 2 * 5}, + want: expdp{PosNeg: bins{ø, ø, 1, 1, 1, 2, 2, 1}.Into(), Count: 2 * (3 + 5)}, + }, { + name: "longer", + dp: expdp{PosNeg: bins{1, 
1, 1, 1, 1, ø, ø, ø}.Into(), Count: 2 * 5}, + in: expdp{PosNeg: bins{1, 1, 1, 1, 1, 1, 1, 1}.Into(), Count: 2 * 8}, + want: expdp{PosNeg: bins{2, 2, 2, 2, 2, 1, 1, 1}.Into(), Count: 2 * (5 + 8)}, + }, { + name: "optional/missing", flip: true, + dp: expdp{PosNeg: obs0(0.6, 2.4) /* */, Count: 2}, + in: expdp{PosNeg: obs0(1.5, 3.2, 6.3), Min: some(1.5), Max: some(6.3), Sum: some(11.0), Count: 3}, + want: expdp{PosNeg: obs0(0.6, 2.4, 1.5, 3.2, 6.3) /* */, Count: 5}, + }, { + name: "optional/min-max-sum", + dp: expdp{PosNeg: obs0(1.5, 5.3, 11.6) /* */, Min: some(1.5), Max: some(11.6), Sum: some(18.4), Count: 3}, + in: expdp{PosNeg: obs0(0.6, 3.3, 7.9) /* */, Min: some(0.6), Max: some(07.9), Sum: some(11.8), Count: 3}, + want: expdp{PosNeg: obs0(1.5, 5.3, 11.6, 0.6, 3.3, 7.9), Min: some(0.6), Max: some(11.6), Sum: some(30.2), Count: 6}, + }, { + name: "zero/count", + dp: expdp{PosNeg: bins{0, 1, 2}.Into(), Zt: 0, Zc: 3, Count: 5}, + in: expdp{PosNeg: bins{0, 1, 0}.Into(), Zt: 0, Zc: 2, Count: 3}, + want: expdp{PosNeg: bins{0, 2, 2}.Into(), Zt: 0, Zc: 5, Count: 8}, + }, { + name: "zero/diff", + dp: expdp{PosNeg: bins{ø, ø, 0, 1, 1, 1}.Into(), Zt: 0.0, Zc: 2}, + in: expdp{PosNeg: bins{ø, ø, ø, ø, 1, 1}.Into(), Zt: 2.0, Zc: 2}, + want: expdp{PosNeg: bins{ø, ø, ø, ø, 2, 2}.Into(), Zt: 2.0, Zc: 4 + 2*1}, + }, { + name: "zero/subzero", + dp: expdp{PosNeg: bins{ø, 1, 1, 1, 1, 1}.Into(), Zt: 0.2, Zc: 2}, + in: expdp{PosNeg: bins{ø, ø, 1, 1, 1, 1}.Into(), Zt: 0.3, Zc: 2}, + want: expdp{PosNeg: bins{ø, ø, 2, 2, 2, 2}.Into(), Zt: 0.5, Zc: 4 + 2*1}, + }, { + name: "negative-offset", + dp: expdp{PosNeg: rawbs([]uint64{ /* */ 1, 2}, -2)}, + in: expdp{PosNeg: rawbs([]uint64{1, 2, 3 /* */}, -5)}, + want: expdp{PosNeg: rawbs([]uint64{1, 2, 3, 1, 2}, -5)}, + }, { + name: "scale/diff", + dp: expdp{PosNeg: expotest.Observe(expo.Scale(1), 1, 2, 3, 4), Scale: 1}, + in: expdp{PosNeg: expotest.Observe(expo.Scale(0), 4, 3, 2, 1), Scale: 0}, + want: expdp{Scale: 0, PosNeg: func() expo.Buckets { + 
bs := pmetric.NewExponentialHistogramDataPointBuckets() + expotest.ObserveInto(bs, expo.Scale(0), 1, 2, 3, 4) + expotest.ObserveInto(bs, expo.Scale(0), 4, 3, 2, 1) + bs.BucketCounts().Append([]uint64{0, 0}...) // rescaling leaves zeroed memory. this is expected + return bs + }()}, + }} + + for _, cs := range cases { + run := func(dp, in expdp) func(t *testing.T) { + return func(t *testing.T) { + is := expotest.Is(t) + + var ( + dp = ExpHistogram{dp.Into()} + in = ExpHistogram{in.Into()} + want = ExpHistogram{cs.want.Into()} + ) + + dp.SetTimestamp(0) + in.SetTimestamp(1) + want.SetTimestamp(1) + + got := dp.Add(in) + is.Equal(want.DataPoint, got.DataPoint) + } + } + + if cs.flip { + t.Run(cs.name+"-dp", run(cs.dp, cs.in)) + t.Run(cs.name+"-in", run(cs.in, cs.dp)) + continue + } + t.Run(cs.name, run(cs.dp, cs.in)) + } + +} + +func rawbs(data []uint64, offset int32) expo.Buckets { + bs := pmetric.NewExponentialHistogramDataPointBuckets() + bs.BucketCounts().FromRaw(data) + bs.SetOffset(offset) + return bs +} + +func some[T any](v T) *T { + return &v +} diff --git a/processor/deltatocumulativeprocessor/internal/metrics/data.go b/processor/deltatocumulativeprocessor/internal/metrics/data.go index c305c85d781e..f063475055f7 100644 --- a/processor/deltatocumulativeprocessor/internal/metrics/data.go +++ b/processor/deltatocumulativeprocessor/internal/metrics/data.go @@ -47,7 +47,7 @@ type ExpHistogram Metric func (s ExpHistogram) At(i int) data.ExpHistogram { dp := Metric(s).ExponentialHistogram().DataPoints().At(i) - return data.ExpHistogram{ExponentialHistogramDataPoint: dp} + return data.ExpHistogram{DataPoint: dp} } func (s ExpHistogram) Len() int { diff --git a/processor/deltatocumulativeprocessor/internal/telemetry/metrics.go b/processor/deltatocumulativeprocessor/internal/telemetry/metrics.go index f3b88ef8b96a..946ffd98d1d6 100644 --- a/processor/deltatocumulativeprocessor/internal/telemetry/metrics.go +++ 
b/processor/deltatocumulativeprocessor/internal/telemetry/metrics.go @@ -19,11 +19,14 @@ import ( type Telemetry struct { Metrics + + meter metric.Meter } func New(meter metric.Meter) Telemetry { return Telemetry{ Metrics: metrics(meter), + meter: meter, } } @@ -89,23 +92,23 @@ func metrics(meter metric.Meter) Metrics { } } -func (m Metrics) WithLimit(meter metric.Meter, max int64) { +func (tel Telemetry) WithLimit(max int64) { then := metric.Callback(func(_ context.Context, o metric.Observer) error { - o.ObserveInt64(m.streams.limit, max) + o.ObserveInt64(tel.streams.limit, max) return nil }) - _, err := meter.RegisterCallback(then, m.streams.limit) + _, err := tel.meter.RegisterCallback(then, tel.streams.limit) if err != nil { panic(err) } } -func (m Metrics) WithStale(meter metric.Meter, max time.Duration) { +func (tel Telemetry) WithStale(max time.Duration) { then := metric.Callback(func(_ context.Context, o metric.Observer) error { - o.ObserveInt64(m.streams.stale, int64(max.Seconds())) + o.ObserveInt64(tel.streams.stale, int64(max.Seconds())) return nil }) - _, err := meter.RegisterCallback(then, m.streams.stale) + _, err := tel.meter.RegisterCallback(then, tel.streams.stale) if err != nil { panic(err) } diff --git a/processor/deltatocumulativeprocessor/processor.go b/processor/deltatocumulativeprocessor/processor.go index 59fe2c7c4c0c..01e1cef4f916 100644 --- a/processor/deltatocumulativeprocessor/processor.go +++ b/processor/deltatocumulativeprocessor/processor.go @@ -34,8 +34,8 @@ type Processor struct { ctx context.Context cancel context.CancelFunc - aggr streams.Aggregator[data.Number] - stale maybe.Ptr[staleness.Staleness[data.Number]] + sums Pipeline[data.Number] + expo Pipeline[data.ExpHistogram] mtx sync.Mutex } @@ -43,29 +43,43 @@ type Processor struct { func newProcessor(cfg *Config, log *zap.Logger, meter metric.Meter, next consumer.Metrics) *Processor { ctx, cancel := context.WithCancel(context.Background()) + tel := telemetry.New(meter) + proc 
:= Processor{ log: log, ctx: ctx, cancel: cancel, next: next, + + sums: pipeline[data.Number](cfg, &tel), + expo: pipeline[data.ExpHistogram](cfg, &tel), } - tel := telemetry.New(meter) + return &proc +} + +type Pipeline[D data.Point[D]] struct { + aggr streams.Aggregator[D] + stale maybe.Ptr[staleness.Staleness[D]] +} + +func pipeline[D data.Point[D]](cfg *Config, tel *telemetry.Telemetry) Pipeline[D] { + var pipe Pipeline[D] - var dps streams.Map[data.Number] - dps = delta.New[data.Number]() + var dps streams.Map[D] + dps = delta.New[D]() dps = telemetry.ObserveItems(dps, &tel.Metrics) if cfg.MaxStale > 0 { - tel.WithStale(meter, cfg.MaxStale) + tel.WithStale(cfg.MaxStale) stale := maybe.Some(staleness.NewStaleness(cfg.MaxStale, dps)) - proc.stale = stale + pipe.stale = stale dps, _ = stale.Try() } if cfg.MaxStreams > 0 { - tel.WithLimit(meter, int64(cfg.MaxStreams)) + tel.WithLimit(int64(cfg.MaxStreams)) lim := streams.Limit(dps, cfg.MaxStreams) - if stale, ok := proc.stale.Try(); ok { + if stale, ok := pipe.stale.Try(); ok { lim.Evictor = stale } dps = lim @@ -73,13 +87,14 @@ func newProcessor(cfg *Config, log *zap.Logger, meter metric.Meter, next consume dps = telemetry.ObserveNonFatal(dps, &tel.Metrics) - proc.aggr = streams.IntoAggregator(dps) - return &proc + pipe.aggr = streams.IntoAggregator(dps) + return pipe } func (p *Processor) Start(_ context.Context, _ component.Host) error { - stale, ok := p.stale.Try() - if !ok { + sums, sok := p.sums.stale.Try() + expo, eok := p.expo.stale.Try() + if !(sok && eok) { return nil } @@ -91,7 +106,8 @@ func (p *Processor) Start(_ context.Context, _ component.Host) error { return case <-tick.C: p.mtx.Lock() - stale.ExpireOldEntries() + sums.ExpireOldEntries() + expo.ExpireOldEntries() p.mtx.Unlock() } } @@ -109,27 +125,34 @@ func (p *Processor) Capabilities() consumer.Capabilities { } func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { + if err := context.Cause(p.ctx); err != nil { + 
return err + } + p.mtx.Lock() defer p.mtx.Unlock() var errs error - metrics.Each(md, func(m metrics.Metric) { switch m.Type() { case pmetric.MetricTypeSum: sum := m.Sum() if sum.AggregationTemporality() == pmetric.AggregationTemporalityDelta { - err := streams.Aggregate[data.Number](metrics.Sum(m), p.aggr) + err := streams.Aggregate(metrics.Sum(m), p.sums.aggr) errs = errors.Join(errs, err) sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } case pmetric.MetricTypeHistogram: // TODO case pmetric.MetricTypeExponentialHistogram: - // TODO + expo := m.ExponentialHistogram() + if expo.AggregationTemporality() == pmetric.AggregationTemporalityDelta { + err := streams.Aggregate(metrics.ExpHistogram(m), p.expo.aggr) + errs = errors.Join(errs, err) + expo.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + } } }) - if errs != nil { return errs } From 90935cec1e3cd64f263433f8b42b465f585b333b Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Tue, 14 May 2024 15:44:31 +0300 Subject: [PATCH 04/53] [pkg/stanza] Add container operator parser (#32594) **Description:** This PR implements the new container logs parser as it was proposed at https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/31959. **Link to tracking Issue:** https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/31959 **Testing:** Added unit tests. Providing manual testing steps as well: ### How to test this manually 1. Using the following config file: ```yaml receivers: filelog: start_at: end include_file_name: false include_file_path: true include: - /var/log/pods/*/*/*.log operators: - id: container-parser type: container output: m1 - type: move id: m1 from: attributes.k8s.pod.name to: attributes.val - id: some type: add field: attributes.key2.key_in value: val2 exporters: debug: verbosity: detailed service: pipelines: logs: receivers: [filelog] exporters: [debug] processors: [] ``` 2. 
Start the collector: `./bin/otelcontribcol_linux_amd64 --config ~/otelcol/container_parser/config.yaml` 3. Use the following bash script to create some logs: ```bash #! /bin/bash echo '2024-04-13T07:59:37.505201169-05:00 stdout P This is a very very long crio line th' >> /var/log/pods/kube-scheduler-kind-control-plane_49cc7c1fd3702c40b2686ea7486091d3/kube-scheduler43/1.log echo '{"log":"INFO: log line here","stream":"stdout","time":"2029-03-30T08:31:20.545192187Z"}' >> /var/log/pods/kube-controller-kind-control-plane_49cc7c1fd3702c40b2686ea7486091d6/kube-controller/1.log echo '2024-04-13T07:59:37.505201169-05:00 stdout F at is awesome! crio is awesome!' >> /var/log/pods/kube-scheduler-kind-control-plane_49cc7c1fd3702c40b2686ea7486091d3/kube-scheduler43/1.log echo '2021-06-22T10:27:25.813799277Z stdout P some containerd log th' >> /var/log/pods/kube-scheduler-kind-control-plane_49cc7c1fd3702c40b2686ea7486091d3/kube-scheduler44/1.log echo '{"log":"INFO: another log line here","stream":"stdout","time":"2029-03-30T08:31:20.545192187Z"}' >> /var/log/pods/kube-controller-kind-control-plane_49cc7c1fd3702c40b2686ea7486091d6/kube-controller/1.log echo '2021-06-22T10:27:25.813799277Z stdout F at is super awesome! Containerd is awesome' >> /var/log/pods/kube-scheduler-kind-control-plane_49cc7c1fd3702c40b2686ea7486091d3/kube-scheduler44/1.log echo '2024-04-13T07:59:37.505201169-05:00 stdout F standalone crio line which is awesome!' >> /var/log/pods/kube-scheduler-kind-control-plane_49cc7c1fd3702c40b2686ea7486091d3/kube-scheduler43/1.log echo '2021-06-22T10:27:25.813799277Z stdout F standalone containerd line that is super awesome!' >> /var/log/pods/kube-scheduler-kind-control-plane_49cc7c1fd3702c40b2686ea7486091d3/kube-scheduler44/1.log ``` 4. Run the above as a bash script to verify any parallel processing. Verify that the output is correct. ### Test manually on k8s 1. 
`make docker-otelcontribcol && docker tag otelcontribcol otelcontribcol-dev:0.0.1 && kind load docker-image otelcontribcol-dev:0.0.1` 2. Install using the following helm values file: ```yaml mode: daemonset presets: logsCollection: enabled: true image: repository: otelcontribcol-dev tag: "0.0.1" pullPolicy: IfNotPresent command: name: otelcontribcol config: exporters: debug: verbosity: detailed receivers: filelog: start_at: end include_file_name: false include_file_path: true exclude: - /var/log/pods/default_daemonset-opentelemetry-collector*_*/opentelemetry-collector/*.log include: - /var/log/pods/*/*/*.log operators: - id: container-parser type: container output: some - id: some type: add field: attributes.key2.key_in value: val2 service: pipelines: logs: receivers: [filelog] processors: [batch] exporters: [debug] ``` 3. Check collector's output to verify the logs are parsed properly: ```console 2024-05-10T07:52:02.307Z info LogsExporter {"kind": "exporter", "data_type": "logs", "name": "debug", "resource logs": 1, "log records": 2} 2024-05-10T07:52:02.307Z info ResourceLog #0 Resource SchemaURL: ScopeLogs #0 ScopeLogs SchemaURL: InstrumentationScope LogRecord #0 ObservedTimestamp: 2024-05-10 07:52:02.046236071 +0000 UTC Timestamp: 2024-05-10 07:52:01.92533954 +0000 UTC SeverityText: SeverityNumber: Unspecified(0) Body: Str(otel logs at 07:52:01) Attributes: -> log: Map({"iostream":"stdout"}) -> time: Str(2024-05-10T07:52:01.92533954Z) -> k8s: Map({"container":{"name":"busybox","restart_count":"0"},"namespace":{"name":"default"},"pod":{"name":"daemonset-logs-6f6mn","uid":"1069e46b-03b2-4532-a71f-aaec06c0197b"}}) -> logtag: Str(F) -> key2: Map({"key_in":"val2"}) -> log.file.path: Str(/var/log/pods/default_daemonset-logs-6f6mn_1069e46b-03b2-4532-a71f-aaec06c0197b/busybox/0.log) Trace ID: Span ID: Flags: 0 LogRecord #1 ObservedTimestamp: 2024-05-10 07:52:02.046411602 +0000 UTC Timestamp: 2024-05-10 07:52:02.027386192 +0000 UTC SeverityText: SeverityNumber: 
Unspecified(0) Body: Str(otel logs at 07:52:02) Attributes: -> log.file.path: Str(/var/log/pods/default_daemonset-logs-6f6mn_1069e46b-03b2-4532-a71f-aaec06c0197b/busybox/0.log) -> time: Str(2024-05-10T07:52:02.027386192Z) -> log: Map({"iostream":"stdout"}) -> logtag: Str(F) -> k8s: Map({"container":{"name":"busybox","restart_count":"0"},"namespace":{"name":"default"},"pod":{"name":"daemonset-logs-6f6mn","uid":"1069e46b-03b2-4532-a71f-aaec06c0197b"}}) -> key2: Map({"key_in":"val2"}) Trace ID: Span ID: Flags: 0 ... ``` **Documentation:** Added Signed-off-by: ChrsMark --- .chloggen/add_container_parser.yaml | 27 ++ pkg/stanza/adapter/register.go | 1 + pkg/stanza/docs/operators/container.md | 238 +++++++++++ pkg/stanza/operator/helper/regexp.go | 28 ++ .../operator/parser/container/config.go | 120 ++++++ .../operator/parser/container/config_test.go | 107 +++++ .../operator/parser/container/package_test.go | 14 + .../operator/parser/container/parser.go | 357 +++++++++++++++++ .../operator/parser/container/parser_test.go | 370 ++++++++++++++++++ .../parser/container/testdata/config.yaml | 41 ++ pkg/stanza/operator/parser/regex/parser.go | 17 +- 11 files changed, 1306 insertions(+), 14 deletions(-) create mode 100644 .chloggen/add_container_parser.yaml create mode 100644 pkg/stanza/docs/operators/container.md create mode 100644 pkg/stanza/operator/helper/regexp.go create mode 100644 pkg/stanza/operator/parser/container/config.go create mode 100644 pkg/stanza/operator/parser/container/config_test.go create mode 100644 pkg/stanza/operator/parser/container/package_test.go create mode 100644 pkg/stanza/operator/parser/container/parser.go create mode 100644 pkg/stanza/operator/parser/container/parser_test.go create mode 100644 pkg/stanza/operator/parser/container/testdata/config.yaml diff --git a/.chloggen/add_container_parser.yaml b/.chloggen/add_container_parser.yaml new file mode 100644 index 000000000000..b6b4406b8f43 --- /dev/null +++ b/.chloggen/add_container_parser.yaml 
@@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: filelogreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add container operator parser + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [31959] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [user] diff --git a/pkg/stanza/adapter/register.go b/pkg/stanza/adapter/register.go index 8105ef17d587..426e456decfa 100644 --- a/pkg/stanza/adapter/register.go +++ b/pkg/stanza/adapter/register.go @@ -6,6 +6,7 @@ package adapter // import "github.com/open-telemetry/opentelemetry-collector-con import ( _ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file" // Register parsers and transformers for stanza-based log receivers _ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout" + _ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container" _ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/csv" _ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/json" _ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/jsonarray" diff --git a/pkg/stanza/docs/operators/container.md b/pkg/stanza/docs/operators/container.md new file mode 100644 index 000000000000..4cc972fbc5ed --- /dev/null +++ b/pkg/stanza/docs/operators/container.md @@ -0,0 +1,238 @@ +## `container` operator + +The `container` operator parses logs in `docker`, `cri-o` and `containerd` formats. + +### Configuration Fields + +| Field | Default | Description | +|------------------------------|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `id` | `container` | A unique identifier for the operator. | +| `format` | `` | The container log format to use if it is known. Users can choose between `docker`, `crio` and `containerd`. If not set, the format will be automatically detected. 
| +| `add_metadata_from_filepath` | `true` | Set if k8s metadata should be added from the file path. Requires the `log.file.path` field to be present. | +| `output` | Next in pipeline | The connected operator(s) that will receive all outbound entries. | +| `parse_from` | `body` | The [field](../types/field.md) from which the value will be parsed. | +| `parse_to` | `attributes` | The [field](../types/field.md) to which the value will be parsed. | +| `on_error` | `send` | The behavior of the operator if it encounters an error. See [on_error](../types/on_error.md). | +| `if` | | An [expression](../types/expression.md) that, when set, will be evaluated to determine whether this operator should be used for the given entry. This allows you to do easy conditional parsing without branching logic with routers. | +| `severity` | `nil` | An optional [severity](../types/severity.md) block which will parse a severity field before passing the entry to the output operator. | + + +### Embedded Operations + +The `container` parser can be configured to embed certain operations such as the severity parsing. For more information, see [complex parsers](../types/parsers.md#complex-parsers). + +### Add metadata from file path + +Requires `include_file_path: true` in order for the `log.file.path` field to be available for the operator. +If that's not possible, users can disable the metadata addition with `add_metadata_from_filepath: false`. 
+A file path like `"/var/log/pods/some-ns_kube-controller-kind-control-plane_49cc7c1fd3702c40b2686ea7486091d6/kube-controller/1.log"`, +will produce the following k8s metadata: + +```json +{ + "attributes": { + "k8s": { + "container": { + "name": "kube-controller", + "restart_count": "1" + }, "pod": { + "uid": "49cc7c1fd3702c40b2686ea7486091d6", + "name": "kube-controller-kind-control-plane" + }, "namespace": { + "name": "some-ns" + } + } + } +} +``` + +### Example Configurations: + +#### Parse the body as docker container log + +Configuration: +```yaml +- type: container + format: docker + add_metadata_from_filepath: true +``` + +Note: in this example the `format: docker` is optional since formats can be automatically detected as well. + `add_metadata_from_filepath` is true by default as well. + + + + + + + +
Input body Output body
+ +```json +{ + "timestamp": "", + "body": "{\"log\":\"INFO: log line here\",\"stream\":\"stdout\",\"time\":\"2029-03-30T08:31:20.545192187Z\"}", + "log.file.path": "/var/log/pods/some_kube-controller-kind-control-plane_49cc7c1fd3702c40b2686ea7486091d6/kube-controller/1.log" +} +``` + + + +```json +{ + "timestamp": "2024-03-30 08:31:20.545192187 +0000 UTC", + "body": "log line here", + "attributes": { + "time": "2024-03-30T08:31:20.545192187Z", + "log.iostream": "stdout", + "k8s.pod.name": "kube-controller-kind-control-plane", + "k8s.pod.uid": "49cc7c1fd3702c40b2686ea7486091d6", + "k8s.container.name": "kube-controller", + "k8s.container.restart_count": "1", + "k8s.namespace.name": "some", + "log.file.path": "/var/log/pods/some_kube-controller-kind-control-plane_49cc7c1fd3702c40b2686ea7486091d6/kube-controller/1.log" + } +} +``` + +
+ +#### Parse the body as cri-o container log + +Configuration: +```yaml +- type: container +``` + + + + + + + +
Input body Output body
+ +```json +{ + "timestamp": "", + "body": "2024-04-13T07:59:37.505201169-05:00 stdout F standalone crio line which is awesome", + "log.file.path": "/var/log/pods/some_kube-controller-kind-control-plane_49cc7c1fd3702c40b2686ea7486091d6/kube-controller/1.log" +} +``` + + + +```json +{ + "timestamp": "2024-04-13 12:59:37.505201169 +0000 UTC", + "body": "standalone crio line which is awesome", + "attributes": { + "time": "2024-04-13T07:59:37.505201169-05:00", + "logtag": "F", + "log.iostream": "stdout", + "k8s.pod.name": "kube-controller-kind-control-plane", + "k8s.pod.uid": "49cc7c1fd3702c40b2686ea7486091d6", + "k8s.container.name": "kube-controller", + "k8s.container.restart_count": "1", + "k8s.namespace.name": "some", + "log.file.path": "/var/log/pods/some_kube-controller-kind-control-plane_49cc7c1fd3702c40b2686ea7486091d6/kube-controller/1.log" + } +} +``` + +
+ +#### Parse the body as containerd container log + +Configuration: +```yaml +- type: container +``` + + + + + + + +
Input body Output body
+ +```json +{ + "timestamp": "", + "body": "2023-06-22T10:27:25.813799277Z stdout F standalone containerd line that is super awesome", + "log.file.path": "/var/log/pods/some_kube-controller-kind-control-plane_49cc7c1fd3702c40b2686ea7486091d6/kube-controller/1.log" +} +``` + + + +```json +{ + "timestamp": "2023-06-22 10:27:25.813799277 +0000 UTC", + "body": "standalone containerd line that is super awesome", + "attributes": { + "time": "2023-06-22T10:27:25.813799277Z", + "logtag": "F", + "log.iostream": "stdout", + "k8s.pod.name": "kube-controller-kind-control-plane", + "k8s.pod.uid": "49cc7c1fd3702c40b2686ea7486091d6", + "k8s.container.name": "kube-controller", + "k8s.container.restart_count": "1", + "k8s.namespace.name": "some", + "log.file.path": "/var/log/pods/some_kube-controller-kind-control-plane_49cc7c1fd3702c40b2686ea7486091d6/kube-controller/1.log" + } +} +``` + +
+ +#### Parse the multiline as containerd container log and recombine into a single one + +Configuration: +```yaml +- type: container +``` + + + + + + + +
Input body Output body
+ +```json +{ + "timestamp": "", + "body": "2023-06-22T10:27:25.813799277Z stdout P multiline containerd line that i", + "log.file.path": "/var/log/pods/some_kube-controller-kind-control-plane_49cc7c1fd3702c40b2686ea7486091d6/kube-controller/1.log" +}, +{ + "timestamp": "", + "body": "2023-06-22T10:27:25.813799277Z stdout F s super awesomne", + "log.file.path": "/var/log/pods/some_kube-controller-kind-control-plane_49cc7c1fd3702c40b2686ea7486091d6/kube-controller/1.log" +} +``` + + + +```json +{ + "timestamp": "2023-06-22 10:27:25.813799277 +0000 UTC", + "body": "multiline containerd line that is super awesome", + "attributes": { + "time": "2023-06-22T10:27:25.813799277Z", + "logtag": "F", + "log.iostream": "stdout", + "k8s.pod.name": "kube-controller-kind-control-plane", + "k8s.pod.uid": "49cc7c1fd3702c40b2686ea7486091d6", + "k8s.container.name": "kube-controller", + "k8s.container.restart_count": "1", + "k8s.namespace.name": "some", + "log.file.path": "/var/log/pods/some_kube-controller-kind-control-plane_49cc7c1fd3702c40b2686ea7486091d6/kube-controller/1.log" + } +} +``` + +
\ No newline at end of file diff --git a/pkg/stanza/operator/helper/regexp.go b/pkg/stanza/operator/helper/regexp.go new file mode 100644 index 000000000000..7306926ced79 --- /dev/null +++ b/pkg/stanza/operator/helper/regexp.go @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package helper // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" + +import ( + "fmt" + "regexp" +) + +func MatchValues(value string, regexp *regexp.Regexp) (map[string]any, error) { + matches := regexp.FindStringSubmatch(value) + if matches == nil { + return nil, fmt.Errorf("regex pattern does not match") + } + + parsedValues := map[string]any{} + for i, subexp := range regexp.SubexpNames() { + if i == 0 { + // Skip whole match + continue + } + if subexp != "" { + parsedValues[subexp] = matches[i] + } + } + return parsedValues, nil +} diff --git a/pkg/stanza/operator/parser/container/config.go b/pkg/stanza/operator/parser/container/config.go new file mode 100644 index 000000000000..fb6555708182 --- /dev/null +++ b/pkg/stanza/operator/parser/container/config.go @@ -0,0 +1,120 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package container // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container" + +import ( + "fmt" + "sync" + + jsoniter "github.com/json-iterator/go" + "go.opentelemetry.io/collector/component" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/errors" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/transformer/recombine" +) + +const operatorType = "container" + +func init() { + 
operator.Register(operatorType, func() operator.Builder { return NewConfig() })
+}
+
+// NewConfig creates a new container parser config with default values
+func NewConfig() *Config {
+	return NewConfigWithID(operatorType)
+}
+
+// NewConfigWithID creates a new container parser config with default values
+func NewConfigWithID(operatorID string) *Config {
+	return &Config{
+		ParserConfig:            helper.NewParserConfig(operatorID, operatorType),
+		Format:                  "",
+		AddMetadataFromFilePath: true,
+	}
+}
+
+// Config is the configuration of a Container parser operator.
+type Config struct {
+	helper.ParserConfig `mapstructure:",squash"`
+
+	Format                  string `mapstructure:"format"`
+	AddMetadataFromFilePath bool   `mapstructure:"add_metadata_from_filepath"`
+}
+
+// Build will build a Container parser operator.
+func (c Config) Build(set component.TelemetrySettings) (operator.Operator, error) {
+	parserOperator, err := c.ParserConfig.Build(set)
+	if err != nil {
+		return nil, err
+	}
+
+	cLogEmitter := helper.NewLogEmitter(set.Logger.Sugar())
+	recombineParser, err := createRecombine(set, cLogEmitter)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create internal recombine config: %w", err)
+	}
+
+	wg := sync.WaitGroup{}
+
+	if c.Format != "" {
+		switch c.Format {
+		case dockerFormat, crioFormat, containerdFormat:
+		default:
+			return &Parser{}, errors.NewError(
+				"operator config has an invalid `format` field.",
+				"ensure that the `format` field is set to one of `docker`, `crio`, `containerd`.",
+				"format", c.OnError,
+			)
+		}
+	}
+
+	p := &Parser{
+		ParserOperator:          parserOperator,
+		recombineParser:         recombineParser,
+		json:                    jsoniter.ConfigFastest,
+		format:                  c.Format,
+		addMetadataFromFilepath: c.AddMetadataFromFilePath,
+		crioLogEmitter:          cLogEmitter,
+		criConsumers:            &wg,
+	}
+	return p, nil
+}
+
+// createRecombine creates an internal recombine operator which outputs to an async helper.LogEmitter
+// the equivalent recombine config:
+//
+//	combine_field: body
+//	combine_with: ""
+//
is_last_entry: attributes.logtag == 'F'
+//	max_log_size: 102400
+//	source_identifier: attributes["log.file.path"]
+//	type: recombine
+func createRecombine(set component.TelemetrySettings, cLogEmitter *helper.LogEmitter) (operator.Operator, error) {
+	recombineParserCfg := createRecombineConfig()
+	recombineParser, err := recombineParserCfg.Build(set)
+	if err != nil {
+		return nil, fmt.Errorf("failed to resolve internal recombine config: %w", err)
+	}
+
+	// set the LogEmitter as the output of the recombine parser
+	recombineParser.SetOutputIDs([]string{cLogEmitter.OperatorID})
+	if err := recombineParser.SetOutputs([]operator.Operator{cLogEmitter}); err != nil {
+		return nil, fmt.Errorf("failed to set outputs of internal recombine")
+	}
+
+	return recombineParser, nil
+}
+
+func createRecombineConfig() *recombine.Config {
+	recombineParserCfg := recombine.NewConfigWithID(recombineInternalID)
+	recombineParserCfg.IsLastEntry = "attributes.logtag == 'F'"
+	recombineParserCfg.CombineField = entry.NewBodyField()
+	recombineParserCfg.CombineWith = ""
+	recombineParserCfg.SourceIdentifier = entry.NewAttributeField("log.file.path")
+	recombineParserCfg.MaxLogSize = 102400
+	return recombineParserCfg
+}
diff --git a/pkg/stanza/operator/parser/container/config_test.go b/pkg/stanza/operator/parser/container/config_test.go
new file mode 100644
index 000000000000..599c26c1b7fd
--- /dev/null
+++ b/pkg/stanza/operator/parser/container/config_test.go
@@ -0,0 +1,107 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+package container
+
+import (
+	"path/filepath"
+	"testing"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/operatortest"
+)
+
+func TestConfig(t *testing.T) {
+	operatortest.ConfigUnmarshalTests{
+		DefaultConfig: NewConfig(),
+
TestsFile: filepath.Join(".", "testdata", "config.yaml"), + Tests: []operatortest.ConfigUnmarshalTest{ + { + Name: "default", + Expect: NewConfig(), + }, + { + Name: "parse_from_simple", + Expect: func() *Config { + cfg := NewConfig() + cfg.ParseFrom = entry.NewBodyField("from") + return cfg + }(), + }, + { + Name: "parse_to_simple", + Expect: func() *Config { + cfg := NewConfig() + cfg.ParseTo = entry.RootableField{Field: entry.NewBodyField("log")} + return cfg + }(), + }, + { + Name: "on_error_drop", + Expect: func() *Config { + cfg := NewConfig() + cfg.OnError = "drop" + return cfg + }(), + }, + { + Name: "severity", + Expect: func() *Config { + cfg := NewConfig() + parseField := entry.NewBodyField("severity_field") + severityField := helper.NewSeverityConfig() + severityField.ParseFrom = &parseField + mapping := map[string]any{ + "critical": "5xx", + "error": "4xx", + "info": "3xx", + "debug": "2xx", + } + severityField.Mapping = mapping + cfg.SeverityConfig = &severityField + return cfg + }(), + }, + { + Name: "format", + Expect: func() *Config { + cfg := NewConfig() + cfg.Format = "docker" + return cfg + }(), + }, + { + Name: "add_metadata_from_file_path", + Expect: func() *Config { + cfg := NewConfig() + cfg.AddMetadataFromFilePath = true + return cfg + }(), + }, + { + Name: "parse_to_attributes", + Expect: func() *Config { + p := NewConfig() + p.ParseTo = entry.RootableField{Field: entry.NewAttributeField()} + return p + }(), + }, + { + Name: "parse_to_body", + Expect: func() *Config { + p := NewConfig() + p.ParseTo = entry.RootableField{Field: entry.NewBodyField()} + return p + }(), + }, + { + Name: "parse_to_resource", + Expect: func() *Config { + p := NewConfig() + p.ParseTo = entry.RootableField{Field: entry.NewResourceField()} + return p + }(), + }, + }, + }.Run(t) +} diff --git a/pkg/stanza/operator/parser/container/package_test.go b/pkg/stanza/operator/parser/container/package_test.go new file mode 100644 index 000000000000..245776eec13d --- 
/dev/null +++ b/pkg/stanza/operator/parser/container/package_test.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package container + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} diff --git a/pkg/stanza/operator/parser/container/parser.go b/pkg/stanza/operator/parser/container/parser.go new file mode 100644 index 000000000000..d531925e9735 --- /dev/null +++ b/pkg/stanza/operator/parser/container/parser.go @@ -0,0 +1,357 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package container // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container" + +import ( + "context" + "errors" + "fmt" + "regexp" + "strings" + "sync" + "time" + + jsoniter "github.com/json-iterator/go" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" +) + +const dockerFormat = "docker" +const crioFormat = "crio" +const containerdFormat = "containerd" +const recombineInternalID = "recombine_container_internal" +const dockerPattern = "^\\{" +const crioPattern = "^(?P