From 7e05283f6df464b26011c0cb25db73ee29dbb600 Mon Sep 17 00:00:00 2001 From: Antoine Toulme Date: Fri, 10 Jan 2025 16:49:44 -0800 Subject: [PATCH] Add envoy metrics discovery bundle --- .../receivers/envoy.discovery.yaml | 39 ++ docker/docker-compose.yml | 7 + docker/envoy/Dockerfile | 1 + .../bundle.d/receivers/envoy.discovery.yaml | 35 ++ .../receivers/envoy.discovery.yaml.tmpl | 31 ++ .../discovery/bundle/bundle_gen.go | 2 + .../discovery/bundle/bundledfs_others.go | 1 + .../discovery/bundle/bundledfs_windows.go | 1 + .../discovery/bundle/components.go | 2 + tests/receivers/envoy/bundled_test.go | 88 +++++ tests/receivers/envoy/testdata/expected.yaml | 337 ++++++++++++++++++ .../envoy/testdata/otlp_exporter.yaml | 28 ++ 12 files changed, 572 insertions(+) create mode 100644 cmd/otelcol/config/collector/config.d.linux/receivers/envoy.discovery.yaml create mode 100644 docker/envoy/Dockerfile create mode 100644 internal/confmapprovider/discovery/bundle/bundle.d/receivers/envoy.discovery.yaml create mode 100644 internal/confmapprovider/discovery/bundle/bundle.d/receivers/envoy.discovery.yaml.tmpl create mode 100644 tests/receivers/envoy/bundled_test.go create mode 100644 tests/receivers/envoy/testdata/expected.yaml create mode 100644 tests/receivers/envoy/testdata/otlp_exporter.yaml diff --git a/cmd/otelcol/config/collector/config.d.linux/receivers/envoy.discovery.yaml b/cmd/otelcol/config/collector/config.d.linux/receivers/envoy.discovery.yaml new file mode 100644 index 0000000000..c2d633ab70 --- /dev/null +++ b/cmd/otelcol/config/collector/config.d.linux/receivers/envoy.discovery.yaml @@ -0,0 +1,39 @@ +##################################################################################### +# This file is generated by the Splunk Distribution of the OpenTelemetry Collector. # +# # +# It reflects the default configuration bundled in the Collector executable for use # +# in discovery mode (--discovery) and is provided for reference or customization. # +# Please note that any changes made to this file will need to be reconciled during # +# upgrades of the Collector. 
# +##################################################################################### +# prometheus: +# enabled: true +# rule: +# docker_observer: type == "container" and any([name, image, command], {# matches "(?i)envoy"}) and not (command matches "splunk.discovery") +# host_observer: type == "hostport" and command matches "(?i)envoy" and not (command matches "splunk.discovery") +# k8s_observer: type == "port" and pod.name matches "(?i)envoy" +# config: +# default: +# config: +# scrape_configs: +# - job_name: 'envoy' +# metrics_path: /stats/prometheus +# scrape_interval: 10s +# static_configs: +# - targets: ['`host`:9901'] +# metric_relabel_configs: +# - source_labels: [__name__] +# action: keep +# regex: '(envoy_cluster_upstream_cx_active|envoy_cluster_upstream_cx_total|envoy_cluster_upstream_cx_connect_fail|envoy_cluster_upstream_cx_connect_ms|envoy_cluster_upstream_rq_active|envoy_cluster_upstream_rq_total|envoy_cluster_upstream_rq_timeout|envoy_cluster_upstream_rq_pending_active|envoy_cluster_upstream_rq_pending_overflow|envoy_cluster_upstream_rq_time|envoy_cluster_membership_total|envoy_cluster_membership_degraded|envoy_cluster_membership_excluded|envoy_listener_downstream_cx_active|envoy_listener_downstream_cx_total|envoy_listener_downstream_cx_transport_socket_connect_timeout|envoy_listener_downstream_cx_overflow|envoy_listener_downstream_cx_overload_reject|envoy_listener_downstream_global_cx_overflow)' +# status: +# metrics: +# - status: successful +# strict: envoy_cluster_upstream_cx_active +# message: envoy prometheus receiver is working! +# statements: +# - status: failed +# regexp: "connection refused" +# message: The container is not serving http connections. +# - status: failed +# regexp: "dial tcp: lookup" +# message: Unable to resolve envoy prometheus tcp endpoint diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 61e090f1d0..7c8d6ff608 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -80,6 +80,13 @@ services: - integration environment: - ELASTIC_PASSWORD=$ELASTIC_PASSWORD + envoy: + image: quay.io/splunko11ytest/envoy:latest + profiles: + - integration + build: ./envoy + ports: + - "9901:9901" # Haproxy image for haproxy test: haproxy: image: quay.io/splunko11ytest/haproxy:latest diff --git a/docker/envoy/Dockerfile b/docker/envoy/Dockerfile new file mode 100644 index 0000000000..a15d7af790 --- /dev/null +++ b/docker/envoy/Dockerfile @@ -0,0 +1 @@ +FROM envoyproxy/envoy:v1.32-latest \ No newline at end of file diff --git a/internal/confmapprovider/discovery/bundle/bundle.d/receivers/envoy.discovery.yaml b/internal/confmapprovider/discovery/bundle/bundle.d/receivers/envoy.discovery.yaml new file mode 100644 index 0000000000..9555df3474 --- /dev/null +++ b/internal/confmapprovider/discovery/bundle/bundle.d/receivers/envoy.discovery.yaml @@ -0,0 +1,35 @@ +##################################################################################### +# Do not edit manually! # +# All changes must be made to associated .tmpl file before running 'make bundle.d'. 
# +##################################################################################### +prometheus: + enabled: true + rule: + docker_observer: type == "container" and any([name, image, command], {# matches "(?i)envoy"}) and not (command matches "splunk.discovery") + host_observer: type == "hostport" and command matches "(?i)envoy" and not (command matches "splunk.discovery") + k8s_observer: type == "port" and pod.name matches "(?i)envoy" + config: + default: + config: + scrape_configs: + - job_name: 'envoy' + metrics_path: /stats/prometheus + scrape_interval: 10s + static_configs: + - targets: ['`host`:9901'] + metric_relabel_configs: + - source_labels: [__name__] + action: keep + regex: '(envoy_cluster_upstream_cx_active|envoy_cluster_upstream_cx_total|envoy_cluster_upstream_cx_connect_fail|envoy_cluster_upstream_cx_connect_ms|envoy_cluster_upstream_rq_active|envoy_cluster_upstream_rq_total|envoy_cluster_upstream_rq_timeout|envoy_cluster_upstream_rq_pending_active|envoy_cluster_upstream_rq_pending_overflow|envoy_cluster_upstream_rq_time|envoy_cluster_membership_total|envoy_cluster_membership_degraded|envoy_cluster_membership_excluded|envoy_listener_downstream_cx_active|envoy_listener_downstream_cx_total|envoy_listener_downstream_cx_transport_socket_connect_timeout|envoy_listener_downstream_cx_overflow|envoy_listener_downstream_cx_overload_reject|envoy_listener_downstream_global_cx_overflow)' + status: + metrics: + - status: successful + strict: envoy_cluster_upstream_cx_active + message: envoy prometheus receiver is working! + statements: + - status: failed + regexp: "connection refused" + message: The container is not serving http connections. + - status: failed + regexp: "dial tcp: lookup" + message: Unable to resolve envoy prometheus tcp endpoint diff --git a/internal/confmapprovider/discovery/bundle/bundle.d/receivers/envoy.discovery.yaml.tmpl b/internal/confmapprovider/discovery/bundle/bundle.d/receivers/envoy.discovery.yaml.tmpl new file mode 100644 index 0000000000..0a1d4a1509 --- /dev/null +++ b/internal/confmapprovider/discovery/bundle/bundle.d/receivers/envoy.discovery.yaml.tmpl @@ -0,0 +1,31 @@ +{{ receiver "prometheus" }}: + enabled: true + rule: + docker_observer: type == "container" and any([name, image, command], {# matches "(?i)envoy"}) and not (command matches "splunk.discovery") + host_observer: type == "hostport" and command matches "(?i)envoy" and not (command matches "splunk.discovery") + k8s_observer: type == "port" and pod.name matches "(?i)envoy" + config: + default: + config: + scrape_configs: + - job_name: 'envoy' + metrics_path: /stats/prometheus + scrape_interval: 10s + static_configs: + - targets: ['`host`:9901'] + metric_relabel_configs: + - source_labels: [__name__] + action: keep + regex: '(envoy_cluster_upstream_cx_active|envoy_cluster_upstream_cx_total|envoy_cluster_upstream_cx_connect_fail|envoy_cluster_upstream_cx_connect_ms|envoy_cluster_upstream_rq_active|envoy_cluster_upstream_rq_total|envoy_cluster_upstream_rq_timeout|envoy_cluster_upstream_rq_pending_active|envoy_cluster_upstream_rq_pending_overflow|envoy_cluster_upstream_rq_time|envoy_cluster_membership_total|envoy_cluster_membership_degraded|envoy_cluster_membership_excluded|envoy_listener_downstream_cx_active|envoy_listener_downstream_cx_total|envoy_listener_downstream_cx_transport_socket_connect_timeout|envoy_listener_downstream_cx_overflow|envoy_listener_downstream_cx_overload_reject|envoy_listener_downstream_global_cx_overflow)' + status: + metrics: + - status: successful + strict: 
envoy_cluster_upstream_cx_active + message: envoy prometheus receiver is working! + statements: + - status: failed + regexp: "connection refused" + message: The container is not serving http connections. + - status: failed + regexp: "dial tcp: lookup" + message: Unable to resolve envoy prometheus tcp endpoint diff --git a/internal/confmapprovider/discovery/bundle/bundle_gen.go b/internal/confmapprovider/discovery/bundle/bundle_gen.go index 060af08308..12c2b77e66 100644 --- a/internal/confmapprovider/discovery/bundle/bundle_gen.go +++ b/internal/confmapprovider/discovery/bundle/bundle_gen.go @@ -25,6 +25,8 @@ //go:generate discoverybundler --render --template bundle.d/receivers/apache.discovery.yaml.tmpl //go:generate discoverybundler --render --commented --dir ../../../../cmd/otelcol/config/collector/config.d.linux/receivers -t bundle.d/receivers/apache.discovery.yaml.tmpl +//go:generate discoverybundler --render --template bundle.d/receivers/envoy.discovery.yaml.tmpl +//go:generate discoverybundler --render --commented --dir ../../../../cmd/otelcol/config/collector/config.d.linux/receivers -t bundle.d/receivers/envoy.discovery.yaml.tmpl //go:generate discoverybundler --render --template bundle.d/receivers/jmx-cassandra.discovery.yaml.tmpl //go:generate discoverybundler --render --commented --dir ../../../../cmd/otelcol/config/collector/config.d.linux/receivers -t bundle.d/receivers/jmx-cassandra.discovery.yaml.tmpl //go:generate discoverybundler --render --template bundle.d/receivers/kafkametrics.discovery.yaml.tmpl diff --git a/internal/confmapprovider/discovery/bundle/bundledfs_others.go b/internal/confmapprovider/discovery/bundle/bundledfs_others.go index f17509a558..292f6aaf50 100644 --- a/internal/confmapprovider/discovery/bundle/bundledfs_others.go +++ b/internal/confmapprovider/discovery/bundle/bundledfs_others.go @@ -26,6 +26,7 @@ import ( //go:embed bundle.d/extensions/host-observer.discovery.yaml //go:embed bundle.d/extensions/k8s-observer.discovery.yaml //go:embed bundle.d/receivers/apache.discovery.yaml +//go:embed bundle.d/receivers/envoy.discovery.yaml //go:embed bundle.d/receivers/jmx-cassandra.discovery.yaml //go:embed bundle.d/receivers/kafkametrics.discovery.yaml //go:embed bundle.d/receivers/mongodb.discovery.yaml diff --git a/internal/confmapprovider/discovery/bundle/bundledfs_windows.go b/internal/confmapprovider/discovery/bundle/bundledfs_windows.go index da13549b6e..aa7c83298c 100644 --- a/internal/confmapprovider/discovery/bundle/bundledfs_windows.go +++ b/internal/confmapprovider/discovery/bundle/bundledfs_windows.go @@ -26,6 +26,7 @@ import ( //go:embed bundle.d/extensions/host-observer.discovery.yaml //go:embed bundle.d/extensions/k8s-observer.discovery.yaml //go:embed bundle.d/receivers/apache.discovery.yaml +//go:embed bundle.d/receivers/envoy.discovery.yaml //go:embed bundle.d/receivers/jmx-cassandra.discovery.yaml //go:embed bundle.d/receivers/kafkametrics.discovery.yaml //go:embed bundle.d/receivers/mongodb.discovery.yaml diff --git a/internal/confmapprovider/discovery/bundle/components.go b/internal/confmapprovider/discovery/bundle/components.go index 7f025de19a..05bc6d5f16 100644 --- a/internal/confmapprovider/discovery/bundle/components.go +++ b/internal/confmapprovider/discovery/bundle/components.go @@ -32,6 +32,7 @@ var ( // in Components.Linux. If desired in windows BundledFS, ensure they are included in Components.Windows. 
receivers = []string{ "apache", + "envoy", "jmx-cassandra", "kafkametrics", "mongodb", @@ -66,6 +67,7 @@ var ( Windows: func() map[string]struct{} { windows := map[string]struct{}{ "apache": {}, + "envoy": {}, "jmx-cassandra": {}, "kafkametrics": {}, "mongodb": {}, diff --git a/tests/receivers/envoy/bundled_test.go b/tests/receivers/envoy/bundled_test.go new file mode 100644 index 0000000000..887ea21553 --- /dev/null +++ b/tests/receivers/envoy/bundled_test.go @@ -0,0 +1,88 @@ +// Copyright Splunk, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build integration + +package tests + +import ( + "fmt" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "path/filepath" + "testing" + "time" + + "github.com/signalfx/splunk-otel-collector/tests/testutils" +) + +func TestEnvoyDockerObserver(t *testing.T) { + t.Skip("Redis data points are also discovered since Redis runs, making this test fail.") + testutils.SkipIfNotContainerTest(t) + dockerSocket := testutils.CreateDockerSocketProxy(t) + require.NoError(t, dockerSocket.Start()) + t.Cleanup(func() { + dockerSocket.Stop() + }) + + tc := testutils.NewTestcase(t) + defer tc.PrintLogsOnFailure() + defer tc.ShutdownOTLPReceiverSink() + _, shutdown := tc.SplunkOtelCollectorContainer("otlp_exporter.yaml", func(collector testutils.Collector) testutils.Collector { + return collector.WithEnv(map[string]string{ + "SPLUNK_DISCOVERY_DURATION": "20s", + "SPLUNK_DISCOVERY_LOG_LEVEL": "debug", + }).WithArgs( + "--discovery", + "--set", `splunk.discovery.extensions.k8s_observer.enabled=false`, + "--set", `splunk.discovery.extensions.host_observer.enabled=false`, + "--set", fmt.Sprintf("splunk.discovery.extensions.docker_observer.config.endpoint=tcp://%s", dockerSocket.ContainerEndpoint), + ) + }) + defer shutdown() + + expected, err := golden.ReadMetrics(filepath.Join("testdata", "expected.yaml")) + require.NoError(t, err) + require.EventuallyWithT(t, func(tt *assert.CollectT) { + if len(tc.OTLPReceiverSink.AllMetrics()) == 0 { + assert.Fail(tt, "No metrics collected") + return + } + err := pmetrictest.CompareMetrics(expected, tc.OTLPReceiverSink.AllMetrics()[len(tc.OTLPReceiverSink.AllMetrics())-1], + pmetrictest.IgnoreResourceAttributeValue("service.instance.id"), + pmetrictest.IgnoreResourceAttributeValue("net.host.port"), + pmetrictest.IgnoreResourceAttributeValue("net.host.name"), + pmetrictest.IgnoreResourceAttributeValue("server.address"), + pmetrictest.IgnoreResourceAttributeValue("container.name"), + pmetrictest.IgnoreResourceAttributeValue("server.port"), + pmetrictest.IgnoreResourceAttributeValue("service.name"), + pmetrictest.IgnoreResourceAttributeValue("service_instance_id"), + pmetrictest.IgnoreResourceAttributeValue("service_version"), + pmetrictest.IgnoreMetricAttributeValue("service_version"), + 
pmetrictest.IgnoreMetricAttributeValue("service_instance_id"), + pmetrictest.IgnoreResourceAttributeValue("server.address"), + pmetrictest.IgnoreTimestamp(), + pmetrictest.IgnoreStartTimestamp(), + pmetrictest.IgnoreMetricDataPointsOrder(), + pmetrictest.IgnoreScopeMetricsOrder(), + pmetrictest.IgnoreScopeVersion(), + pmetrictest.IgnoreResourceMetricsOrder(), + pmetrictest.IgnoreMetricsOrder(), + pmetrictest.IgnoreMetricValues(), + ) + assert.NoError(tt, err) + }, 30*time.Second, 1*time.Second) +} diff --git a/tests/receivers/envoy/testdata/expected.yaml b/tests/receivers/envoy/testdata/expected.yaml new file mode 100644 index 0000000000..05d8d67eb7 --- /dev/null +++ b/tests/receivers/envoy/testdata/expected.yaml @@ -0,0 +1,337 @@ +resourceMetrics: + - resource: + attributes: + - key: container.image.name + value: + stringValue: envoyproxy/envoy + - key: http.scheme + value: + stringValue: http + - key: net.host.name + value: + stringValue: host.docker.internal + - key: net.host.port + value: + stringValue: "9901" + - key: server.address + value: + stringValue: host.docker.internal + - key: server.port + value: + stringValue: "9901" + - key: service.instance.id + value: + stringValue: host.docker.internal:9901 + - key: service.name + value: + stringValue: envoy + - key: url.scheme + value: + stringValue: http + scopeMetrics: + - metrics: + - gauge: + dataPoints: + - asDouble: 1 + attributes: + - key: envoy_cluster_name + value: + stringValue: service_envoyproxy_io + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: gauge + name: envoy_cluster_membership_total + - gauge: + dataPoints: + - asDouble: 0 + attributes: + - key: envoy_cluster_name + value: + stringValue: service_envoyproxy_io + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: gauge + name: envoy_cluster_upstream_cx_active + - gauge: + dataPoints: + - asDouble: 0 + attributes: + - key: envoy_cluster_name + value: + stringValue: service_envoyproxy_io + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: gauge + name: envoy_cluster_upstream_rq_active + - description: The scraping was successful + gauge: + dataPoints: + - asDouble: 1 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: gauge + name: up + - description: The number of samples the target exposed + gauge: + dataPoints: + - asDouble: 694 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: gauge + name: scrape_samples_scraped + - metadata: + - key: prometheus.type + value: + stringValue: counter + name: envoy_cluster_upstream_cx_connect_fail + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 + attributes: + - key: envoy_cluster_name + value: + stringValue: service_envoyproxy_io + startTimeUnixNano: "1000000" + timeUnixNano: "1000000" + isMonotonic: true + - metadata: + - key: prometheus.type + value: + stringValue: counter + name: envoy_listener_downstream_global_cx_overflow + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 + attributes: + - key: envoy_listener_address + value: + stringValue: 0.0.0.0_10000 + startTimeUnixNano: "1000000" + timeUnixNano: "1000000" + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 0 + attributes: + - key: envoy_cluster_name + value: + stringValue: service_envoyproxy_io + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: gauge + name: envoy_cluster_membership_degraded + - description: The approximate 
number of new series in this scrape + gauge: + dataPoints: + - asDouble: 17 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: gauge + name: scrape_series_added + - metadata: + - key: prometheus.type + value: + stringValue: counter + name: envoy_cluster_upstream_cx_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 + attributes: + - key: envoy_cluster_name + value: + stringValue: service_envoyproxy_io + startTimeUnixNano: "1000000" + timeUnixNano: "1000000" + isMonotonic: true + - metadata: + - key: prometheus.type + value: + stringValue: counter + name: envoy_cluster_upstream_rq_pending_overflow + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 + attributes: + - key: envoy_cluster_name + value: + stringValue: service_envoyproxy_io + startTimeUnixNano: "1000000" + timeUnixNano: "1000000" + isMonotonic: true + - metadata: + - key: prometheus.type + value: + stringValue: counter + name: envoy_listener_downstream_cx_overload_reject + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 + attributes: + - key: envoy_listener_address + value: + stringValue: 0.0.0.0_10000 + startTimeUnixNano: "1000000" + timeUnixNano: "1000000" + isMonotonic: true + - description: Duration of the scrape + gauge: + dataPoints: + - asDouble: 0.044218584 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: gauge + name: scrape_duration_seconds + unit: s + - metadata: + - key: prometheus.type + value: + stringValue: counter + name: envoy_listener_downstream_cx_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 + attributes: + - key: envoy_listener_address + value: + stringValue: 0.0.0.0_10000 + startTimeUnixNano: "1000000" + timeUnixNano: "1000000" + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 0 + attributes: + - key: envoy_cluster_name + value: + stringValue: service_envoyproxy_io + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: gauge + name: envoy_cluster_upstream_rq_pending_active + - gauge: + dataPoints: + - asDouble: 0 + attributes: + - key: envoy_listener_address + value: + stringValue: 0.0.0.0_10000 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: gauge + name: envoy_listener_downstream_cx_active + - metadata: + - key: prometheus.type + value: + stringValue: counter + name: envoy_listener_downstream_cx_transport_socket_connect_timeout + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 + attributes: + - key: envoy_listener_address + value: + stringValue: 0.0.0.0_10000 + startTimeUnixNano: "1000000" + timeUnixNano: "1000000" + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 0 + attributes: + - key: envoy_cluster_name + value: + stringValue: service_envoyproxy_io + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: gauge + name: envoy_cluster_membership_excluded + - description: The number of samples remaining after metric relabeling was applied + gauge: + dataPoints: + - asDouble: 17 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: gauge + name: scrape_samples_post_metric_relabeling + - metadata: + - key: prometheus.type + value: + stringValue: counter + name: envoy_cluster_upstream_rq_timeout + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 + attributes: + - key: envoy_cluster_name + value: + stringValue: service_envoyproxy_io + startTimeUnixNano: "1000000" + timeUnixNano: "1000000" 
+ isMonotonic: true + - metadata: + - key: prometheus.type + value: + stringValue: counter + name: envoy_cluster_upstream_rq_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 + attributes: + - key: envoy_cluster_name + value: + stringValue: service_envoyproxy_io + startTimeUnixNano: "1000000" + timeUnixNano: "1000000" + isMonotonic: true + - metadata: + - key: prometheus.type + value: + stringValue: counter + name: envoy_listener_downstream_cx_overflow + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 + attributes: + - key: envoy_listener_address + value: + stringValue: 0.0.0.0_10000 + startTimeUnixNano: "1000000" + timeUnixNano: "1000000" + isMonotonic: true + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver + version: v0.116.0-9-g469980bb diff --git a/tests/receivers/envoy/testdata/otlp_exporter.yaml b/tests/receivers/envoy/testdata/otlp_exporter.yaml new file mode 100644 index 0000000000..5795634e25 --- /dev/null +++ b/tests/receivers/envoy/testdata/otlp_exporter.yaml @@ -0,0 +1,28 @@ +#receivers: +# prometheus: +# config: +# scrape_configs: +# - job_name: 'envoy' +# metrics_path: /stats/prometheus +# scrape_interval: 5s +# static_configs: +# - targets: [ 'host.docker.internal:9901' ] +# metric_relabel_configs: +# - source_labels: [ __name__ ] +# action: keep +# regex: '(envoy_cluster_upstream_cx_active|envoy_cluster_upstream_cx_total|envoy_cluster_upstream_cx_connect_fail|envoy_cluster_upstream_cx_connect_ms|envoy_cluster_upstream_rq_active|envoy_cluster_upstream_rq_total|envoy_cluster_upstream_rq_timeout|envoy_cluster_upstream_rq_pending_active|envoy_cluster_upstream_rq_pending_overflow|envoy_cluster_upstream_rq_time|envoy_cluster_membership_total|envoy_cluster_membership_degraded|envoy_cluster_membership_excluded|envoy_listener_downstream_cx_active|envoy_listener_downstream_cx_total|envoy_listener_downstream_cx_transport_socket_connect_timeout|envoy_listener_downstream_cx_overflow|envoy_listener_downstream_cx_overload_reject|envoy_listener_downstream_global_cx_overflow)' + + +exporters: + otlp: + endpoint: "${OTLP_ENDPOINT}" + tls: + insecure: true + +service: + telemetry: + logs: + level: debug + pipelines: + metrics: + exporters: [otlp] \ No newline at end of file
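
Note (not part of the patch): the discovery rule added above scrapes the Envoy admin endpoint at `host`:9901/stats/prometheus and uses envoy_cluster_upstream_cx_active as the strict "successful" status signal. Below is a minimal Go sketch for sanity-checking locally that the docker-compose envoy service actually exposes that metric before running the bundled integration test. The localhost address, file name, and timeout are illustrative assumptions; only the port (9901), the metrics path (/stats/prometheus), and the metric name are taken from the change itself.

// check_envoy_stats.go - illustrative sketch, not part of this change.
// Assumes the docker-compose "envoy" service above is running and
// publishing its admin port on localhost:9901.
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"os"
	"strings"
	"time"
)

func main() {
	// Same endpoint the bundled prometheus receiver scrapes.
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get("http://localhost:9901/stats/prometheus")
	if err != nil {
		fmt.Fprintln(os.Stderr, "envoy admin endpoint not reachable:", err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	// envoy_cluster_upstream_cx_active is the metric the discovery status
	// block matches with "strict" to report the receiver as working.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		if strings.HasPrefix(scanner.Text(), "envoy_cluster_upstream_cx_active") {
			fmt.Println("found:", scanner.Text())
			return
		}
	}
	fmt.Fprintln(os.Stderr, "envoy_cluster_upstream_cx_active not found in scrape output")
	os.Exit(1)
}

The sketch can be run with "go run" after starting the envoy service defined in docker/docker-compose.yml; the bundled_test.go added here exercises the same endpoint, but through the docker_observer in discovery mode rather than a direct scrape.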