diff --git a/.chloggen/k8scluster-remove-node-metrics-from-metadata.yaml b/.chloggen/k8scluster-remove-node-metrics-from-metadata.yaml new file mode 100644 index 000000000000..60bd30b92d43 --- /dev/null +++ b/.chloggen/k8scluster-remove-node-metrics-from-metadata.yaml @@ -0,0 +1,23 @@ +# Use this changelog template to create an entry for release notes. +# If your change doesn't affect end users, such as a test fix or a tooling change, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: receiver/k8scluster + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Unify predefined and custom node metrics. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [24776] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + - Update metrics description and units to be consistent + - Remove predefined metrics definitions from metadata.yaml because they are controlled by `node_conditions_to_report` + and `allocatable_types_to_report` config options. diff --git a/receiver/k8sclusterreceiver/documentation.md b/receiver/k8sclusterreceiver/documentation.md index d3576121cf27..87b52fd36a9f 100644 --- a/receiver/k8sclusterreceiver/documentation.md +++ b/receiver/k8sclusterreceiver/documentation.md @@ -228,86 +228,6 @@ The current phase of namespaces (1 for active and 0 for terminating) | ---- | ----------- | ---------- | | 1 | Gauge | Int | -### k8s.node.allocatable_cpu - -How many CPU cores remaining that the node can allocate to pods - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {cores} | Gauge | Double | - -### k8s.node.allocatable_ephemeral_storage - -How many bytes of ephemeral storage remaining that the node can allocate to pods - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -### k8s.node.allocatable_memory - -How many bytes of RAM memory remaining that the node can allocate to pods - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -### k8s.node.allocatable_pods - -How many pods remaining the node can allocate - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {pods} | Gauge | Int | - -### k8s.node.allocatable_storage - -How many bytes of storage remaining that the node can allocate to pods - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| By | Gauge | Int | - -### k8s.node.condition_disk_pressure - -Whether this node is DiskPressure (1), not DiskPressure (0) or in an unknown state (-1) - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### k8s.node.condition_memory_pressure - -Whether this node is MemoryPressure (1), not MemoryPressure (0) or in an unknown state (-1) - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### k8s.node.condition_network_unavailable - -Whether this node is NetworkUnavailable (1), not NetworkUnavailable (0) 
or in an unknown state (-1) - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### k8s.node.condition_pid_pressure - -Whether this node is PidPressure (1), not PidPressure (0) or in an unknown state (-1) - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - -### k8s.node.condition_ready - -Whether this node is Ready (1), not Ready (0) or in an unknown state (-1) - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Int | - ### k8s.pod.phase Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) diff --git a/receiver/k8sclusterreceiver/internal/metadata/generated_config.go b/receiver/k8sclusterreceiver/internal/metadata/generated_config.go index 0fba977277a2..d9cd05d91c50 100644 --- a/receiver/k8sclusterreceiver/internal/metadata/generated_config.go +++ b/receiver/k8sclusterreceiver/internal/metadata/generated_config.go @@ -52,16 +52,6 @@ type MetricsConfig struct { K8sJobMaxParallelPods MetricConfig `mapstructure:"k8s.job.max_parallel_pods"` K8sJobSuccessfulPods MetricConfig `mapstructure:"k8s.job.successful_pods"` K8sNamespacePhase MetricConfig `mapstructure:"k8s.namespace.phase"` - K8sNodeAllocatableCPU MetricConfig `mapstructure:"k8s.node.allocatable_cpu"` - K8sNodeAllocatableEphemeralStorage MetricConfig `mapstructure:"k8s.node.allocatable_ephemeral_storage"` - K8sNodeAllocatableMemory MetricConfig `mapstructure:"k8s.node.allocatable_memory"` - K8sNodeAllocatablePods MetricConfig `mapstructure:"k8s.node.allocatable_pods"` - K8sNodeAllocatableStorage MetricConfig `mapstructure:"k8s.node.allocatable_storage"` - K8sNodeConditionDiskPressure MetricConfig `mapstructure:"k8s.node.condition_disk_pressure"` - K8sNodeConditionMemoryPressure MetricConfig `mapstructure:"k8s.node.condition_memory_pressure"` - K8sNodeConditionNetworkUnavailable MetricConfig `mapstructure:"k8s.node.condition_network_unavailable"` - K8sNodeConditionPidPressure MetricConfig `mapstructure:"k8s.node.condition_pid_pressure"` - K8sNodeConditionReady MetricConfig `mapstructure:"k8s.node.condition_ready"` K8sPodPhase MetricConfig `mapstructure:"k8s.pod.phase"` K8sReplicasetAvailable MetricConfig `mapstructure:"k8s.replicaset.available"` K8sReplicasetDesired MetricConfig `mapstructure:"k8s.replicaset.desired"` @@ -162,36 +152,6 @@ func DefaultMetricsConfig() MetricsConfig { K8sNamespacePhase: MetricConfig{ Enabled: true, }, - K8sNodeAllocatableCPU: MetricConfig{ - Enabled: true, - }, - K8sNodeAllocatableEphemeralStorage: MetricConfig{ - Enabled: true, - }, - K8sNodeAllocatableMemory: MetricConfig{ - Enabled: true, - }, - K8sNodeAllocatablePods: MetricConfig{ - Enabled: true, - }, - K8sNodeAllocatableStorage: MetricConfig{ - Enabled: true, - }, - K8sNodeConditionDiskPressure: MetricConfig{ - Enabled: true, - }, - K8sNodeConditionMemoryPressure: MetricConfig{ - Enabled: true, - }, - K8sNodeConditionNetworkUnavailable: MetricConfig{ - Enabled: true, - }, - K8sNodeConditionPidPressure: MetricConfig{ - Enabled: true, - }, - K8sNodeConditionReady: MetricConfig{ - Enabled: true, - }, K8sPodPhase: MetricConfig{ Enabled: true, }, diff --git a/receiver/k8sclusterreceiver/internal/metadata/generated_config_test.go b/receiver/k8sclusterreceiver/internal/metadata/generated_config_test.go index 9b13dd2ae3da..4dde07183eb4 100644 --- a/receiver/k8sclusterreceiver/internal/metadata/generated_config_test.go +++ 
b/receiver/k8sclusterreceiver/internal/metadata/generated_config_test.go @@ -53,16 +53,6 @@ func TestMetricsBuilderConfig(t *testing.T) { K8sJobMaxParallelPods: MetricConfig{Enabled: true}, K8sJobSuccessfulPods: MetricConfig{Enabled: true}, K8sNamespacePhase: MetricConfig{Enabled: true}, - K8sNodeAllocatableCPU: MetricConfig{Enabled: true}, - K8sNodeAllocatableEphemeralStorage: MetricConfig{Enabled: true}, - K8sNodeAllocatableMemory: MetricConfig{Enabled: true}, - K8sNodeAllocatablePods: MetricConfig{Enabled: true}, - K8sNodeAllocatableStorage: MetricConfig{Enabled: true}, - K8sNodeConditionDiskPressure: MetricConfig{Enabled: true}, - K8sNodeConditionMemoryPressure: MetricConfig{Enabled: true}, - K8sNodeConditionNetworkUnavailable: MetricConfig{Enabled: true}, - K8sNodeConditionPidPressure: MetricConfig{Enabled: true}, - K8sNodeConditionReady: MetricConfig{Enabled: true}, K8sPodPhase: MetricConfig{Enabled: true}, K8sReplicasetAvailable: MetricConfig{Enabled: true}, K8sReplicasetDesired: MetricConfig{Enabled: true}, @@ -145,16 +135,6 @@ func TestMetricsBuilderConfig(t *testing.T) { K8sJobMaxParallelPods: MetricConfig{Enabled: false}, K8sJobSuccessfulPods: MetricConfig{Enabled: false}, K8sNamespacePhase: MetricConfig{Enabled: false}, - K8sNodeAllocatableCPU: MetricConfig{Enabled: false}, - K8sNodeAllocatableEphemeralStorage: MetricConfig{Enabled: false}, - K8sNodeAllocatableMemory: MetricConfig{Enabled: false}, - K8sNodeAllocatablePods: MetricConfig{Enabled: false}, - K8sNodeAllocatableStorage: MetricConfig{Enabled: false}, - K8sNodeConditionDiskPressure: MetricConfig{Enabled: false}, - K8sNodeConditionMemoryPressure: MetricConfig{Enabled: false}, - K8sNodeConditionNetworkUnavailable: MetricConfig{Enabled: false}, - K8sNodeConditionPidPressure: MetricConfig{Enabled: false}, - K8sNodeConditionReady: MetricConfig{Enabled: false}, K8sPodPhase: MetricConfig{Enabled: false}, K8sReplicasetAvailable: MetricConfig{Enabled: false}, K8sReplicasetDesired: MetricConfig{Enabled: false}, diff --git a/receiver/k8sclusterreceiver/internal/metadata/generated_metrics.go b/receiver/k8sclusterreceiver/internal/metadata/generated_metrics.go index 866ea75323aa..cb5f1ba3cd20 100644 --- a/receiver/k8sclusterreceiver/internal/metadata/generated_metrics.go +++ b/receiver/k8sclusterreceiver/internal/metadata/generated_metrics.go @@ -1335,496 +1335,6 @@ func newMetricK8sNamespacePhase(cfg MetricConfig) metricK8sNamespacePhase { return m } -type metricK8sNodeAllocatableCPU struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills k8s.node.allocatable_cpu metric with initial data. -func (m *metricK8sNodeAllocatableCPU) init() { - m.data.SetName("k8s.node.allocatable_cpu") - m.data.SetDescription("How many CPU cores remaining that the node can allocate to pods") - m.data.SetUnit("{cores}") - m.data.SetEmptyGauge() -} - -func (m *metricK8sNodeAllocatableCPU) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetDoubleValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricK8sNodeAllocatableCPU) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricK8sNodeAllocatableCPU) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricK8sNodeAllocatableCPU(cfg MetricConfig) metricK8sNodeAllocatableCPU { - m := metricK8sNodeAllocatableCPU{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricK8sNodeAllocatableEphemeralStorage struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills k8s.node.allocatable_ephemeral_storage metric with initial data. -func (m *metricK8sNodeAllocatableEphemeralStorage) init() { - m.data.SetName("k8s.node.allocatable_ephemeral_storage") - m.data.SetDescription("How many bytes of ephemeral storage remaining that the node can allocate to pods") - m.data.SetUnit("By") - m.data.SetEmptyGauge() -} - -func (m *metricK8sNodeAllocatableEphemeralStorage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricK8sNodeAllocatableEphemeralStorage) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricK8sNodeAllocatableEphemeralStorage) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricK8sNodeAllocatableEphemeralStorage(cfg MetricConfig) metricK8sNodeAllocatableEphemeralStorage { - m := metricK8sNodeAllocatableEphemeralStorage{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricK8sNodeAllocatableMemory struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills k8s.node.allocatable_memory metric with initial data. -func (m *metricK8sNodeAllocatableMemory) init() { - m.data.SetName("k8s.node.allocatable_memory") - m.data.SetDescription("How many bytes of RAM memory remaining that the node can allocate to pods") - m.data.SetUnit("By") - m.data.SetEmptyGauge() -} - -func (m *metricK8sNodeAllocatableMemory) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricK8sNodeAllocatableMemory) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricK8sNodeAllocatableMemory) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricK8sNodeAllocatableMemory(cfg MetricConfig) metricK8sNodeAllocatableMemory { - m := metricK8sNodeAllocatableMemory{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricK8sNodeAllocatablePods struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills k8s.node.allocatable_pods metric with initial data. -func (m *metricK8sNodeAllocatablePods) init() { - m.data.SetName("k8s.node.allocatable_pods") - m.data.SetDescription("How many pods remaining the node can allocate") - m.data.SetUnit("{pods}") - m.data.SetEmptyGauge() -} - -func (m *metricK8sNodeAllocatablePods) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricK8sNodeAllocatablePods) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricK8sNodeAllocatablePods) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricK8sNodeAllocatablePods(cfg MetricConfig) metricK8sNodeAllocatablePods { - m := metricK8sNodeAllocatablePods{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricK8sNodeAllocatableStorage struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills k8s.node.allocatable_storage metric with initial data. -func (m *metricK8sNodeAllocatableStorage) init() { - m.data.SetName("k8s.node.allocatable_storage") - m.data.SetDescription("How many bytes of storage remaining that the node can allocate to pods") - m.data.SetUnit("By") - m.data.SetEmptyGauge() -} - -func (m *metricK8sNodeAllocatableStorage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricK8sNodeAllocatableStorage) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricK8sNodeAllocatableStorage) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricK8sNodeAllocatableStorage(cfg MetricConfig) metricK8sNodeAllocatableStorage { - m := metricK8sNodeAllocatableStorage{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricK8sNodeConditionDiskPressure struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills k8s.node.condition_disk_pressure metric with initial data. -func (m *metricK8sNodeConditionDiskPressure) init() { - m.data.SetName("k8s.node.condition_disk_pressure") - m.data.SetDescription("Whether this node is DiskPressure (1), not DiskPressure (0) or in an unknown state (-1)") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricK8sNodeConditionDiskPressure) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricK8sNodeConditionDiskPressure) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricK8sNodeConditionDiskPressure) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricK8sNodeConditionDiskPressure(cfg MetricConfig) metricK8sNodeConditionDiskPressure { - m := metricK8sNodeConditionDiskPressure{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricK8sNodeConditionMemoryPressure struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills k8s.node.condition_memory_pressure metric with initial data. -func (m *metricK8sNodeConditionMemoryPressure) init() { - m.data.SetName("k8s.node.condition_memory_pressure") - m.data.SetDescription("Whether this node is MemoryPressure (1), not MemoryPressure (0) or in an unknown state (-1)") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricK8sNodeConditionMemoryPressure) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricK8sNodeConditionMemoryPressure) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricK8sNodeConditionMemoryPressure) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricK8sNodeConditionMemoryPressure(cfg MetricConfig) metricK8sNodeConditionMemoryPressure { - m := metricK8sNodeConditionMemoryPressure{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricK8sNodeConditionNetworkUnavailable struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills k8s.node.condition_network_unavailable metric with initial data. -func (m *metricK8sNodeConditionNetworkUnavailable) init() { - m.data.SetName("k8s.node.condition_network_unavailable") - m.data.SetDescription("Whether this node is NetworkUnavailable (1), not NetworkUnavailable (0) or in an unknown state (-1)") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricK8sNodeConditionNetworkUnavailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricK8sNodeConditionNetworkUnavailable) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricK8sNodeConditionNetworkUnavailable) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricK8sNodeConditionNetworkUnavailable(cfg MetricConfig) metricK8sNodeConditionNetworkUnavailable { - m := metricK8sNodeConditionNetworkUnavailable{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricK8sNodeConditionPidPressure struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills k8s.node.condition_pid_pressure metric with initial data. -func (m *metricK8sNodeConditionPidPressure) init() { - m.data.SetName("k8s.node.condition_pid_pressure") - m.data.SetDescription("Whether this node is PidPressure (1), not PidPressure (0) or in an unknown state (-1)") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricK8sNodeConditionPidPressure) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricK8sNodeConditionPidPressure) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricK8sNodeConditionPidPressure) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricK8sNodeConditionPidPressure(cfg MetricConfig) metricK8sNodeConditionPidPressure { - m := metricK8sNodeConditionPidPressure{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricK8sNodeConditionReady struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills k8s.node.condition_ready metric with initial data. -func (m *metricK8sNodeConditionReady) init() { - m.data.SetName("k8s.node.condition_ready") - m.data.SetDescription("Whether this node is Ready (1), not Ready (0) or in an unknown state (-1)") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricK8sNodeConditionReady) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricK8sNodeConditionReady) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricK8sNodeConditionReady) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricK8sNodeConditionReady(cfg MetricConfig) metricK8sNodeConditionReady { - m := metricK8sNodeConditionReady{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - type metricK8sPodPhase struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
@@ -2609,16 +2119,6 @@ type MetricsBuilder struct { metricK8sJobMaxParallelPods metricK8sJobMaxParallelPods metricK8sJobSuccessfulPods metricK8sJobSuccessfulPods metricK8sNamespacePhase metricK8sNamespacePhase - metricK8sNodeAllocatableCPU metricK8sNodeAllocatableCPU - metricK8sNodeAllocatableEphemeralStorage metricK8sNodeAllocatableEphemeralStorage - metricK8sNodeAllocatableMemory metricK8sNodeAllocatableMemory - metricK8sNodeAllocatablePods metricK8sNodeAllocatablePods - metricK8sNodeAllocatableStorage metricK8sNodeAllocatableStorage - metricK8sNodeConditionDiskPressure metricK8sNodeConditionDiskPressure - metricK8sNodeConditionMemoryPressure metricK8sNodeConditionMemoryPressure - metricK8sNodeConditionNetworkUnavailable metricK8sNodeConditionNetworkUnavailable - metricK8sNodeConditionPidPressure metricK8sNodeConditionPidPressure - metricK8sNodeConditionReady metricK8sNodeConditionReady metricK8sPodPhase metricK8sPodPhase metricK8sReplicasetAvailable metricK8sReplicasetAvailable metricK8sReplicasetDesired metricK8sReplicasetDesired @@ -2679,16 +2179,6 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting metricK8sJobMaxParallelPods: newMetricK8sJobMaxParallelPods(mbc.Metrics.K8sJobMaxParallelPods), metricK8sJobSuccessfulPods: newMetricK8sJobSuccessfulPods(mbc.Metrics.K8sJobSuccessfulPods), metricK8sNamespacePhase: newMetricK8sNamespacePhase(mbc.Metrics.K8sNamespacePhase), - metricK8sNodeAllocatableCPU: newMetricK8sNodeAllocatableCPU(mbc.Metrics.K8sNodeAllocatableCPU), - metricK8sNodeAllocatableEphemeralStorage: newMetricK8sNodeAllocatableEphemeralStorage(mbc.Metrics.K8sNodeAllocatableEphemeralStorage), - metricK8sNodeAllocatableMemory: newMetricK8sNodeAllocatableMemory(mbc.Metrics.K8sNodeAllocatableMemory), - metricK8sNodeAllocatablePods: newMetricK8sNodeAllocatablePods(mbc.Metrics.K8sNodeAllocatablePods), - metricK8sNodeAllocatableStorage: newMetricK8sNodeAllocatableStorage(mbc.Metrics.K8sNodeAllocatableStorage), - metricK8sNodeConditionDiskPressure: newMetricK8sNodeConditionDiskPressure(mbc.Metrics.K8sNodeConditionDiskPressure), - metricK8sNodeConditionMemoryPressure: newMetricK8sNodeConditionMemoryPressure(mbc.Metrics.K8sNodeConditionMemoryPressure), - metricK8sNodeConditionNetworkUnavailable: newMetricK8sNodeConditionNetworkUnavailable(mbc.Metrics.K8sNodeConditionNetworkUnavailable), - metricK8sNodeConditionPidPressure: newMetricK8sNodeConditionPidPressure(mbc.Metrics.K8sNodeConditionPidPressure), - metricK8sNodeConditionReady: newMetricK8sNodeConditionReady(mbc.Metrics.K8sNodeConditionReady), metricK8sPodPhase: newMetricK8sPodPhase(mbc.Metrics.K8sPodPhase), metricK8sReplicasetAvailable: newMetricK8sReplicasetAvailable(mbc.Metrics.K8sReplicasetAvailable), metricK8sReplicasetDesired: newMetricK8sReplicasetDesired(mbc.Metrics.K8sReplicasetDesired), @@ -2793,16 +2283,6 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricK8sJobMaxParallelPods.emit(ils.Metrics()) mb.metricK8sJobSuccessfulPods.emit(ils.Metrics()) mb.metricK8sNamespacePhase.emit(ils.Metrics()) - mb.metricK8sNodeAllocatableCPU.emit(ils.Metrics()) - mb.metricK8sNodeAllocatableEphemeralStorage.emit(ils.Metrics()) - mb.metricK8sNodeAllocatableMemory.emit(ils.Metrics()) - mb.metricK8sNodeAllocatablePods.emit(ils.Metrics()) - mb.metricK8sNodeAllocatableStorage.emit(ils.Metrics()) - mb.metricK8sNodeConditionDiskPressure.emit(ils.Metrics()) - mb.metricK8sNodeConditionMemoryPressure.emit(ils.Metrics()) - mb.metricK8sNodeConditionNetworkUnavailable.emit(ils.Metrics()) - 
mb.metricK8sNodeConditionPidPressure.emit(ils.Metrics()) - mb.metricK8sNodeConditionReady.emit(ils.Metrics()) mb.metricK8sPodPhase.emit(ils.Metrics()) mb.metricK8sReplicasetAvailable.emit(ils.Metrics()) mb.metricK8sReplicasetDesired.emit(ils.Metrics()) @@ -2973,56 +2453,6 @@ func (mb *MetricsBuilder) RecordK8sNamespacePhaseDataPoint(ts pcommon.Timestamp, mb.metricK8sNamespacePhase.recordDataPoint(mb.startTime, ts, val) } -// RecordK8sNodeAllocatableCPUDataPoint adds a data point to k8s.node.allocatable_cpu metric. -func (mb *MetricsBuilder) RecordK8sNodeAllocatableCPUDataPoint(ts pcommon.Timestamp, val float64) { - mb.metricK8sNodeAllocatableCPU.recordDataPoint(mb.startTime, ts, val) -} - -// RecordK8sNodeAllocatableEphemeralStorageDataPoint adds a data point to k8s.node.allocatable_ephemeral_storage metric. -func (mb *MetricsBuilder) RecordK8sNodeAllocatableEphemeralStorageDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricK8sNodeAllocatableEphemeralStorage.recordDataPoint(mb.startTime, ts, val) -} - -// RecordK8sNodeAllocatableMemoryDataPoint adds a data point to k8s.node.allocatable_memory metric. -func (mb *MetricsBuilder) RecordK8sNodeAllocatableMemoryDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricK8sNodeAllocatableMemory.recordDataPoint(mb.startTime, ts, val) -} - -// RecordK8sNodeAllocatablePodsDataPoint adds a data point to k8s.node.allocatable_pods metric. -func (mb *MetricsBuilder) RecordK8sNodeAllocatablePodsDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricK8sNodeAllocatablePods.recordDataPoint(mb.startTime, ts, val) -} - -// RecordK8sNodeAllocatableStorageDataPoint adds a data point to k8s.node.allocatable_storage metric. -func (mb *MetricsBuilder) RecordK8sNodeAllocatableStorageDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricK8sNodeAllocatableStorage.recordDataPoint(mb.startTime, ts, val) -} - -// RecordK8sNodeConditionDiskPressureDataPoint adds a data point to k8s.node.condition_disk_pressure metric. -func (mb *MetricsBuilder) RecordK8sNodeConditionDiskPressureDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricK8sNodeConditionDiskPressure.recordDataPoint(mb.startTime, ts, val) -} - -// RecordK8sNodeConditionMemoryPressureDataPoint adds a data point to k8s.node.condition_memory_pressure metric. -func (mb *MetricsBuilder) RecordK8sNodeConditionMemoryPressureDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricK8sNodeConditionMemoryPressure.recordDataPoint(mb.startTime, ts, val) -} - -// RecordK8sNodeConditionNetworkUnavailableDataPoint adds a data point to k8s.node.condition_network_unavailable metric. -func (mb *MetricsBuilder) RecordK8sNodeConditionNetworkUnavailableDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricK8sNodeConditionNetworkUnavailable.recordDataPoint(mb.startTime, ts, val) -} - -// RecordK8sNodeConditionPidPressureDataPoint adds a data point to k8s.node.condition_pid_pressure metric. -func (mb *MetricsBuilder) RecordK8sNodeConditionPidPressureDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricK8sNodeConditionPidPressure.recordDataPoint(mb.startTime, ts, val) -} - -// RecordK8sNodeConditionReadyDataPoint adds a data point to k8s.node.condition_ready metric. -func (mb *MetricsBuilder) RecordK8sNodeConditionReadyDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricK8sNodeConditionReady.recordDataPoint(mb.startTime, ts, val) -} - // RecordK8sPodPhaseDataPoint adds a data point to k8s.pod.phase metric. 
func (mb *MetricsBuilder) RecordK8sPodPhaseDataPoint(ts pcommon.Timestamp, val int64) { mb.metricK8sPodPhase.recordDataPoint(mb.startTime, ts, val) diff --git a/receiver/k8sclusterreceiver/internal/metadata/generated_metrics_test.go b/receiver/k8sclusterreceiver/internal/metadata/generated_metrics_test.go index 8f85eeb86c1e..5521b7db9d45 100644 --- a/receiver/k8sclusterreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/k8sclusterreceiver/internal/metadata/generated_metrics_test.go @@ -162,46 +162,6 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordK8sNamespacePhaseDataPoint(ts, 1) - defaultMetricsCount++ - allMetricsCount++ - mb.RecordK8sNodeAllocatableCPUDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordK8sNodeAllocatableEphemeralStorageDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordK8sNodeAllocatableMemoryDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordK8sNodeAllocatablePodsDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordK8sNodeAllocatableStorageDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordK8sNodeConditionDiskPressureDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordK8sNodeConditionMemoryPressureDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordK8sNodeConditionNetworkUnavailableDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordK8sNodeConditionPidPressureDataPoint(ts, 1) - - defaultMetricsCount++ - allMetricsCount++ - mb.RecordK8sNodeConditionReadyDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordK8sPodPhaseDataPoint(ts, 1) @@ -640,126 +600,6 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - case "k8s.node.allocatable_cpu": - assert.False(t, validatedMetrics["k8s.node.allocatable_cpu"], "Found a duplicate in the metrics slice: k8s.node.allocatable_cpu") - validatedMetrics["k8s.node.allocatable_cpu"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "How many CPU cores remaining that the node can allocate to pods", ms.At(i).Description()) - assert.Equal(t, "{cores}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.Equal(t, float64(1), dp.DoubleValue()) - case "k8s.node.allocatable_ephemeral_storage": - assert.False(t, validatedMetrics["k8s.node.allocatable_ephemeral_storage"], "Found a duplicate in the metrics slice: k8s.node.allocatable_ephemeral_storage") - validatedMetrics["k8s.node.allocatable_ephemeral_storage"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "How many bytes of ephemeral storage remaining that the node can allocate to pods", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "k8s.node.allocatable_memory": - assert.False(t, 
validatedMetrics["k8s.node.allocatable_memory"], "Found a duplicate in the metrics slice: k8s.node.allocatable_memory") - validatedMetrics["k8s.node.allocatable_memory"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "How many bytes of RAM memory remaining that the node can allocate to pods", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "k8s.node.allocatable_pods": - assert.False(t, validatedMetrics["k8s.node.allocatable_pods"], "Found a duplicate in the metrics slice: k8s.node.allocatable_pods") - validatedMetrics["k8s.node.allocatable_pods"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "How many pods remaining the node can allocate", ms.At(i).Description()) - assert.Equal(t, "{pods}", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "k8s.node.allocatable_storage": - assert.False(t, validatedMetrics["k8s.node.allocatable_storage"], "Found a duplicate in the metrics slice: k8s.node.allocatable_storage") - validatedMetrics["k8s.node.allocatable_storage"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "How many bytes of storage remaining that the node can allocate to pods", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "k8s.node.condition_disk_pressure": - assert.False(t, validatedMetrics["k8s.node.condition_disk_pressure"], "Found a duplicate in the metrics slice: k8s.node.condition_disk_pressure") - validatedMetrics["k8s.node.condition_disk_pressure"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Whether this node is DiskPressure (1), not DiskPressure (0) or in an unknown state (-1)", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "k8s.node.condition_memory_pressure": - assert.False(t, validatedMetrics["k8s.node.condition_memory_pressure"], "Found a duplicate in the metrics slice: k8s.node.condition_memory_pressure") - validatedMetrics["k8s.node.condition_memory_pressure"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Whether this node is MemoryPressure (1), not MemoryPressure (0) or in an unknown state (-1)", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := 
ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "k8s.node.condition_network_unavailable": - assert.False(t, validatedMetrics["k8s.node.condition_network_unavailable"], "Found a duplicate in the metrics slice: k8s.node.condition_network_unavailable") - validatedMetrics["k8s.node.condition_network_unavailable"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Whether this node is NetworkUnavailable (1), not NetworkUnavailable (0) or in an unknown state (-1)", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "k8s.node.condition_pid_pressure": - assert.False(t, validatedMetrics["k8s.node.condition_pid_pressure"], "Found a duplicate in the metrics slice: k8s.node.condition_pid_pressure") - validatedMetrics["k8s.node.condition_pid_pressure"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Whether this node is PidPressure (1), not PidPressure (0) or in an unknown state (-1)", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) - case "k8s.node.condition_ready": - assert.False(t, validatedMetrics["k8s.node.condition_ready"], "Found a duplicate in the metrics slice: k8s.node.condition_ready") - validatedMetrics["k8s.node.condition_ready"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Whether this node is Ready (1), not Ready (0) or in an unknown state (-1)", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) case "k8s.pod.phase": assert.False(t, validatedMetrics["k8s.pod.phase"], "Found a duplicate in the metrics slice: k8s.pod.phase") validatedMetrics["k8s.pod.phase"] = true diff --git a/receiver/k8sclusterreceiver/internal/metadata/testdata/config.yaml b/receiver/k8sclusterreceiver/internal/metadata/testdata/config.yaml index 1350cb8659f0..5e7149058ed5 100644 --- a/receiver/k8sclusterreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/k8sclusterreceiver/internal/metadata/testdata/config.yaml @@ -55,26 +55,6 @@ all_set: enabled: true k8s.namespace.phase: enabled: true - k8s.node.allocatable_cpu: - enabled: true - k8s.node.allocatable_ephemeral_storage: - enabled: true - k8s.node.allocatable_memory: - enabled: true - k8s.node.allocatable_pods: - enabled: true - k8s.node.allocatable_storage: - enabled: true - k8s.node.condition_disk_pressure: - enabled: true - k8s.node.condition_memory_pressure: - enabled: true - k8s.node.condition_network_unavailable: - 
enabled: true - k8s.node.condition_pid_pressure: - enabled: true - k8s.node.condition_ready: - enabled: true k8s.pod.phase: enabled: true k8s.replicaset.available: @@ -224,26 +204,6 @@ none_set: enabled: false k8s.namespace.phase: enabled: false - k8s.node.allocatable_cpu: - enabled: false - k8s.node.allocatable_ephemeral_storage: - enabled: false - k8s.node.allocatable_memory: - enabled: false - k8s.node.allocatable_pods: - enabled: false - k8s.node.allocatable_storage: - enabled: false - k8s.node.condition_disk_pressure: - enabled: false - k8s.node.condition_memory_pressure: - enabled: false - k8s.node.condition_network_unavailable: - enabled: false - k8s.node.condition_pid_pressure: - enabled: false - k8s.node.condition_ready: - enabled: false k8s.pod.phase: enabled: false k8s.replicaset.available: diff --git a/receiver/k8sclusterreceiver/internal/node/nodes.go b/receiver/k8sclusterreceiver/internal/node/nodes.go index 1ab807d99c3f..de5fd3551c1b 100644 --- a/receiver/k8sclusterreceiver/internal/node/nodes.go +++ b/receiver/k8sclusterreceiver/internal/node/nodes.go @@ -11,13 +11,13 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/maps" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/metadata" - imetadata "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/metadata" ) const ( @@ -43,34 +43,28 @@ func Transform(node *corev1.Node) *corev1.Node { return newNode } -func GetMetrics(set receiver.CreateSettings, metricsBuilderConfig imetadata.MetricsBuilderConfig, node *corev1.Node, nodeConditionTypesToReport, allocatableTypesToReport []string) pmetric.Metrics { - mb := imetadata.NewMetricsBuilder(metricsBuilderConfig, set) +func GetMetrics(set receiver.CreateSettings, metricsBuilderConfig metadata.MetricsBuilderConfig, node *corev1.Node, nodeConditionTypesToReport, allocatableTypesToReport []string) pmetric.Metrics { ts := pcommon.NewTimestampFromTime(time.Now()) - customMetrics := pmetric.NewMetricSlice() + ms := pmetric.NewMetrics() + rm := ms.ResourceMetrics().AppendEmpty() + + // TODO: Generate a schema URL for the node metrics in the metadata package and use them here. 
+ rm.SetSchemaUrl(conventions.SchemaURL) + sm := rm.ScopeMetrics().AppendEmpty() + sm.Scope().SetName("otelcol/k8sclusterreceiver") + sm.Scope().SetVersion(set.BuildInfo.Version) // Adding 'node condition type' metrics for _, nodeConditionTypeValue := range nodeConditionTypesToReport { v1NodeConditionTypeValue := corev1.NodeConditionType(nodeConditionTypeValue) - v := nodeConditionValue(node, v1NodeConditionTypeValue) - switch v1NodeConditionTypeValue { - case corev1.NodeReady: - mb.RecordK8sNodeConditionReadyDataPoint(ts, v) - case corev1.NodeMemoryPressure: - mb.RecordK8sNodeConditionMemoryPressureDataPoint(ts, v) - case corev1.NodeDiskPressure: - mb.RecordK8sNodeConditionDiskPressureDataPoint(ts, v) - case corev1.NodeNetworkUnavailable: - mb.RecordK8sNodeConditionNetworkUnavailableDataPoint(ts, v) - case corev1.NodePIDPressure: - mb.RecordK8sNodeConditionPidPressureDataPoint(ts, v) - default: - customMetric := customMetrics.AppendEmpty() - customMetric.SetName(getNodeConditionMetric(nodeConditionTypeValue)) - g := customMetric.SetEmptyGauge() - dp := g.DataPoints().AppendEmpty() - dp.SetIntValue(v) - dp.SetTimestamp(ts) - } + m := sm.Metrics().AppendEmpty() + m.SetName(getNodeConditionMetric(nodeConditionTypeValue)) + m.SetDescription(fmt.Sprintf("%v condition status of the node (true=1, false=0, unknown=-1)", nodeConditionTypeValue)) + m.SetUnit("1") + g := m.SetEmptyGauge() + dp := g.DataPoints().AppendEmpty() + dp.SetIntValue(nodeConditionValue(node, v1NodeConditionTypeValue)) + dp.SetTimestamp(ts) } // Adding 'node allocatable type' metrics @@ -82,35 +76,21 @@ func GetMetrics(set receiver.CreateSettings, metricsBuilderConfig imetadata.Metr node.GetName()).Error()) continue } - //exhaustive:ignore - switch v1NodeAllocatableTypeValue { - case corev1.ResourceCPU: - // cpu metrics must be of the double type to adhere to opentelemetry system.cpu metric specifications - mb.RecordK8sNodeAllocatableCPUDataPoint(ts, float64(quantity.MilliValue())/1000.0) - case corev1.ResourceMemory: - mb.RecordK8sNodeAllocatableMemoryDataPoint(ts, quantity.Value()) - case corev1.ResourceEphemeralStorage: - mb.RecordK8sNodeAllocatableEphemeralStorageDataPoint(ts, quantity.Value()) - case corev1.ResourceStorage: - mb.RecordK8sNodeAllocatableStorageDataPoint(ts, quantity.Value()) - case corev1.ResourcePods: - mb.RecordK8sNodeAllocatablePodsDataPoint(ts, quantity.Value()) - default: - customMetric := customMetrics.AppendEmpty() - customMetric.SetName(getNodeAllocatableMetric(nodeAllocatableTypeValue)) - g := customMetric.SetEmptyGauge() - dp := g.DataPoints().AppendEmpty() - dp.SetIntValue(quantity.Value()) - dp.SetTimestamp(ts) - } + m := sm.Metrics().AppendEmpty() + m.SetName(getNodeAllocatableMetric(nodeAllocatableTypeValue)) + m.SetDescription(fmt.Sprintf("Amount of %v allocatable on the node", nodeAllocatableTypeValue)) + m.SetUnit(getNodeAllocatableUnit(v1NodeAllocatableTypeValue)) + g := m.SetEmptyGauge() + dp := g.DataPoints().AppendEmpty() + setNodeAllocatableValue(dp, v1NodeAllocatableTypeValue, quantity) + dp.SetTimestamp(ts) } - rb := imetadata.NewResourceBuilder(metricsBuilderConfig.ResourceAttributes) + rb := metadata.NewResourceBuilder(metricsBuilderConfig.ResourceAttributes) rb.SetK8sNodeUID(string(node.UID)) rb.SetK8sNodeName(node.Name) rb.SetOpencensusResourcetype("k8s") - m := mb.Emit(imetadata.WithResource(rb.Emit())) - customMetrics.MoveAndAppendTo(m.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics()) - return m + rb.Emit().MoveTo(rm.Resource()) + return ms } @@ -152,6 +132,28 @@ func 
getNodeConditionMetric(nodeConditionTypeValue string) string { return fmt.Sprintf("k8s.node.condition_%s", strcase.ToSnake(nodeConditionTypeValue)) } +func getNodeAllocatableUnit(res corev1.ResourceName) string { + switch res { + case corev1.ResourceCPU: + return "{cpu}" + case corev1.ResourceMemory, corev1.ResourceEphemeralStorage, corev1.ResourceStorage: + return "By" + case corev1.ResourcePods: + return "{pod}" + default: + return fmt.Sprintf("{%s}", string(res)) + } +} + +func setNodeAllocatableValue(dp pmetric.NumberDataPoint, res corev1.ResourceName, q resource.Quantity) { + switch res { + case corev1.ResourceCPU: + dp.SetDoubleValue(float64(q.MilliValue()) / 1000.0) + default: + dp.SetIntValue(q.Value()) + } +} + func getNodeAllocatableMetric(nodeAllocatableTypeValue string) string { return fmt.Sprintf("k8s.node.allocatable_%s", strcase.ToSnake(nodeAllocatableTypeValue)) } diff --git a/receiver/k8sclusterreceiver/internal/node/testdata/expected.yaml b/receiver/k8sclusterreceiver/internal/node/testdata/expected.yaml index 107c833cc90a..26b5486a7ae9 100644 --- a/receiver/k8sclusterreceiver/internal/node/testdata/expected.yaml +++ b/receiver/k8sclusterreceiver/internal/node/testdata/expected.yaml @@ -13,72 +13,78 @@ resourceMetrics: schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - - description: How many CPU cores remaining that the node can allocate to pods + - description: Amount of cpu allocatable on the node gauge: dataPoints: - asDouble: 0.123 name: k8s.node.allocatable_cpu - unit: '{cores}' - - description: How many bytes of ephemeral storage remaining that the node can allocate to pods + unit: '{cpu}' + - description: Amount of ephemeral-storage allocatable on the node gauge: dataPoints: - asInt: "1234" name: k8s.node.allocatable_ephemeral_storage unit: By - - description: How many bytes of RAM memory remaining that the node can allocate to pods + - description: Amount of memory allocatable on the node gauge: dataPoints: - asInt: "456" name: k8s.node.allocatable_memory unit: By - - description: How many pods remaining the node can allocate + - description: Amount of pods allocatable on the node gauge: dataPoints: - asInt: "12" name: k8s.node.allocatable_pods - unit: "{pods}" - - description: Whether this node is DiskPressure (1), not DiskPressure (0) or in an unknown state (-1) + unit: "{pod}" + - description: DiskPressure condition status of the node (true=1, false=0, unknown=-1) gauge: dataPoints: - asInt: "0" name: k8s.node.condition_disk_pressure unit: "1" - - description: Whether this node is MemoryPressure (1), not MemoryPressure (0) or in an unknown state (-1) + - description: MemoryPressure condition status of the node (true=1, false=0, unknown=-1) gauge: dataPoints: - asInt: "0" name: k8s.node.condition_memory_pressure unit: "1" - - description: Whether this node is NetworkUnavailable (1), not NetworkUnavailable (0) or in an unknown state (-1) + - description: NetworkUnavailable condition status of the node (true=1, false=0, unknown=-1) gauge: dataPoints: - asInt: "0" name: k8s.node.condition_network_unavailable unit: "1" - - description: Whether this node is PidPressure (1), not PidPressure (0) or in an unknown state (-1) + - description: PIDPressure condition status of the node (true=1, false=0, unknown=-1) gauge: dataPoints: - asInt: "0" name: k8s.node.condition_pid_pressure unit: "1" - - description: Whether this node is Ready (1), not Ready (0) or in an unknown state (-1) + - description: Ready condition status of the node (true=1, 
false=0, unknown=-1) gauge: dataPoints: - asInt: "1" name: k8s.node.condition_ready unit: "1" - - gauge: + - description: OutOfDisk condition status of the node (true=1, false=0, unknown=-1) + gauge: dataPoints: - asInt: "-1" name: k8s.node.condition_out_of_disk - - gauge: + unit: "1" + - description: Amount of hugepages-1Gi allocatable on the node + gauge: dataPoints: - asInt: "2" name: k8s.node.allocatable_hugepages_1_gi - - gauge: + unit: "{hugepages-1Gi}" + - description: Amount of hugepages-2Mi allocatable on the node + gauge: dataPoints: - asInt: "2048" name: k8s.node.allocatable_hugepages_2_mi + unit: "{hugepages-2Mi}" scope: name: otelcol/k8sclusterreceiver version: latest \ No newline at end of file diff --git a/receiver/k8sclusterreceiver/metadata.yaml b/receiver/k8sclusterreceiver/metadata.yaml index faf29b5c6206..15c8edb1eb64 100644 --- a/receiver/k8sclusterreceiver/metadata.yaml +++ b/receiver/k8sclusterreceiver/metadata.yaml @@ -357,67 +357,6 @@ metrics: gauge: value_type: int - k8s.node.condition_ready: - enabled: true - description: Whether this node is Ready (1), not Ready (0) or in an unknown state (-1) - unit: 1 - gauge: - value_type: int - k8s.node.condition_memory_pressure: - enabled: true - description: Whether this node is MemoryPressure (1), not MemoryPressure (0) or in an unknown state (-1) - unit: 1 - gauge: - value_type: int - k8s.node.condition_disk_pressure: - enabled: true - description: Whether this node is DiskPressure (1), not DiskPressure (0) or in an unknown state (-1) - unit: 1 - gauge: - value_type: int - k8s.node.condition_pid_pressure: - enabled: true - description: Whether this node is PidPressure (1), not PidPressure (0) or in an unknown state (-1) - unit: 1 - gauge: - value_type: int - k8s.node.condition_network_unavailable: - enabled: true - description: Whether this node is NetworkUnavailable (1), not NetworkUnavailable (0) or in an unknown state (-1) - unit: 1 - gauge: - value_type: int - k8s.node.allocatable_cpu: - enabled: true - description: How many CPU cores remaining that the node can allocate to pods - unit: "{cores}" - gauge: - value_type: double - k8s.node.allocatable_memory: - enabled: true - description: How many bytes of RAM memory remaining that the node can allocate to pods - unit: "By" - gauge: - value_type: int - k8s.node.allocatable_ephemeral_storage: - enabled: true - description: How many bytes of ephemeral storage remaining that the node can allocate to pods - unit: "By" - gauge: - value_type: int - k8s.node.allocatable_storage: - enabled: true - description: How many bytes of storage remaining that the node can allocate to pods - unit: "By" - gauge: - value_type: int - k8s.node.allocatable_pods: - enabled: true - description: How many pods remaining the node can allocate - unit: "{pods}" - gauge: - value_type: int - k8s.replicaset.desired: enabled: true description: Number of desired pods in this replicaset @@ -523,3 +462,9 @@ metrics: attributes: - k8s.namespace.name - resource + + # k8s.node.condition_* metrics (k8s.node.condition_ready, k8s.node.condition_memory_pressure, etc) are controlled + # by node_conditions_to_report config option. By default, only k8s.node.condition_ready is enabled. + + # k8s.node.allocatable_* metrics (k8s.node.allocatable_cpu, k8s.node.allocatable_memory, etc) are controlled + # by allocatable_types_to_report config option. By default, none of them are reported. 
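The metadata.yaml comments above describe behavior that now lives entirely in receiver configuration rather than in generated metric definitions. A minimal sketch of how those options can be set, assuming the receiver is registered under its usual `k8s_cluster` config key (the lists below are illustrative, not defaults):

```yaml
receivers:
  k8s_cluster:
    # Each listed condition is emitted as a k8s.node.condition_<snake_case> gauge
    # (true=1, false=0, unknown=-1). By default only Ready is reported.
    node_conditions_to_report: [Ready, MemoryPressure, DiskPressure, PIDPressure, NetworkUnavailable]
    # Each listed resource is emitted as a k8s.node.allocatable_<snake_case> gauge.
    # By default none are reported.
    allocatable_types_to_report: [cpu, memory, ephemeral-storage, pods]
```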
diff --git a/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml b/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml index 63ec1c246b39..7aa5a0127080 100644 --- a/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml +++ b/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml @@ -133,7 +133,7 @@ resourceMetrics: schemaUrl: "https://opentelemetry.io/schemas/1.18.0" scopeMetrics: - metrics: - - description: Whether this node is Ready (1), not Ready (0) or in an unknown state (-1) + - description: Ready condition status of the node (true=1, false=0, unknown=-1) gauge: dataPoints: - asInt: "1"
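With the predefined definitions removed, every reported type flows through the same helpers (`getNodeConditionMetric`, `getNodeAllocatableMetric`, `getNodeAllocatableUnit`, `setNodeAllocatableValue` in nodes.go), so previously "custom" types now also get consistent descriptions and units. A rough mapping, taken from the expected.yaml fixtures above rather than from an exhaustive spec:

```yaml
# condition   OutOfDisk      -> k8s.node.condition_out_of_disk        unit "1"               (int: true=1, false=0, unknown=-1)
# allocatable cpu            -> k8s.node.allocatable_cpu              unit "{cpu}"           (double, cores)
# allocatable hugepages-1Gi  -> k8s.node.allocatable_hugepages_1_gi   unit "{hugepages-1Gi}" (int)
```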