diff --git a/CHANGELOG.md b/CHANGELOG.md
index ec97e5d3e53f..d95e1c286b29 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,8 @@
### 💡 Enhancements 💡
+- `kubeletstatsreceiver`: Update receiver to use new Metrics Builder. All emitted metrics remain the same. (#9744)
+
### 🧰 Bug fixes 🧰
## v0.51.0
diff --git a/receiver/kubeletstatsreceiver/config.go b/receiver/kubeletstatsreceiver/config.go
index d300a96e50a3..7c740d4fa082 100644
--- a/receiver/kubeletstatsreceiver/config.go
+++ b/receiver/kubeletstatsreceiver/config.go
@@ -26,6 +26,7 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig"
kube "github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
var _ config.Receiver = (*Config)(nil)
@@ -48,6 +49,9 @@ type Config struct {
// Configuration of the Kubernetes API client.
K8sAPIConfig *k8sconfig.APIConfig `mapstructure:"k8s_api_config"`
+
+ // Metrics allows customizing scraped metrics representation.
+ Metrics metadata.MetricsSettings `mapstructure:"metrics"`
}
func (cfg *Config) Validate() error {
diff --git a/receiver/kubeletstatsreceiver/config_test.go b/receiver/kubeletstatsreceiver/config_test.go
index 9d3b86643c37..9ac9b5f0062d 100644
--- a/receiver/kubeletstatsreceiver/config_test.go
+++ b/receiver/kubeletstatsreceiver/config_test.go
@@ -31,6 +31,7 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig"
kube "github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
func TestLoadConfig(t *testing.T) {
@@ -59,6 +60,7 @@ func TestLoadConfig(t *testing.T) {
kubelet.PodMetricGroup,
kubelet.NodeMetricGroup,
},
+ Metrics: metadata.DefaultMetricsSettings(),
}, defaultCfg)
tlsCfg := cfg.Receivers[config.NewComponentIDWithName(typeStr, "tls")].(*Config)
@@ -86,6 +88,7 @@ func TestLoadConfig(t *testing.T) {
kubelet.PodMetricGroup,
kubelet.NodeMetricGroup,
},
+ Metrics: metadata.DefaultMetricsSettings(),
}, tlsCfg)
saCfg := cfg.Receivers[config.NewComponentIDWithName(typeStr, "sa")].(*Config)
@@ -105,6 +108,7 @@ func TestLoadConfig(t *testing.T) {
kubelet.PodMetricGroup,
kubelet.NodeMetricGroup,
},
+ Metrics: metadata.DefaultMetricsSettings(),
}, saCfg)
metadataCfg := cfg.Receivers[config.NewComponentIDWithName(typeStr, "metadata")].(*Config)
@@ -127,6 +131,7 @@ func TestLoadConfig(t *testing.T) {
kubelet.PodMetricGroup,
kubelet.NodeMetricGroup,
},
+ Metrics: metadata.DefaultMetricsSettings(),
}, metadataCfg)
metricGroupsCfg := cfg.Receivers[config.NewComponentIDWithName(typeStr, "metric_groups")].(*Config)
@@ -145,6 +150,7 @@ func TestLoadConfig(t *testing.T) {
kubelet.NodeMetricGroup,
kubelet.VolumeMetricGroup,
},
+ Metrics: metadata.DefaultMetricsSettings(),
}, metricGroupsCfg)
metadataWithK8sAPICfg := cfg.Receivers[config.NewComponentIDWithName(typeStr, "metadata_with_k8s_api")].(*Config)
@@ -167,6 +173,7 @@ func TestLoadConfig(t *testing.T) {
kubelet.NodeMetricGroup,
},
K8sAPIConfig: &k8sconfig.APIConfig{AuthType: k8sconfig.AuthTypeKubeConfig},
+ Metrics: metadata.DefaultMetricsSettings(),
}, metadataWithK8sAPICfg)
}
diff --git a/receiver/kubeletstatsreceiver/doc.go b/receiver/kubeletstatsreceiver/doc.go
index 022e323883a9..0ca68ee9cbc6 100644
--- a/receiver/kubeletstatsreceiver/doc.go
+++ b/receiver/kubeletstatsreceiver/doc.go
@@ -15,6 +15,6 @@
//go:build !windows
// +build !windows
-//go:generate mdatagen metadata.yaml
+//go:generate mdatagen --experimental-gen metadata.yaml
package kubeletstatsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver"
diff --git a/receiver/kubeletstatsreceiver/documentation.md b/receiver/kubeletstatsreceiver/documentation.md
index 3590dbc74cf5..d77f4d9a04ed 100644
--- a/receiver/kubeletstatsreceiver/documentation.md
+++ b/receiver/kubeletstatsreceiver/documentation.md
@@ -51,7 +51,14 @@ These are the metrics available for this scraper.
| **k8s.volume.inodes.free** | The free inodes in the filesystem. | 1 | Gauge(Int) | |
| **k8s.volume.inodes.used** | The inodes used by the filesystem. This may not equal inodes - free because filesystem may share inodes with other filesystems. | 1 | Gauge(Int) | |
-**Highlighted metrics** are emitted by default.
+**Highlighted metrics** are emitted by default. Other metrics are optional and not emitted by default.
+Any metric can be enabled or disabled with the following scraper configuration:
+
+```yaml
+metrics:
+ :
+ enabled:
+```
## Resource attributes
diff --git a/receiver/kubeletstatsreceiver/factory.go b/receiver/kubeletstatsreceiver/factory.go
index 21299debc87d..a5af38686f75 100644
--- a/receiver/kubeletstatsreceiver/factory.go
+++ b/receiver/kubeletstatsreceiver/factory.go
@@ -27,6 +27,7 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig"
kube "github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
const (
@@ -59,6 +60,7 @@ func createDefaultConfig() config.Receiver {
AuthType: k8sconfig.AuthTypeTLS,
},
},
+ Metrics: metadata.DefaultMetricsSettings(),
}
}
@@ -78,7 +80,7 @@ func createMetricsReceiver(
return nil, err
}
- scrp, err := newKubletScraper(rest, set, rOptions)
+ scrp, err := newKubletScraper(rest, set, rOptions, cfg.Metrics)
if err != nil {
return nil, err
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
index 73e987f04220..b82e14e5564e 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
@@ -49,33 +49,25 @@ type metricDataAccumulator struct {
logger *zap.Logger
metricGroupsToCollect map[MetricGroup]bool
time time.Time
+ mbs *metadata.MetricsBuilders
}
-const (
- scopeName = "otelcol/kubeletstatsreceiver"
-)
-
func (a *metricDataAccumulator) nodeStats(s stats.NodeStats) {
if !a.metricGroupsToCollect[NodeMetricGroup] {
return
}
- md := pmetric.NewMetrics()
- rm := md.ResourceMetrics().AppendEmpty()
- fillNodeResource(rm.Resource(), s)
-
- ilm := rm.ScopeMetrics().AppendEmpty()
- ilm.Scope().SetName(scopeName)
-
- startTime := pcommon.NewTimestampFromTime(s.StartTime.Time)
currentTime := pcommon.NewTimestampFromTime(a.time)
- addCPUMetrics(ilm.Metrics(), metadata.NodeCPUMetrics, s.CPU, startTime, currentTime)
- addMemoryMetrics(ilm.Metrics(), metadata.NodeMemoryMetrics, s.Memory, currentTime)
- addFilesystemMetrics(ilm.Metrics(), metadata.NodeFilesystemMetrics, s.Fs, currentTime)
- addNetworkMetrics(ilm.Metrics(), metadata.NodeNetworkMetrics, s.Network, startTime, currentTime)
+ addCPUMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeCPUMetrics, s.CPU, currentTime)
+ addMemoryMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeMemoryMetrics, s.Memory, currentTime)
+ addFilesystemMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeFilesystemMetrics, s.Fs, currentTime)
+ addNetworkMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeNetworkMetrics, s.Network, currentTime)
// todo s.Runtime.ImageFs
- a.m = append(a.m, md)
+ a.m = append(a.m, a.mbs.NodeMetricsBuilder.Emit(
+ metadata.WithStartTimeOverride(pcommon.NewTimestampFromTime(s.StartTime.Time)),
+ metadata.WithK8sNodeName(s.NodeName),
+ ))
}
func (a *metricDataAccumulator) podStats(s stats.PodStats) {
@@ -83,21 +75,18 @@ func (a *metricDataAccumulator) podStats(s stats.PodStats) {
return
}
- md := pmetric.NewMetrics()
- rm := md.ResourceMetrics().AppendEmpty()
- fillPodResource(rm.Resource(), s)
-
- ilm := rm.ScopeMetrics().AppendEmpty()
- ilm.Scope().SetName(scopeName)
-
- startTime := pcommon.NewTimestampFromTime(s.StartTime.Time)
currentTime := pcommon.NewTimestampFromTime(a.time)
- addCPUMetrics(ilm.Metrics(), metadata.PodCPUMetrics, s.CPU, startTime, currentTime)
- addMemoryMetrics(ilm.Metrics(), metadata.PodMemoryMetrics, s.Memory, currentTime)
- addFilesystemMetrics(ilm.Metrics(), metadata.PodFilesystemMetrics, s.EphemeralStorage, currentTime)
- addNetworkMetrics(ilm.Metrics(), metadata.PodNetworkMetrics, s.Network, startTime, currentTime)
-
- a.m = append(a.m, md)
+ addCPUMetrics(a.mbs.PodMetricsBuilder, metadata.PodCPUMetrics, s.CPU, currentTime)
+ addMemoryMetrics(a.mbs.PodMetricsBuilder, metadata.PodMemoryMetrics, s.Memory, currentTime)
+ addFilesystemMetrics(a.mbs.PodMetricsBuilder, metadata.PodFilesystemMetrics, s.EphemeralStorage, currentTime)
+ addNetworkMetrics(a.mbs.PodMetricsBuilder, metadata.PodNetworkMetrics, s.Network, currentTime)
+
+ a.m = append(a.m, a.mbs.PodMetricsBuilder.Emit(
+ metadata.WithStartTimeOverride(pcommon.NewTimestampFromTime(s.StartTime.Time)),
+ metadata.WithK8sPodUID(s.PodRef.UID),
+ metadata.WithK8sPodName(s.PodRef.Name),
+ metadata.WithK8sNamespaceName(s.PodRef.Namespace),
+ ))
}
func (a *metricDataAccumulator) containerStats(sPod stats.PodStats, s stats.ContainerStats) {
@@ -105,10 +94,8 @@ func (a *metricDataAccumulator) containerStats(sPod stats.PodStats, s stats.Cont
return
}
- md := pmetric.NewMetrics()
- rm := md.ResourceMetrics().AppendEmpty()
-
- if err := fillContainerResource(rm.Resource(), sPod, s, a.metadata); err != nil {
+ ro, err := getContainerResourceOptions(sPod, s, a.metadata)
+ if err != nil {
a.logger.Warn(
"failed to fetch container metrics",
zap.String("pod", sPod.PodRef.Name),
@@ -117,15 +104,12 @@ func (a *metricDataAccumulator) containerStats(sPod stats.PodStats, s stats.Cont
return
}
- ilm := rm.ScopeMetrics().AppendEmpty()
- ilm.Scope().SetName(scopeName)
-
- startTime := pcommon.NewTimestampFromTime(s.StartTime.Time)
currentTime := pcommon.NewTimestampFromTime(a.time)
- addCPUMetrics(ilm.Metrics(), metadata.ContainerCPUMetrics, s.CPU, startTime, currentTime)
- addMemoryMetrics(ilm.Metrics(), metadata.ContainerMemoryMetrics, s.Memory, currentTime)
- addFilesystemMetrics(ilm.Metrics(), metadata.ContainerFilesystemMetrics, s.Rootfs, currentTime)
- a.m = append(a.m, md)
+ addCPUMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerCPUMetrics, s.CPU, currentTime)
+ addMemoryMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerMemoryMetrics, s.Memory, currentTime)
+ addFilesystemMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerFilesystemMetrics, s.Rootfs, currentTime)
+
+ a.m = append(a.m, a.mbs.ContainerMetricsBuilder.Emit(ro...))
}
func (a *metricDataAccumulator) volumeStats(sPod stats.PodStats, s stats.VolumeStats) {
@@ -133,10 +117,8 @@ func (a *metricDataAccumulator) volumeStats(sPod stats.PodStats, s stats.VolumeS
return
}
- md := pmetric.NewMetrics()
- rm := md.ResourceMetrics().AppendEmpty()
-
- if err := fillVolumeResource(rm.Resource(), sPod, s, a.metadata); err != nil {
+ ro, err := getVolumeResourceOptions(sPod, s, a.metadata)
+ if err != nil {
a.logger.Warn(
"Failed to gather additional volume metadata. Skipping metric collection.",
zap.String("pod", sPod.PodRef.Name),
@@ -145,10 +127,8 @@ func (a *metricDataAccumulator) volumeStats(sPod stats.PodStats, s stats.VolumeS
return
}
- ilm := rm.ScopeMetrics().AppendEmpty()
- ilm.Scope().SetName(scopeName)
-
currentTime := pcommon.NewTimestampFromTime(a.time)
- addVolumeMetrics(ilm.Metrics(), metadata.K8sVolumeMetrics, s, currentTime)
- a.m = append(a.m, md)
+ addVolumeMetrics(a.mbs.OtherMetricsBuilder, metadata.K8sVolumeMetrics, s, currentTime)
+
+ a.m = append(a.m, a.mbs.OtherMetricsBuilder.Emit(ro...))
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
index 49f9db843d4e..b05428d1ced2 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
@@ -26,6 +26,8 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
// TestMetadataErrorCases walks through the error cases of collecting
@@ -39,7 +41,7 @@ func TestMetadataErrorCases(t *testing.T) {
numMDs int
numLogs int
logMessages []string
- detailedPVCLabelsSetterOverride func(volCacheID, volumeClaim, namespace string, labels map[string]string) error
+ detailedPVCLabelsSetterOverride func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error)
}{
{
name: "Fails to get container metadata",
@@ -176,9 +178,9 @@ func TestMetadataErrorCases(t *testing.T) {
},
},
}, nil),
- detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string, labels map[string]string) error {
+ detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error) {
// Mock failure cases.
- return errors.New("")
+ return nil, errors.New("")
},
testScenario: func(acc metricDataAccumulator) {
podStats := stats.PodStats{
@@ -205,11 +207,16 @@ func TestMetadataErrorCases(t *testing.T) {
observedLogger, logs := observer.New(zapcore.WarnLevel)
logger := zap.New(observedLogger)
- tt.metadata.DetailedPVCLabelsSetter = tt.detailedPVCLabelsSetterOverride
+ tt.metadata.DetailedPVCResourceGetter = tt.detailedPVCLabelsSetterOverride
acc := metricDataAccumulator{
metadata: tt.metadata,
logger: logger,
metricGroupsToCollect: tt.metricGroupsToCollect,
+ mbs: &metadata.MetricsBuilders{
+ NodeMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ PodMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ OtherMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ },
}
tt.testScenario(acc)
@@ -231,6 +238,12 @@ func TestNilHandling(t *testing.T) {
ContainerMetricGroup: true,
VolumeMetricGroup: true,
},
+ mbs: &metadata.MetricsBuilders{
+ NodeMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ PodMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ ContainerMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ OtherMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ },
}
assert.NotPanics(t, func() {
acc.nodeStats(stats.NodeStats{})
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/conventions.go b/receiver/kubeletstatsreceiver/internal/kubelet/conventions.go
index f1541e04aab5..a693876ebeb2 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/conventions.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/conventions.go
@@ -15,9 +15,7 @@
package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
const (
- labelPersistentVolumeClaimName = "k8s.persistentvolumeclaim.name"
- labelVolumeName = "k8s.volume.name"
- labelVolumeType = "k8s.volume.type"
+ labelVolumeType = "k8s.volume.type"
// Volume types.
labelValuePersistentVolumeClaim = "persistentVolumeClaim"
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/cpu.go b/receiver/kubeletstatsreceiver/internal/kubelet/cpu.go
index 4d8a1dfbee3a..4f9bcaefab47 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/cpu.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/cpu.go
@@ -16,32 +16,31 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-con
import (
"go.opentelemetry.io/collector/pdata/pcommon"
- "go.opentelemetry.io/collector/pdata/pmetric"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
-func addCPUMetrics(dest pmetric.MetricSlice, cpuMetrics metadata.CPUMetrics, s *stats.CPUStats, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) {
+func addCPUMetrics(mb *metadata.MetricsBuilder, cpuMetrics metadata.CPUMetrics, s *stats.CPUStats, currentTime pcommon.Timestamp) {
if s == nil {
return
}
- addCPUUsageMetric(dest, cpuMetrics.Utilization, s, currentTime)
- addCPUTimeMetric(dest, cpuMetrics.Time, s, startTime, currentTime)
+ addCPUUsageMetric(mb, cpuMetrics.Utilization, s, currentTime)
+ addCPUTimeMetric(mb, cpuMetrics.Time, s, currentTime)
}
-func addCPUUsageMetric(dest pmetric.MetricSlice, metricInt metadata.MetricIntf, s *stats.CPUStats, currentTime pcommon.Timestamp) {
+func addCPUUsageMetric(mb *metadata.MetricsBuilder, recordDataPoint metadata.RecordDoubleDataPointFunc, s *stats.CPUStats, currentTime pcommon.Timestamp) {
if s.UsageNanoCores == nil {
return
}
value := float64(*s.UsageNanoCores) / 1_000_000_000
- fillDoubleGauge(dest.AppendEmpty(), metricInt, value, currentTime)
+ recordDataPoint(mb, currentTime, value)
}
-func addCPUTimeMetric(dest pmetric.MetricSlice, metricInt metadata.MetricIntf, s *stats.CPUStats, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) {
+func addCPUTimeMetric(mb *metadata.MetricsBuilder, recordDataPoint metadata.RecordDoubleDataPointFunc, s *stats.CPUStats, currentTime pcommon.Timestamp) {
if s.UsageCoreNanoSeconds == nil {
return
}
value := float64(*s.UsageCoreNanoSeconds) / 1_000_000_000
- fillDoubleSum(dest.AppendEmpty(), metricInt, value, startTime, currentTime)
+ recordDataPoint(mb, currentTime, value)
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/fs.go b/receiver/kubeletstatsreceiver/internal/kubelet/fs.go
index 8d1d45a249b0..57f89aff39a8 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/fs.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/fs.go
@@ -16,18 +16,17 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-con
import (
"go.opentelemetry.io/collector/pdata/pcommon"
- "go.opentelemetry.io/collector/pdata/pmetric"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
-func addFilesystemMetrics(dest pmetric.MetricSlice, filesystemMetrics metadata.FilesystemMetrics, s *stats.FsStats, currentTime pcommon.Timestamp) {
+func addFilesystemMetrics(mb *metadata.MetricsBuilder, filesystemMetrics metadata.FilesystemMetrics, s *stats.FsStats, currentTime pcommon.Timestamp) {
if s == nil {
return
}
- addIntGauge(dest, filesystemMetrics.Available, s.AvailableBytes, currentTime)
- addIntGauge(dest, filesystemMetrics.Capacity, s.CapacityBytes, currentTime)
- addIntGauge(dest, filesystemMetrics.Usage, s.UsedBytes, currentTime)
+ recordIntDataPoint(mb, filesystemMetrics.Available, s.AvailableBytes, currentTime)
+ recordIntDataPoint(mb, filesystemMetrics.Capacity, s.CapacityBytes, currentTime)
+ recordIntDataPoint(mb, filesystemMetrics.Usage, s.UsedBytes, currentTime)
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/mem.go b/receiver/kubeletstatsreceiver/internal/kubelet/mem.go
index 17f04b424fcd..184b2b9cae8e 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/mem.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/mem.go
@@ -16,21 +16,20 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-con
import (
"go.opentelemetry.io/collector/pdata/pcommon"
- "go.opentelemetry.io/collector/pdata/pmetric"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
-func addMemoryMetrics(dest pmetric.MetricSlice, memoryMetrics metadata.MemoryMetrics, s *stats.MemoryStats, currentTime pcommon.Timestamp) {
+func addMemoryMetrics(mb *metadata.MetricsBuilder, memoryMetrics metadata.MemoryMetrics, s *stats.MemoryStats, currentTime pcommon.Timestamp) {
if s == nil {
return
}
- addIntGauge(dest, memoryMetrics.Available, s.AvailableBytes, currentTime)
- addIntGauge(dest, memoryMetrics.Usage, s.UsageBytes, currentTime)
- addIntGauge(dest, memoryMetrics.Rss, s.RSSBytes, currentTime)
- addIntGauge(dest, memoryMetrics.WorkingSet, s.WorkingSetBytes, currentTime)
- addIntGauge(dest, memoryMetrics.PageFaults, s.PageFaults, currentTime)
- addIntGauge(dest, memoryMetrics.MajorPageFaults, s.MajorPageFaults, currentTime)
+ recordIntDataPoint(mb, memoryMetrics.Available, s.AvailableBytes, currentTime)
+ recordIntDataPoint(mb, memoryMetrics.Usage, s.UsageBytes, currentTime)
+ recordIntDataPoint(mb, memoryMetrics.Rss, s.RSSBytes, currentTime)
+ recordIntDataPoint(mb, memoryMetrics.WorkingSet, s.WorkingSetBytes, currentTime)
+ recordIntDataPoint(mb, memoryMetrics.PageFaults, s.PageFaults, currentTime)
+ recordIntDataPoint(mb, memoryMetrics.MajorPageFaults, s.MajorPageFaults, currentTime)
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go
index 2288d5acd453..bdb16b804c0f 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go
@@ -23,6 +23,8 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
type MetadataLabel string
@@ -55,18 +57,18 @@ func ValidateMetadataLabelsConfig(labels []MetadataLabel) error {
}
type Metadata struct {
- Labels map[MetadataLabel]bool
- PodsMetadata *v1.PodList
- DetailedPVCLabelsSetter func(volCacheID, volumeClaim, namespace string, labels map[string]string) error
+ Labels map[MetadataLabel]bool
+ PodsMetadata *v1.PodList
+ DetailedPVCResourceGetter func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error)
}
func NewMetadata(
labels []MetadataLabel, podsMetadata *v1.PodList,
- detailedPVCLabelsSetter func(volCacheID, volumeClaim, namespace string, labels map[string]string) error) Metadata {
+ detailedPVCResourceGetter func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error)) Metadata {
return Metadata{
- Labels: getLabelsMap(labels),
- PodsMetadata: podsMetadata,
- DetailedPVCLabelsSetter: detailedPVCLabelsSetter,
+ Labels: getLabelsMap(labels),
+ PodsMetadata: podsMetadata,
+ DetailedPVCResourceGetter: detailedPVCResourceGetter,
}
}
@@ -78,45 +80,46 @@ func getLabelsMap(metadataLabels []MetadataLabel) map[MetadataLabel]bool {
return out
}
-// setExtraLabels sets extra labels in `labels` map based on provided metadata label.
-func (m *Metadata) setExtraLabels(
- labels map[string]string, podRef stats.PodReference,
- extraMetadataLabel MetadataLabel, extraMetadataFrom string) error {
+// getExtraResources gets extra resources based on provided metadata label.
+func (m *Metadata) getExtraResources(podRef stats.PodReference, extraMetadataLabel MetadataLabel,
+ extraMetadataFrom string) ([]metadata.ResourceMetricsOption, error) {
// Ensure MetadataLabel exists before proceeding.
if !m.Labels[extraMetadataLabel] || len(m.Labels) == 0 {
- return nil
+ return nil, nil
}
// Cannot proceed, if metadata is unavailable.
if m.PodsMetadata == nil {
- return errors.New("pods metadata were not fetched")
+ return nil, errors.New("pods metadata were not fetched")
}
switch extraMetadataLabel {
case MetadataLabelContainerID:
containerID, err := m.getContainerID(podRef.UID, extraMetadataFrom)
if err != nil {
- return err
+ return nil, err
}
- labels[conventions.AttributeContainerID] = containerID
+ return []metadata.ResourceMetricsOption{metadata.WithContainerID(containerID)}, nil
case MetadataLabelVolumeType:
volume, err := m.getPodVolume(podRef.UID, extraMetadataFrom)
if err != nil {
- return err
+ return nil, err
}
- getLabelsFromVolume(volume, labels)
+ ro := getResourcesFromVolume(volume)
// Get more labels from PersistentVolumeClaim volume type.
if volume.PersistentVolumeClaim != nil {
volCacheID := fmt.Sprintf("%s/%s", podRef.UID, extraMetadataFrom)
- if err := m.DetailedPVCLabelsSetter(volCacheID, labels[labelPersistentVolumeClaimName], podRef.Namespace,
- labels); err != nil {
- return fmt.Errorf("failed to set labels from volume claim: %w", err)
+ pvcResources, err := m.DetailedPVCResourceGetter(volCacheID, volume.PersistentVolumeClaim.ClaimName, podRef.Namespace)
+ if err != nil {
+ return nil, fmt.Errorf("failed to set labels from volume claim: %w", err)
}
+ ro = append(ro, pvcResources...)
}
+ return ro, nil
}
- return nil
+ return nil, nil
}
// getContainerID retrieves container id from metadata for given pod UID and container name,
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go
index 2021e6513f6a..adfa0b2e6795 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go
@@ -20,9 +20,12 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "go.opentelemetry.io/collector/pdata/pmetric"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
func TestValidateMetadataLabelsConfig(t *testing.T) {
@@ -75,13 +78,13 @@ func TestSetExtraLabels(t *testing.T) {
metadata Metadata
args []string
wantError string
- want map[string]string
+ want map[string]interface{}
}{
{
name: "no_labels",
metadata: NewMetadata([]MetadataLabel{}, nil, nil),
args: []string{"uid", "container.id", "container"},
- want: map[string]string{},
+ want: map[string]interface{}{},
},
{
name: "set_container_id_valid",
@@ -103,7 +106,7 @@ func TestSetExtraLabels(t *testing.T) {
},
}, nil),
args: []string{"uid-1234", "container.id", "container1"},
- want: map[string]string{
+ want: map[string]interface{}{
string(MetadataLabelContainerID): "test-container",
},
},
@@ -166,11 +169,17 @@ func TestSetExtraLabels(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- fields := map[string]string{}
- err := tt.metadata.setExtraLabels(fields, stats.PodReference{UID: tt.args[0]}, MetadataLabel(tt.args[1]), tt.args[2])
+ ro, err := tt.metadata.getExtraResources(stats.PodReference{UID: tt.args[0]}, MetadataLabel(tt.args[1]), tt.args[2])
+
+ r := pmetric.NewResourceMetrics()
+ for _, op := range ro {
+ op(r)
+ }
+
if tt.wantError == "" {
require.NoError(t, err)
- assert.EqualValues(t, tt.want, fields)
+ temp := r.Resource().Attributes().AsRaw()
+ assert.EqualValues(t, tt.want, temp)
} else {
assert.Equal(t, tt.wantError, err.Error())
}
@@ -184,7 +193,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
name string
vs v1.VolumeSource
args []string
- want map[string]string
+ want map[string]interface{}
}{
{
name: "hostPath",
@@ -192,7 +201,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
HostPath: &v1.HostPathVolumeSource{},
},
args: []string{"uid-1234", "k8s.volume.type"},
- want: map[string]string{
+ want: map[string]interface{}{
"k8s.volume.type": "hostPath",
},
},
@@ -202,7 +211,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
ConfigMap: &v1.ConfigMapVolumeSource{},
},
args: []string{"uid-1234", "k8s.volume.type"},
- want: map[string]string{
+ want: map[string]interface{}{
"k8s.volume.type": "configMap",
},
},
@@ -212,7 +221,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
EmptyDir: &v1.EmptyDirVolumeSource{},
},
args: []string{"uid-1234", "k8s.volume.type"},
- want: map[string]string{
+ want: map[string]interface{}{
"k8s.volume.type": "emptyDir",
},
},
@@ -222,7 +231,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
Secret: &v1.SecretVolumeSource{},
},
args: []string{"uid-1234", "k8s.volume.type"},
- want: map[string]string{
+ want: map[string]interface{}{
"k8s.volume.type": "secret",
},
},
@@ -232,7 +241,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
DownwardAPI: &v1.DownwardAPIVolumeSource{},
},
args: []string{"uid-1234", "k8s.volume.type"},
- want: map[string]string{
+ want: map[string]interface{}{
"k8s.volume.type": "downwardAPI",
},
},
@@ -244,7 +253,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
},
},
args: []string{"uid-1234", "k8s.volume.type"},
- want: map[string]string{
+ want: map[string]interface{}{
"k8s.volume.type": "persistentVolumeClaim",
"k8s.persistentvolumeclaim.name": "claim-name",
},
@@ -259,7 +268,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
},
},
args: []string{"uid-1234", "k8s.volume.type"},
- want: map[string]string{
+ want: map[string]interface{}{
"k8s.volume.type": "awsElasticBlockStore",
"aws.volume.id": "volume_id",
"fs.type": "fs_type",
@@ -276,7 +285,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
},
},
args: []string{"uid-1234", "k8s.volume.type"},
- want: map[string]string{
+ want: map[string]interface{}{
"k8s.volume.type": "gcePersistentDisk",
"gce.pd.name": "pd_name",
"fs.type": "fs_type",
@@ -292,7 +301,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
},
},
args: []string{"uid-1234", "k8s.volume.type"},
- want: map[string]string{
+ want: map[string]interface{}{
"k8s.volume.type": "glusterfs",
"glusterfs.endpoints.name": "endspoints_name",
"glusterfs.path": "path",
@@ -302,12 +311,11 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
name: "unsupported type",
vs: v1.VolumeSource{},
args: []string{"uid-1234", "k8s.volume.type"},
- want: map[string]string{},
+ want: map[string]interface{}{},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- fields := map[string]string{}
volName := "volume0"
metadata := NewMetadata([]MetadataLabel{MetadataLabelVolumeType}, &v1.PodList{
Items: []v1.Pod{
@@ -325,11 +333,17 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
},
},
},
- }, func(volCacheID, volumeClaim, namespace string, labels map[string]string) error {
- return nil
+ }, func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error) {
+ return nil, nil
})
- metadata.setExtraLabels(fields, stats.PodReference{UID: tt.args[0]}, MetadataLabel(tt.args[1]), volName)
- assert.Equal(t, tt.want, fields)
+ ro, _ := metadata.getExtraResources(stats.PodReference{UID: tt.args[0]}, MetadataLabel(tt.args[1]), volName)
+
+ rm := pmetric.NewResourceMetrics()
+ for _, op := range ro {
+ op(rm)
+ }
+
+ assert.Equal(t, tt.want, rm.Resource().Attributes().AsRaw())
})
}
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go b/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go
index 918a65ef47ec..f01b598db4b3 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go
@@ -20,19 +20,22 @@ import (
"go.opentelemetry.io/collector/pdata/pmetric"
"go.uber.org/zap"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
func MetricsData(
logger *zap.Logger, summary *stats.Summary,
metadata Metadata,
- metricGroupsToCollect map[MetricGroup]bool) []pmetric.Metrics {
+ metricGroupsToCollect map[MetricGroup]bool,
+ mbs *metadata.MetricsBuilders) []pmetric.Metrics {
acc := &metricDataAccumulator{
metadata: metadata,
logger: logger,
metricGroupsToCollect: metricGroupsToCollect,
time: time.Now(),
+ mbs: mbs,
}
-
acc.nodeStats(summary.Node)
for _, podStats := range summary.Pods {
acc.podStats(podStats)
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go
index 34b606f4978a..43f3a9e7b0d0 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go
@@ -22,6 +22,8 @@ import (
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.uber.org/zap"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
type fakeRestClient struct {
@@ -41,11 +43,19 @@ func TestMetricAccumulator(t *testing.T) {
summary, _ := statsProvider.StatsSummary()
metadataProvider := NewMetadataProvider(rc)
podsMetadata, _ := metadataProvider.Pods()
- metadata := NewMetadata([]MetadataLabel{MetadataLabelContainerID}, podsMetadata, nil)
- requireMetricsOk(t, MetricsData(zap.NewNop(), summary, metadata, ValidMetricGroups))
-
+ k8sMetadata := NewMetadata([]MetadataLabel{MetadataLabelContainerID}, podsMetadata, nil)
+ mbs := &metadata.MetricsBuilders{
+ NodeMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ PodMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ ContainerMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ OtherMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ }
+ requireMetricsOk(t, MetricsData(zap.NewNop(), summary, k8sMetadata, ValidMetricGroups, mbs))
// Disable all groups
- require.Equal(t, 0, len(MetricsData(zap.NewNop(), summary, metadata, map[MetricGroup]bool{})))
+ mbs.NodeMetricsBuilder.Reset()
+ mbs.PodMetricsBuilder.Reset()
+ mbs.OtherMetricsBuilder.Reset()
+ require.Equal(t, 0, len(MetricsData(zap.NewNop(), summary, k8sMetadata, map[MetricGroup]bool{}, mbs)))
}
func requireMetricsOk(t *testing.T, mds []pmetric.Metrics) {
@@ -67,7 +77,6 @@ func requireMetricsOk(t *testing.T, mds []pmetric.Metrics) {
func requireMetricOk(t *testing.T, m pmetric.Metric) {
require.NotZero(t, m.Name())
require.NotEqual(t, pmetric.MetricDataTypeNone, m.DataType())
-
switch m.DataType() {
case pmetric.MetricDataTypeGauge:
gauge := m.Gauge()
@@ -166,5 +175,11 @@ func fakeMetrics() []pmetric.Metrics {
PodMetricGroup: true,
NodeMetricGroup: true,
}
- return MetricsData(zap.NewNop(), summary, Metadata{}, mgs)
+ mbs := &metadata.MetricsBuilders{
+ NodeMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ PodMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ ContainerMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ OtherMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+ }
+ return MetricsData(zap.NewNop(), summary, Metadata{}, mgs, mbs)
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/network.go b/receiver/kubeletstatsreceiver/internal/kubelet/network.go
index c2e2c123d3be..5d494ce08e85 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/network.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/network.go
@@ -16,52 +16,25 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-con
import (
"go.opentelemetry.io/collector/pdata/pcommon"
- "go.opentelemetry.io/collector/pdata/pmetric"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
-func addNetworkMetrics(dest pmetric.MetricSlice, networkMetrics metadata.NetworkMetrics, s *stats.NetworkStats, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) {
+func addNetworkMetrics(mb *metadata.MetricsBuilder, networkMetrics metadata.NetworkMetrics, s *stats.NetworkStats, currentTime pcommon.Timestamp) {
if s == nil {
return
}
- addNetworkIOMetric(dest, networkMetrics.IO, s, startTime, currentTime)
- addNetworkErrorsMetric(dest, networkMetrics.Errors, s, startTime, currentTime)
-}
-
-func addNetworkIOMetric(dest pmetric.MetricSlice, metricInt metadata.MetricIntf, s *stats.NetworkStats, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) {
- if s.RxBytes == nil && s.TxBytes == nil {
- return
- }
-
- m := dest.AppendEmpty()
- metricInt.Init(m)
- fillNetworkDataPoint(m.Sum().DataPoints(), s.Name, metadata.AttributeDirection.Receive, s.RxBytes, startTime, currentTime)
- fillNetworkDataPoint(m.Sum().DataPoints(), s.Name, metadata.AttributeDirection.Transmit, s.TxBytes, startTime, currentTime)
+	recordNetworkDataPoint(mb, networkMetrics.IO, s, s.RxBytes, s.TxBytes, currentTime)
+	recordNetworkDataPoint(mb, networkMetrics.Errors, s, s.RxErrors, s.TxErrors, currentTime)
 }
-func addNetworkErrorsMetric(dest pmetric.MetricSlice, metricInt metadata.MetricIntf, s *stats.NetworkStats, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) {
+func recordNetworkDataPoint(mb *metadata.MetricsBuilder, recordDataPoint metadata.RecordIntDataPointWithDirectionFunc, s *stats.NetworkStats, rx, tx *uint64, currentTime pcommon.Timestamp) {
 	if s.RxBytes == nil && s.TxBytes == nil {
 		return
 	}
-	m := dest.AppendEmpty()
-	metricInt.Init(m)
-
-	fillNetworkDataPoint(m.Sum().DataPoints(), s.Name, metadata.AttributeDirection.Receive, s.RxErrors, startTime, currentTime)
-	fillNetworkDataPoint(m.Sum().DataPoints(), s.Name, metadata.AttributeDirection.Transmit, s.TxErrors, startTime, currentTime)
-}
-
-func fillNetworkDataPoint(dps pmetric.NumberDataPointSlice, interfaceName string, direction string, value *uint64, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) {
-	if value == nil {
-		return
-	}
-	dp := dps.AppendEmpty()
-	dp.Attributes().UpsertString(metadata.A.Interface, interfaceName)
-	dp.Attributes().UpsertString(metadata.A.Direction, direction)
-	dp.SetIntVal(int64(*value))
-	dp.SetStartTimestamp(startTime)
-	dp.SetTimestamp(currentTime)
+	if rx != nil { recordDataPoint(mb, currentTime, int64(*rx), s.Name, metadata.AttributeDirectionReceive) }
+	if tx != nil { recordDataPoint(mb, currentTime, int64(*tx), s.Name, metadata.AttributeDirectionTransmit) }
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/resource.go b/receiver/kubeletstatsreceiver/internal/kubelet/resource.go
index 83de0be16adc..5dcc152b9fec 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/resource.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/resource.go
@@ -18,50 +18,44 @@ import (
"fmt"
"go.opentelemetry.io/collector/pdata/pcommon"
- conventions "go.opentelemetry.io/collector/semconv/v1.6.1"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
-)
-
-func fillNodeResource(dest pcommon.Resource, s stats.NodeStats) {
- dest.Attributes().UpsertString(conventions.AttributeK8SNodeName, s.NodeName)
-}
-func fillPodResource(dest pcommon.Resource, s stats.PodStats) {
- dest.Attributes().UpsertString(conventions.AttributeK8SPodUID, s.PodRef.UID)
- dest.Attributes().UpsertString(conventions.AttributeK8SPodName, s.PodRef.Name)
- dest.Attributes().UpsertString(conventions.AttributeK8SNamespaceName, s.PodRef.Namespace)
-}
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
+)
-func fillContainerResource(dest pcommon.Resource, sPod stats.PodStats, sContainer stats.ContainerStats, metadata Metadata) error {
- labels := map[string]string{
- conventions.AttributeK8SPodUID: sPod.PodRef.UID,
- conventions.AttributeK8SPodName: sPod.PodRef.Name,
- conventions.AttributeK8SNamespaceName: sPod.PodRef.Namespace,
- conventions.AttributeK8SContainerName: sContainer.Name,
- }
- if err := metadata.setExtraLabels(labels, sPod.PodRef, MetadataLabelContainerID, sContainer.Name); err != nil {
- return fmt.Errorf("failed to set extra labels from metadata: %w", err)
+func getContainerResourceOptions(sPod stats.PodStats, sContainer stats.ContainerStats, k8sMetadata Metadata) ([]metadata.ResourceMetricsOption, error) {
+ ro := []metadata.ResourceMetricsOption{
+ metadata.WithStartTimeOverride(pcommon.NewTimestampFromTime(sContainer.StartTime.Time)),
+ metadata.WithK8sPodUID(sPod.PodRef.UID),
+ metadata.WithK8sPodName(sPod.PodRef.Name),
+ metadata.WithK8sNamespaceName(sPod.PodRef.Namespace),
+ metadata.WithContainerName(sContainer.Name),
}
- for k, v := range labels {
- dest.Attributes().UpsertString(k, v)
+
+ extraResources, err := k8sMetadata.getExtraResources(sPod.PodRef, MetadataLabelContainerID, sContainer.Name)
+ if err != nil {
+ return nil, fmt.Errorf("failed to set extra labels from metadata: %w", err)
}
- return nil
+
+ ro = append(ro, extraResources...)
+
+ return ro, nil
}
-func fillVolumeResource(dest pcommon.Resource, sPod stats.PodStats, vs stats.VolumeStats, metadata Metadata) error {
- labels := map[string]string{
- conventions.AttributeK8SPodUID: sPod.PodRef.UID,
- conventions.AttributeK8SPodName: sPod.PodRef.Name,
- conventions.AttributeK8SNamespaceName: sPod.PodRef.Namespace,
- labelVolumeName: vs.Name,
+func getVolumeResourceOptions(sPod stats.PodStats, vs stats.VolumeStats, k8sMetadata Metadata) ([]metadata.ResourceMetricsOption, error) {
+ ro := []metadata.ResourceMetricsOption{
+ metadata.WithK8sPodUID(sPod.PodRef.UID),
+ metadata.WithK8sPodName(sPod.PodRef.Name),
+ metadata.WithK8sNamespaceName(sPod.PodRef.Namespace),
+ metadata.WithK8sVolumeName(vs.Name),
}
- if err := metadata.setExtraLabels(labels, sPod.PodRef, MetadataLabelVolumeType, vs.Name); err != nil {
- return fmt.Errorf("failed to set extra labels from metadata: %w", err)
+ extraResources, err := k8sMetadata.getExtraResources(sPod.PodRef, MetadataLabelVolumeType, vs.Name)
+ if err != nil {
+ return nil, fmt.Errorf("failed to set extra labels from metadata: %w", err)
}
- for k, v := range labels {
- dest.Attributes().UpsertString(k, v)
- }
- return nil
+ ro = append(ro, extraResources...)
+
+ return ro, nil
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/utils.go b/receiver/kubeletstatsreceiver/internal/kubelet/utils.go
index 0ff47ebeefc4..bc12e5a0c5fa 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/utils.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/utils.go
@@ -16,36 +16,13 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-con
import (
"go.opentelemetry.io/collector/pdata/pcommon"
- "go.opentelemetry.io/collector/pdata/pmetric"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
-func fillDoubleGauge(dest pmetric.Metric, metricInt metadata.MetricIntf, value float64, currentTime pcommon.Timestamp) {
- metricInt.Init(dest)
- dp := dest.Gauge().DataPoints().AppendEmpty()
- dp.SetDoubleVal(value)
- dp.SetTimestamp(currentTime)
-}
-
-func addIntGauge(dest pmetric.MetricSlice, metricInt metadata.MetricIntf, value *uint64, currentTime pcommon.Timestamp) {
+func recordIntDataPoint(mb *metadata.MetricsBuilder, recordDataPoint metadata.RecordIntDataPointFunc, value *uint64, currentTime pcommon.Timestamp) {
if value == nil {
return
}
- fillIntGauge(dest.AppendEmpty(), metricInt, int64(*value), currentTime)
-}
-
-func fillIntGauge(dest pmetric.Metric, metricInt metadata.MetricIntf, value int64, currentTime pcommon.Timestamp) {
- metricInt.Init(dest)
- dp := dest.Gauge().DataPoints().AppendEmpty()
- dp.SetIntVal(value)
- dp.SetTimestamp(currentTime)
-}
-
-func fillDoubleSum(dest pmetric.Metric, metricInt metadata.MetricIntf, value float64, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) {
- metricInt.Init(dest)
- dp := dest.Sum().DataPoints().AppendEmpty()
- dp.SetDoubleVal(value)
- dp.SetStartTimestamp(startTime)
- dp.SetTimestamp(currentTime)
+ recordDataPoint(mb, currentTime, int64(*value))
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/volume.go b/receiver/kubeletstatsreceiver/internal/kubelet/volume.go
index d2957ad59355..914a311ae762 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/volume.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/volume.go
@@ -18,87 +18,94 @@ import (
"strconv"
"go.opentelemetry.io/collector/pdata/pcommon"
- "go.opentelemetry.io/collector/pdata/pmetric"
v1 "k8s.io/api/core/v1"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
-func addVolumeMetrics(dest pmetric.MetricSlice, volumeMetrics metadata.VolumeMetrics, s stats.VolumeStats, currentTime pcommon.Timestamp) {
- addIntGauge(dest, volumeMetrics.Available, s.AvailableBytes, currentTime)
- addIntGauge(dest, volumeMetrics.Capacity, s.CapacityBytes, currentTime)
- addIntGauge(dest, volumeMetrics.Inodes, s.Inodes, currentTime)
- addIntGauge(dest, volumeMetrics.InodesFree, s.InodesFree, currentTime)
- addIntGauge(dest, volumeMetrics.InodesUsed, s.InodesUsed, currentTime)
+func addVolumeMetrics(mb *metadata.MetricsBuilder, volumeMetrics metadata.VolumeMetrics, s stats.VolumeStats, currentTime pcommon.Timestamp) {
+ recordIntDataPoint(mb, volumeMetrics.Available, s.AvailableBytes, currentTime)
+ recordIntDataPoint(mb, volumeMetrics.Capacity, s.CapacityBytes, currentTime)
+ recordIntDataPoint(mb, volumeMetrics.Inodes, s.Inodes, currentTime)
+ recordIntDataPoint(mb, volumeMetrics.InodesFree, s.InodesFree, currentTime)
+ recordIntDataPoint(mb, volumeMetrics.InodesUsed, s.InodesUsed, currentTime)
}
-func getLabelsFromVolume(volume v1.Volume, labels map[string]string) {
+func getResourcesFromVolume(volume v1.Volume) []metadata.ResourceMetricsOption {
switch {
// TODO: Support more types
case volume.ConfigMap != nil:
- labels[labelVolumeType] = labelValueConfigMapVolume
+ return []metadata.ResourceMetricsOption{metadata.WithK8sVolumeType(labelValueConfigMapVolume)}
case volume.DownwardAPI != nil:
- labels[labelVolumeType] = labelValueDownwardAPIVolume
+ return []metadata.ResourceMetricsOption{metadata.WithK8sVolumeType(labelValueDownwardAPIVolume)}
case volume.EmptyDir != nil:
- labels[labelVolumeType] = labelValueEmptyDirVolume
+ return []metadata.ResourceMetricsOption{metadata.WithK8sVolumeType(labelValueEmptyDirVolume)}
case volume.Secret != nil:
- labels[labelVolumeType] = labelValueSecretVolume
+ return []metadata.ResourceMetricsOption{metadata.WithK8sVolumeType(labelValueSecretVolume)}
case volume.PersistentVolumeClaim != nil:
- labels[labelVolumeType] = labelValuePersistentVolumeClaim
- labels[labelPersistentVolumeClaimName] = volume.PersistentVolumeClaim.ClaimName
+ return []metadata.ResourceMetricsOption{metadata.WithK8sVolumeType(labelValuePersistentVolumeClaim),
+ metadata.WithK8sPersistentvolumeclaimName(volume.PersistentVolumeClaim.ClaimName)}
case volume.HostPath != nil:
- labels[labelVolumeType] = labelValueHostPathVolume
+ return []metadata.ResourceMetricsOption{metadata.WithK8sVolumeType(labelValueHostPathVolume)}
case volume.AWSElasticBlockStore != nil:
- awsElasticBlockStoreDims(*volume.AWSElasticBlockStore, labels)
+ return awsElasticBlockStoreDims(*volume.AWSElasticBlockStore)
case volume.GCEPersistentDisk != nil:
- gcePersistentDiskDims(*volume.GCEPersistentDisk, labels)
+ return gcePersistentDiskDims(*volume.GCEPersistentDisk)
case volume.Glusterfs != nil:
- glusterfsDims(*volume.Glusterfs, labels)
+ return glusterfsDims(*volume.Glusterfs)
}
+ return nil
}
-func GetPersistentVolumeLabels(pv v1.PersistentVolumeSource, labels map[string]string) {
+func GetPersistentVolumeLabels(pv v1.PersistentVolumeSource) []metadata.ResourceMetricsOption {
// TODO: Support more types
switch {
case pv.Local != nil:
- labels[labelVolumeType] = labelValueLocalVolume
+ return []metadata.ResourceMetricsOption{metadata.WithK8sVolumeType(labelValueLocalVolume)}
case pv.AWSElasticBlockStore != nil:
- awsElasticBlockStoreDims(*pv.AWSElasticBlockStore, labels)
+ return awsElasticBlockStoreDims(*pv.AWSElasticBlockStore)
case pv.GCEPersistentDisk != nil:
- gcePersistentDiskDims(*pv.GCEPersistentDisk, labels)
+ return gcePersistentDiskDims(*pv.GCEPersistentDisk)
case pv.Glusterfs != nil:
// pv.Glusterfs is a GlusterfsPersistentVolumeSource instead of GlusterfsVolumeSource,
// convert to GlusterfsVolumeSource so a single method can handle both structs. This
// can be broken out into separate methods if one is interested in different sets
// of labels from the two structs in the future.
- glusterfsDims(v1.GlusterfsVolumeSource{
+ return glusterfsDims(v1.GlusterfsVolumeSource{
EndpointsName: pv.Glusterfs.EndpointsName,
Path: pv.Glusterfs.Path,
ReadOnly: pv.Glusterfs.ReadOnly,
- }, labels)
+ })
}
+ return nil
}
-func awsElasticBlockStoreDims(vs v1.AWSElasticBlockStoreVolumeSource, labels map[string]string) {
- labels[labelVolumeType] = labelValueAWSEBSVolume
- // AWS specific labels.
- labels["aws.volume.id"] = vs.VolumeID
- labels["fs.type"] = vs.FSType
- labels["partition"] = strconv.Itoa(int(vs.Partition))
+func awsElasticBlockStoreDims(vs v1.AWSElasticBlockStoreVolumeSource) []metadata.ResourceMetricsOption {
+ return []metadata.ResourceMetricsOption{
+ metadata.WithK8sVolumeType(labelValueAWSEBSVolume),
+ // AWS specific labels.
+ metadata.WithAwsVolumeID(vs.VolumeID),
+ metadata.WithFsType(vs.FSType),
+ metadata.WithPartition(strconv.Itoa(int(vs.Partition))),
+ }
}
-func gcePersistentDiskDims(vs v1.GCEPersistentDiskVolumeSource, labels map[string]string) {
- labels[labelVolumeType] = labelValueGCEPDVolume
- // GCP specific labels.
- labels["gce.pd.name"] = vs.PDName
- labels["fs.type"] = vs.FSType
- labels["partition"] = strconv.Itoa(int(vs.Partition))
+func gcePersistentDiskDims(vs v1.GCEPersistentDiskVolumeSource) []metadata.ResourceMetricsOption {
+ return []metadata.ResourceMetricsOption{
+ metadata.WithK8sVolumeType(labelValueGCEPDVolume),
+ // GCP specific labels.
+ metadata.WithGcePdName(vs.PDName),
+ metadata.WithFsType(vs.FSType),
+ metadata.WithPartition(strconv.Itoa(int(vs.Partition))),
+ }
}
-func glusterfsDims(vs v1.GlusterfsVolumeSource, labels map[string]string) {
- labels[labelVolumeType] = labelValueGlusterFSVolume
- // GlusterFS specific labels.
- labels["glusterfs.endpoints.name"] = vs.EndpointsName
- labels["glusterfs.path"] = vs.Path
+func glusterfsDims(vs v1.GlusterfsVolumeSource) []metadata.ResourceMetricsOption {
+ return []metadata.ResourceMetricsOption{
+ metadata.WithK8sVolumeType(labelValueGlusterFSVolume),
+ // GlusterFS specific labels.
+ metadata.WithGlusterfsEndpointsName(vs.EndpointsName),
+ metadata.WithGlusterfsPath(vs.Path),
+ }
}
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go
index dcb0c42577ff..a4c713528a8a 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go
@@ -19,10 +19,13 @@ import (
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/pmetric"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
type pod struct {
@@ -38,7 +41,7 @@ func TestDetailedPVCLabels(t *testing.T) {
volumeName string
volumeSource v1.VolumeSource
pod pod
- detailedPVCLabelsSetterOverride func(volCacheID, volumeClaim, namespace string, labels map[string]string) error
+ detailedPVCLabelsSetterOverride func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error)
want map[string]interface{}
}{
{
@@ -50,15 +53,15 @@ func TestDetailedPVCLabels(t *testing.T) {
},
},
pod: pod{uid: "uid-1234", name: "pod-name", namespace: "pod-namespace"},
- detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string, labels map[string]string) error {
- GetPersistentVolumeLabels(v1.PersistentVolumeSource{
+ detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error) {
+ ro := GetPersistentVolumeLabels(v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: "volume_id",
FSType: "fs_type",
Partition: 10,
},
- }, labels)
- return nil
+ })
+ return ro, nil
},
want: map[string]interface{}{
"k8s.volume.name": "volume0",
@@ -81,15 +84,15 @@ func TestDetailedPVCLabels(t *testing.T) {
},
},
pod: pod{uid: "uid-1234", name: "pod-name", namespace: "pod-namespace"},
- detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string, labels map[string]string) error {
- GetPersistentVolumeLabels(v1.PersistentVolumeSource{
+ detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error) {
+ ro := GetPersistentVolumeLabels(v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "pd_name",
FSType: "fs_type",
Partition: 10,
},
- }, labels)
- return nil
+ })
+ return ro, nil
},
want: map[string]interface{}{
"k8s.volume.name": "volume0",
@@ -112,14 +115,14 @@ func TestDetailedPVCLabels(t *testing.T) {
},
},
pod: pod{uid: "uid-1234", name: "pod-name", namespace: "pod-namespace"},
- detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string, labels map[string]string) error {
- GetPersistentVolumeLabels(v1.PersistentVolumeSource{
+ detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error) {
+ ro := GetPersistentVolumeLabels(v1.PersistentVolumeSource{
Glusterfs: &v1.GlusterfsPersistentVolumeSource{
EndpointsName: "endpoints_name",
Path: "path",
},
- }, labels)
- return nil
+ })
+ return ro, nil
},
want: map[string]interface{}{
"k8s.volume.name": "volume0",
@@ -141,13 +144,13 @@ func TestDetailedPVCLabels(t *testing.T) {
},
},
pod: pod{uid: "uid-1234", name: "pod-name", namespace: "pod-namespace"},
- detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string, labels map[string]string) error {
- GetPersistentVolumeLabels(v1.PersistentVolumeSource{
+ detailedPVCLabelsSetterOverride: func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error) {
+ ro := GetPersistentVolumeLabels(v1.PersistentVolumeSource{
Local: &v1.LocalVolumeSource{
Path: "path",
},
- }, labels)
- return nil
+ })
+ return ro, nil
},
want: map[string]interface{}{
"k8s.volume.name": "volume0",
@@ -187,12 +190,17 @@ func TestDetailedPVCLabels(t *testing.T) {
},
},
}, nil)
- metadata.DetailedPVCLabelsSetter = tt.detailedPVCLabelsSetterOverride
+ metadata.DetailedPVCResourceGetter = tt.detailedPVCLabelsSetterOverride
- volumeResource := pcommon.NewResource()
- err := fillVolumeResource(volumeResource, podStats, stats.VolumeStats{Name: tt.volumeName}, metadata)
+ ro, err := getVolumeResourceOptions(podStats, stats.VolumeStats{Name: tt.volumeName}, metadata)
require.NoError(t, err)
- require.Equal(t, pcommon.NewMapFromRaw(tt.want).Sort(), volumeResource.Attributes().Sort())
+
+ volumeResourceMetrics := pmetric.NewResourceMetrics()
+ for _, op := range ro {
+ op(volumeResourceMetrics)
+ }
+
+ require.Equal(t, pcommon.NewMapFromRaw(tt.want).Sort(), volumeResourceMetrics.Resource().Attributes().Sort())
})
}
}
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go
deleted file mode 100644
index 92de0a9bf282..000000000000
--- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go
+++ /dev/null
@@ -1,607 +0,0 @@
-// Code generated by mdatagen. DO NOT EDIT.
-
-package metadata
-
-import (
- "go.opentelemetry.io/collector/config"
- "go.opentelemetry.io/collector/pdata/pmetric"
-)
-
-// Type is the component type name.
-const Type config.Type = "kubeletstatsreceiver"
-
-// MetricIntf is an interface to generically interact with generated metric.
-type MetricIntf interface {
- Name() string
- New() pmetric.Metric
- Init(metric pmetric.Metric)
-}
-
-// Intentionally not exposing this so that it is opaque and can change freely.
-type metricImpl struct {
- name string
- initFunc func(pmetric.Metric)
-}
-
-// Name returns the metric name.
-func (m *metricImpl) Name() string {
- return m.name
-}
-
-// New creates a metric object preinitialized.
-func (m *metricImpl) New() pmetric.Metric {
- metric := pmetric.NewMetric()
- m.Init(metric)
- return metric
-}
-
-// Init initializes the provided metric object.
-func (m *metricImpl) Init(metric pmetric.Metric) {
- m.initFunc(metric)
-}
-
-type metricStruct struct {
- ContainerCPUTime MetricIntf
- ContainerCPUUtilization MetricIntf
- ContainerFilesystemAvailable MetricIntf
- ContainerFilesystemCapacity MetricIntf
- ContainerFilesystemUsage MetricIntf
- ContainerMemoryAvailable MetricIntf
- ContainerMemoryMajorPageFaults MetricIntf
- ContainerMemoryPageFaults MetricIntf
- ContainerMemoryRss MetricIntf
- ContainerMemoryUsage MetricIntf
- ContainerMemoryWorkingSet MetricIntf
- K8sNodeCPUTime MetricIntf
- K8sNodeCPUUtilization MetricIntf
- K8sNodeFilesystemAvailable MetricIntf
- K8sNodeFilesystemCapacity MetricIntf
- K8sNodeFilesystemUsage MetricIntf
- K8sNodeMemoryAvailable MetricIntf
- K8sNodeMemoryMajorPageFaults MetricIntf
- K8sNodeMemoryPageFaults MetricIntf
- K8sNodeMemoryRss MetricIntf
- K8sNodeMemoryUsage MetricIntf
- K8sNodeMemoryWorkingSet MetricIntf
- K8sNodeNetworkErrors MetricIntf
- K8sNodeNetworkIo MetricIntf
- K8sPodCPUTime MetricIntf
- K8sPodCPUUtilization MetricIntf
- K8sPodFilesystemAvailable MetricIntf
- K8sPodFilesystemCapacity MetricIntf
- K8sPodFilesystemUsage MetricIntf
- K8sPodMemoryAvailable MetricIntf
- K8sPodMemoryMajorPageFaults MetricIntf
- K8sPodMemoryPageFaults MetricIntf
- K8sPodMemoryRss MetricIntf
- K8sPodMemoryUsage MetricIntf
- K8sPodMemoryWorkingSet MetricIntf
- K8sPodNetworkErrors MetricIntf
- K8sPodNetworkIo MetricIntf
- K8sVolumeAvailable MetricIntf
- K8sVolumeCapacity MetricIntf
- K8sVolumeInodes MetricIntf
- K8sVolumeInodesFree MetricIntf
- K8sVolumeInodesUsed MetricIntf
-}
-
-// Names returns a list of all the metric name strings.
-func (m *metricStruct) Names() []string {
- return []string{
- "container.cpu.time",
- "container.cpu.utilization",
- "container.filesystem.available",
- "container.filesystem.capacity",
- "container.filesystem.usage",
- "container.memory.available",
- "container.memory.major_page_faults",
- "container.memory.page_faults",
- "container.memory.rss",
- "container.memory.usage",
- "container.memory.working_set",
- "k8s.node.cpu.time",
- "k8s.node.cpu.utilization",
- "k8s.node.filesystem.available",
- "k8s.node.filesystem.capacity",
- "k8s.node.filesystem.usage",
- "k8s.node.memory.available",
- "k8s.node.memory.major_page_faults",
- "k8s.node.memory.page_faults",
- "k8s.node.memory.rss",
- "k8s.node.memory.usage",
- "k8s.node.memory.working_set",
- "k8s.node.network.errors",
- "k8s.node.network.io",
- "k8s.pod.cpu.time",
- "k8s.pod.cpu.utilization",
- "k8s.pod.filesystem.available",
- "k8s.pod.filesystem.capacity",
- "k8s.pod.filesystem.usage",
- "k8s.pod.memory.available",
- "k8s.pod.memory.major_page_faults",
- "k8s.pod.memory.page_faults",
- "k8s.pod.memory.rss",
- "k8s.pod.memory.usage",
- "k8s.pod.memory.working_set",
- "k8s.pod.network.errors",
- "k8s.pod.network.io",
- "k8s.volume.available",
- "k8s.volume.capacity",
- "k8s.volume.inodes",
- "k8s.volume.inodes.free",
- "k8s.volume.inodes.used",
- }
-}
-
-var metricsByName = map[string]MetricIntf{
- "container.cpu.time": Metrics.ContainerCPUTime,
- "container.cpu.utilization": Metrics.ContainerCPUUtilization,
- "container.filesystem.available": Metrics.ContainerFilesystemAvailable,
- "container.filesystem.capacity": Metrics.ContainerFilesystemCapacity,
- "container.filesystem.usage": Metrics.ContainerFilesystemUsage,
- "container.memory.available": Metrics.ContainerMemoryAvailable,
- "container.memory.major_page_faults": Metrics.ContainerMemoryMajorPageFaults,
- "container.memory.page_faults": Metrics.ContainerMemoryPageFaults,
- "container.memory.rss": Metrics.ContainerMemoryRss,
- "container.memory.usage": Metrics.ContainerMemoryUsage,
- "container.memory.working_set": Metrics.ContainerMemoryWorkingSet,
- "k8s.node.cpu.time": Metrics.K8sNodeCPUTime,
- "k8s.node.cpu.utilization": Metrics.K8sNodeCPUUtilization,
- "k8s.node.filesystem.available": Metrics.K8sNodeFilesystemAvailable,
- "k8s.node.filesystem.capacity": Metrics.K8sNodeFilesystemCapacity,
- "k8s.node.filesystem.usage": Metrics.K8sNodeFilesystemUsage,
- "k8s.node.memory.available": Metrics.K8sNodeMemoryAvailable,
- "k8s.node.memory.major_page_faults": Metrics.K8sNodeMemoryMajorPageFaults,
- "k8s.node.memory.page_faults": Metrics.K8sNodeMemoryPageFaults,
- "k8s.node.memory.rss": Metrics.K8sNodeMemoryRss,
- "k8s.node.memory.usage": Metrics.K8sNodeMemoryUsage,
- "k8s.node.memory.working_set": Metrics.K8sNodeMemoryWorkingSet,
- "k8s.node.network.errors": Metrics.K8sNodeNetworkErrors,
- "k8s.node.network.io": Metrics.K8sNodeNetworkIo,
- "k8s.pod.cpu.time": Metrics.K8sPodCPUTime,
- "k8s.pod.cpu.utilization": Metrics.K8sPodCPUUtilization,
- "k8s.pod.filesystem.available": Metrics.K8sPodFilesystemAvailable,
- "k8s.pod.filesystem.capacity": Metrics.K8sPodFilesystemCapacity,
- "k8s.pod.filesystem.usage": Metrics.K8sPodFilesystemUsage,
- "k8s.pod.memory.available": Metrics.K8sPodMemoryAvailable,
- "k8s.pod.memory.major_page_faults": Metrics.K8sPodMemoryMajorPageFaults,
- "k8s.pod.memory.page_faults": Metrics.K8sPodMemoryPageFaults,
- "k8s.pod.memory.rss": Metrics.K8sPodMemoryRss,
- "k8s.pod.memory.usage": Metrics.K8sPodMemoryUsage,
- "k8s.pod.memory.working_set": Metrics.K8sPodMemoryWorkingSet,
- "k8s.pod.network.errors": Metrics.K8sPodNetworkErrors,
- "k8s.pod.network.io": Metrics.K8sPodNetworkIo,
- "k8s.volume.available": Metrics.K8sVolumeAvailable,
- "k8s.volume.capacity": Metrics.K8sVolumeCapacity,
- "k8s.volume.inodes": Metrics.K8sVolumeInodes,
- "k8s.volume.inodes.free": Metrics.K8sVolumeInodesFree,
- "k8s.volume.inodes.used": Metrics.K8sVolumeInodesUsed,
-}
-
-func (m *metricStruct) ByName(n string) MetricIntf {
- return metricsByName[n]
-}
-
-// Metrics contains a set of methods for each metric that help with
-// manipulating those metrics.
-var Metrics = &metricStruct{
- &metricImpl{
- "container.cpu.time",
- func(metric pmetric.Metric) {
- metric.SetName("container.cpu.time")
- metric.SetDescription("Container CPU time")
- metric.SetUnit("s")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(true)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "container.cpu.utilization",
- func(metric pmetric.Metric) {
- metric.SetName("container.cpu.utilization")
- metric.SetDescription("Container CPU utilization")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "container.filesystem.available",
- func(metric pmetric.Metric) {
- metric.SetName("container.filesystem.available")
- metric.SetDescription("Container filesystem available")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "container.filesystem.capacity",
- func(metric pmetric.Metric) {
- metric.SetName("container.filesystem.capacity")
- metric.SetDescription("Container filesystem capacity")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "container.filesystem.usage",
- func(metric pmetric.Metric) {
- metric.SetName("container.filesystem.usage")
- metric.SetDescription("Container filesystem usage")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "container.memory.available",
- func(metric pmetric.Metric) {
- metric.SetName("container.memory.available")
- metric.SetDescription("Container memory available")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "container.memory.major_page_faults",
- func(metric pmetric.Metric) {
- metric.SetName("container.memory.major_page_faults")
- metric.SetDescription("Container memory major_page_faults")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "container.memory.page_faults",
- func(metric pmetric.Metric) {
- metric.SetName("container.memory.page_faults")
- metric.SetDescription("Container memory page_faults")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "container.memory.rss",
- func(metric pmetric.Metric) {
- metric.SetName("container.memory.rss")
- metric.SetDescription("Container memory rss")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "container.memory.usage",
- func(metric pmetric.Metric) {
- metric.SetName("container.memory.usage")
- metric.SetDescription("Container memory usage")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "container.memory.working_set",
- func(metric pmetric.Metric) {
- metric.SetName("container.memory.working_set")
- metric.SetDescription("Container memory working_set")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.cpu.time",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.cpu.time")
- metric.SetDescription("Node CPU time")
- metric.SetUnit("s")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(true)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "k8s.node.cpu.utilization",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.cpu.utilization")
- metric.SetDescription("Node CPU utilization")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.filesystem.available",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.filesystem.available")
- metric.SetDescription("Node filesystem available")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.filesystem.capacity",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.filesystem.capacity")
- metric.SetDescription("Node filesystem capacity")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.filesystem.usage",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.filesystem.usage")
- metric.SetDescription("Node filesystem usage")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.memory.available",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.memory.available")
- metric.SetDescription("Node memory available")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.memory.major_page_faults",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.memory.major_page_faults")
- metric.SetDescription("Node memory major_page_faults")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.memory.page_faults",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.memory.page_faults")
- metric.SetDescription("Node memory page_faults")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.memory.rss",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.memory.rss")
- metric.SetDescription("Node memory rss")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.memory.usage",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.memory.usage")
- metric.SetDescription("Node memory usage")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.memory.working_set",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.memory.working_set")
- metric.SetDescription("Node memory working_set")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.node.network.errors",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.network.errors")
- metric.SetDescription("Node network errors")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(true)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "k8s.node.network.io",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.node.network.io")
- metric.SetDescription("Node network IO")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(true)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "k8s.pod.cpu.time",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.cpu.time")
- metric.SetDescription("Pod CPU time")
- metric.SetUnit("s")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(true)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "k8s.pod.cpu.utilization",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.cpu.utilization")
- metric.SetDescription("Pod CPU utilization")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.pod.filesystem.available",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.filesystem.available")
- metric.SetDescription("Pod filesystem available")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.pod.filesystem.capacity",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.filesystem.capacity")
- metric.SetDescription("Pod filesystem capacity")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.pod.filesystem.usage",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.filesystem.usage")
- metric.SetDescription("Pod filesystem usage")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.pod.memory.available",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.memory.available")
- metric.SetDescription("Pod memory available")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.pod.memory.major_page_faults",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.memory.major_page_faults")
- metric.SetDescription("Pod memory major_page_faults")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.pod.memory.page_faults",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.memory.page_faults")
- metric.SetDescription("Pod memory page_faults")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.pod.memory.rss",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.memory.rss")
- metric.SetDescription("Pod memory rss")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.pod.memory.usage",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.memory.usage")
- metric.SetDescription("Pod memory usage")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.pod.memory.working_set",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.memory.working_set")
- metric.SetDescription("Pod memory working_set")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.pod.network.errors",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.network.errors")
- metric.SetDescription("Pod network errors")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(true)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "k8s.pod.network.io",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.pod.network.io")
- metric.SetDescription("Pod network IO")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(true)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "k8s.volume.available",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.volume.available")
- metric.SetDescription("The number of available bytes in the volume.")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.volume.capacity",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.volume.capacity")
- metric.SetDescription("The total capacity in bytes of the volume.")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.volume.inodes",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.volume.inodes")
- metric.SetDescription("The total inodes in the filesystem.")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.volume.inodes.free",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.volume.inodes.free")
- metric.SetDescription("The free inodes in the filesystem.")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "k8s.volume.inodes.used",
- func(metric pmetric.Metric) {
- metric.SetName("k8s.volume.inodes.used")
- metric.SetDescription("The inodes used by the filesystem. This may not equal inodes - free because filesystem may share inodes with other filesystems.")
- metric.SetUnit("1")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
-}
-
-// M contains a set of methods for each metric that help with
-// manipulating those metrics. M is an alias for Metrics
-var M = Metrics
-
-// Attributes contains the possible metric attributes that can be used.
-var Attributes = struct {
- // Direction (Direction of flow of bytes/operations (receive or transmit).)
- Direction string
- // Interface (Name of the network interface.)
- Interface string
-}{
- "direction",
- "interface",
-}
-
-// A is an alias for Attributes.
-var A = Attributes
-
-// AttributeDirection are the possible values that the attribute "direction" can have.
-var AttributeDirection = struct {
- Receive string
- Transmit string
-}{
- "receive",
- "transmit",
-}
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_v2.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_v2.go
new file mode 100644
index 000000000000..e92dd632cb5c
--- /dev/null
+++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_v2.go
@@ -0,0 +1,2845 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "time"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/pmetric"
+)
+
// MetricSettings provides common settings for a particular metric.
// Enabled toggles whether the metric's data buffer is allocated, whether data
// points are recorded, and whether the metric is emitted; it is decoded from
// each metric's `enabled` key in the receiver configuration.
type MetricSettings struct {
	Enabled bool `mapstructure:"enabled"`
}
+
// MetricsSettings provides settings for kubeletstatsreceiver metrics.
// Each field toggles exactly one scraped metric and is keyed by the metric's
// full name under the receiver's `metrics` configuration section.
// NOTE(review): this file is generated by mdatagen — change metadata.yaml and
// regenerate rather than editing these fields by hand.
type MetricsSettings struct {
	ContainerCPUTime               MetricSettings `mapstructure:"container.cpu.time"`
	ContainerCPUUtilization        MetricSettings `mapstructure:"container.cpu.utilization"`
	ContainerFilesystemAvailable   MetricSettings `mapstructure:"container.filesystem.available"`
	ContainerFilesystemCapacity    MetricSettings `mapstructure:"container.filesystem.capacity"`
	ContainerFilesystemUsage       MetricSettings `mapstructure:"container.filesystem.usage"`
	ContainerMemoryAvailable       MetricSettings `mapstructure:"container.memory.available"`
	ContainerMemoryMajorPageFaults MetricSettings `mapstructure:"container.memory.major_page_faults"`
	ContainerMemoryPageFaults      MetricSettings `mapstructure:"container.memory.page_faults"`
	ContainerMemoryRss             MetricSettings `mapstructure:"container.memory.rss"`
	ContainerMemoryUsage           MetricSettings `mapstructure:"container.memory.usage"`
	ContainerMemoryWorkingSet      MetricSettings `mapstructure:"container.memory.working_set"`
	K8sNodeCPUTime                 MetricSettings `mapstructure:"k8s.node.cpu.time"`
	K8sNodeCPUUtilization          MetricSettings `mapstructure:"k8s.node.cpu.utilization"`
	K8sNodeFilesystemAvailable     MetricSettings `mapstructure:"k8s.node.filesystem.available"`
	K8sNodeFilesystemCapacity      MetricSettings `mapstructure:"k8s.node.filesystem.capacity"`
	K8sNodeFilesystemUsage         MetricSettings `mapstructure:"k8s.node.filesystem.usage"`
	K8sNodeMemoryAvailable         MetricSettings `mapstructure:"k8s.node.memory.available"`
	K8sNodeMemoryMajorPageFaults   MetricSettings `mapstructure:"k8s.node.memory.major_page_faults"`
	K8sNodeMemoryPageFaults        MetricSettings `mapstructure:"k8s.node.memory.page_faults"`
	K8sNodeMemoryRss               MetricSettings `mapstructure:"k8s.node.memory.rss"`
	K8sNodeMemoryUsage             MetricSettings `mapstructure:"k8s.node.memory.usage"`
	K8sNodeMemoryWorkingSet        MetricSettings `mapstructure:"k8s.node.memory.working_set"`
	K8sNodeNetworkErrors           MetricSettings `mapstructure:"k8s.node.network.errors"`
	K8sNodeNetworkIo               MetricSettings `mapstructure:"k8s.node.network.io"`
	K8sPodCPUTime                  MetricSettings `mapstructure:"k8s.pod.cpu.time"`
	K8sPodCPUUtilization           MetricSettings `mapstructure:"k8s.pod.cpu.utilization"`
	K8sPodFilesystemAvailable      MetricSettings `mapstructure:"k8s.pod.filesystem.available"`
	K8sPodFilesystemCapacity       MetricSettings `mapstructure:"k8s.pod.filesystem.capacity"`
	K8sPodFilesystemUsage          MetricSettings `mapstructure:"k8s.pod.filesystem.usage"`
	K8sPodMemoryAvailable          MetricSettings `mapstructure:"k8s.pod.memory.available"`
	K8sPodMemoryMajorPageFaults    MetricSettings `mapstructure:"k8s.pod.memory.major_page_faults"`
	K8sPodMemoryPageFaults         MetricSettings `mapstructure:"k8s.pod.memory.page_faults"`
	K8sPodMemoryRss                MetricSettings `mapstructure:"k8s.pod.memory.rss"`
	K8sPodMemoryUsage              MetricSettings `mapstructure:"k8s.pod.memory.usage"`
	K8sPodMemoryWorkingSet         MetricSettings `mapstructure:"k8s.pod.memory.working_set"`
	K8sPodNetworkErrors            MetricSettings `mapstructure:"k8s.pod.network.errors"`
	K8sPodNetworkIo                MetricSettings `mapstructure:"k8s.pod.network.io"`
	K8sVolumeAvailable             MetricSettings `mapstructure:"k8s.volume.available"`
	K8sVolumeCapacity              MetricSettings `mapstructure:"k8s.volume.capacity"`
	K8sVolumeInodes                MetricSettings `mapstructure:"k8s.volume.inodes"`
	K8sVolumeInodesFree            MetricSettings `mapstructure:"k8s.volume.inodes.free"`
	K8sVolumeInodesUsed            MetricSettings `mapstructure:"k8s.volume.inodes.used"`
}
+
+func DefaultMetricsSettings() MetricsSettings {
+ return MetricsSettings{
+ ContainerCPUTime: MetricSettings{
+ Enabled: true,
+ },
+ ContainerCPUUtilization: MetricSettings{
+ Enabled: true,
+ },
+ ContainerFilesystemAvailable: MetricSettings{
+ Enabled: true,
+ },
+ ContainerFilesystemCapacity: MetricSettings{
+ Enabled: true,
+ },
+ ContainerFilesystemUsage: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryAvailable: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryMajorPageFaults: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryPageFaults: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryRss: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryUsage: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryWorkingSet: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeCPUTime: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeCPUUtilization: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeFilesystemAvailable: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeFilesystemCapacity: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeFilesystemUsage: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeMemoryAvailable: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeMemoryMajorPageFaults: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeMemoryPageFaults: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeMemoryRss: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeMemoryUsage: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeMemoryWorkingSet: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeNetworkErrors: MetricSettings{
+ Enabled: true,
+ },
+ K8sNodeNetworkIo: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodCPUTime: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodCPUUtilization: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodFilesystemAvailable: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodFilesystemCapacity: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodFilesystemUsage: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodMemoryAvailable: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodMemoryMajorPageFaults: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodMemoryPageFaults: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodMemoryRss: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodMemoryUsage: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodMemoryWorkingSet: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodNetworkErrors: MetricSettings{
+ Enabled: true,
+ },
+ K8sPodNetworkIo: MetricSettings{
+ Enabled: true,
+ },
+ K8sVolumeAvailable: MetricSettings{
+ Enabled: true,
+ },
+ K8sVolumeCapacity: MetricSettings{
+ Enabled: true,
+ },
+ K8sVolumeInodes: MetricSettings{
+ Enabled: true,
+ },
+ K8sVolumeInodesFree: MetricSettings{
+ Enabled: true,
+ },
+ K8sVolumeInodesUsed: MetricSettings{
+ Enabled: true,
+ },
+ }
+}
+
// AttributeDirection models the possible values of the "direction" metric
// attribute (direction of flow of bytes/operations).
type AttributeDirection int

// Valid direction values; the zero value is deliberately unused so an
// uninitialized AttributeDirection never matches a real value.
const (
	_ AttributeDirection = iota
	AttributeDirectionReceive
	AttributeDirectionTransmit
)

// String returns the attribute value as it appears on the wire, or the empty
// string for an unrecognized value.
func (av AttributeDirection) String() string {
	if av == AttributeDirectionReceive {
		return "receive"
	}
	if av == AttributeDirectionTransmit {
		return "transmit"
	}
	return ""
}

// MapAttributeDirection resolves a wire-format string back to its
// AttributeDirection value.
var MapAttributeDirection = map[string]AttributeDirection{
	"receive":  AttributeDirectionReceive,
	"transmit": AttributeDirectionTransmit,
}
+
+type metricContainerCPUTime struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.cpu.time metric with initial data.
+func (m *metricContainerCPUTime) init() {
+ m.data.SetName("container.cpu.time")
+ m.data.SetDescription("Container CPU time")
+ m.data.SetUnit("s")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricContainerCPUTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetDoubleVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerCPUTime) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerCPUTime) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerCPUTime(settings MetricSettings) metricContainerCPUTime {
+ m := metricContainerCPUTime{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerCPUUtilization struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.cpu.utilization metric with initial data.
+func (m *metricContainerCPUUtilization) init() {
+ m.data.SetName("container.cpu.utilization")
+ m.data.SetDescription("Container CPU utilization")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerCPUUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetDoubleVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerCPUUtilization) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerCPUUtilization) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerCPUUtilization(settings MetricSettings) metricContainerCPUUtilization {
+ m := metricContainerCPUUtilization{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerFilesystemAvailable struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.filesystem.available metric with initial data.
+func (m *metricContainerFilesystemAvailable) init() {
+ m.data.SetName("container.filesystem.available")
+ m.data.SetDescription("Container filesystem available")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerFilesystemAvailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerFilesystemAvailable) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerFilesystemAvailable) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerFilesystemAvailable(settings MetricSettings) metricContainerFilesystemAvailable {
+ m := metricContainerFilesystemAvailable{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerFilesystemCapacity struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.filesystem.capacity metric with initial data.
+func (m *metricContainerFilesystemCapacity) init() {
+ m.data.SetName("container.filesystem.capacity")
+ m.data.SetDescription("Container filesystem capacity")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerFilesystemCapacity) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerFilesystemCapacity) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerFilesystemCapacity) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerFilesystemCapacity(settings MetricSettings) metricContainerFilesystemCapacity {
+ m := metricContainerFilesystemCapacity{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerFilesystemUsage struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.filesystem.usage metric with initial data.
+func (m *metricContainerFilesystemUsage) init() {
+ m.data.SetName("container.filesystem.usage")
+ m.data.SetDescription("Container filesystem usage")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerFilesystemUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerFilesystemUsage) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerFilesystemUsage) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerFilesystemUsage(settings MetricSettings) metricContainerFilesystemUsage {
+ m := metricContainerFilesystemUsage{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryAvailable struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.available metric with initial data.
+func (m *metricContainerMemoryAvailable) init() {
+ m.data.SetName("container.memory.available")
+ m.data.SetDescription("Container memory available")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryAvailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryAvailable) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryAvailable) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryAvailable(settings MetricSettings) metricContainerMemoryAvailable {
+ m := metricContainerMemoryAvailable{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryMajorPageFaults struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.major_page_faults metric with initial data.
+func (m *metricContainerMemoryMajorPageFaults) init() {
+ m.data.SetName("container.memory.major_page_faults")
+ m.data.SetDescription("Container memory major_page_faults")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryMajorPageFaults) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryMajorPageFaults) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryMajorPageFaults) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryMajorPageFaults(settings MetricSettings) metricContainerMemoryMajorPageFaults {
+ m := metricContainerMemoryMajorPageFaults{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryPageFaults struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.page_faults metric with initial data.
+func (m *metricContainerMemoryPageFaults) init() {
+ m.data.SetName("container.memory.page_faults")
+ m.data.SetDescription("Container memory page_faults")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryPageFaults) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryPageFaults) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryPageFaults) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryPageFaults(settings MetricSettings) metricContainerMemoryPageFaults {
+ m := metricContainerMemoryPageFaults{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricContainerMemoryRss buffers data points for the container.memory.rss metric.
+type metricContainerMemoryRss struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.rss metric with initial data.
+func (m *metricContainerMemoryRss) init() {
+ m.data.SetName("container.memory.rss")
+ m.data.SetDescription("Container memory rss")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends a data point with the given timestamps and value; no-op when the metric is disabled.
+func (m *metricContainerMemoryRss) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryRss) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryRss) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricContainerMemoryRss returns a new metricContainerMemoryRss; the data buffer is allocated and initialized only when the metric is enabled.
+func newMetricContainerMemoryRss(settings MetricSettings) metricContainerMemoryRss {
+ m := metricContainerMemoryRss{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricContainerMemoryUsage buffers data points for the container.memory.usage metric.
+type metricContainerMemoryUsage struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.usage metric with initial data.
+func (m *metricContainerMemoryUsage) init() {
+ m.data.SetName("container.memory.usage")
+ m.data.SetDescription("Container memory usage")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends a data point with the given timestamps and value; no-op when the metric is disabled.
+func (m *metricContainerMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryUsage) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryUsage) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricContainerMemoryUsage returns a new metricContainerMemoryUsage; the data buffer is allocated and initialized only when the metric is enabled.
+func newMetricContainerMemoryUsage(settings MetricSettings) metricContainerMemoryUsage {
+ m := metricContainerMemoryUsage{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricContainerMemoryWorkingSet buffers data points for the container.memory.working_set metric.
+type metricContainerMemoryWorkingSet struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.working_set metric with initial data.
+func (m *metricContainerMemoryWorkingSet) init() {
+ m.data.SetName("container.memory.working_set")
+ m.data.SetDescription("Container memory working_set")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends a data point with the given timestamps and value; no-op when the metric is disabled.
+func (m *metricContainerMemoryWorkingSet) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryWorkingSet) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryWorkingSet) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricContainerMemoryWorkingSet returns a new metricContainerMemoryWorkingSet; the data buffer is allocated and initialized only when the metric is enabled.
+func newMetricContainerMemoryWorkingSet(settings MetricSettings) metricContainerMemoryWorkingSet {
+ m := metricContainerMemoryWorkingSet{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeCPUTime buffers data points for the k8s.node.cpu.time metric (monotonic cumulative sum).
+type metricK8sNodeCPUTime struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.cpu.time metric with initial data.
+func (m *metricK8sNodeCPUTime) init() {
+ m.data.SetName("k8s.node.cpu.time")
+ m.data.SetDescription("Node CPU time")
+ m.data.SetUnit("s")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+// recordDataPoint appends a data point with the given timestamps and value; no-op when the metric is disabled.
+func (m *metricK8sNodeCPUTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetDoubleVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sNodeCPUTime) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeCPUTime) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeCPUTime returns a new metricK8sNodeCPUTime; the data buffer is allocated and initialized only when the metric is enabled.
+func newMetricK8sNodeCPUTime(settings MetricSettings) metricK8sNodeCPUTime {
+ m := metricK8sNodeCPUTime{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeCPUUtilization buffers data points for the k8s.node.cpu.utilization metric.
+type metricK8sNodeCPUUtilization struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.cpu.utilization metric with initial data.
+func (m *metricK8sNodeCPUUtilization) init() {
+ m.data.SetName("k8s.node.cpu.utilization")
+ m.data.SetDescription("Node CPU utilization")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends a data point with the given timestamps and value; no-op when the metric is disabled.
+func (m *metricK8sNodeCPUUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetDoubleVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sNodeCPUUtilization) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeCPUUtilization) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeCPUUtilization returns a new metricK8sNodeCPUUtilization; the data buffer is allocated and initialized only when the metric is enabled.
+func newMetricK8sNodeCPUUtilization(settings MetricSettings) metricK8sNodeCPUUtilization {
+ m := metricK8sNodeCPUUtilization{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeFilesystemAvailable buffers data points for the k8s.node.filesystem.available metric.
+type metricK8sNodeFilesystemAvailable struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.filesystem.available metric with initial data.
+func (m *metricK8sNodeFilesystemAvailable) init() {
+ m.data.SetName("k8s.node.filesystem.available")
+ m.data.SetDescription("Node filesystem available")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends a data point with the given timestamps and value; no-op when the metric is disabled.
+func (m *metricK8sNodeFilesystemAvailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sNodeFilesystemAvailable) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeFilesystemAvailable) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeFilesystemAvailable returns a new metricK8sNodeFilesystemAvailable; the data buffer is allocated and initialized only when the metric is enabled.
+func newMetricK8sNodeFilesystemAvailable(settings MetricSettings) metricK8sNodeFilesystemAvailable {
+ m := metricK8sNodeFilesystemAvailable{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeFilesystemCapacity buffers data points for the k8s.node.filesystem.capacity metric.
+type metricK8sNodeFilesystemCapacity struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.filesystem.capacity metric with initial data.
+func (m *metricK8sNodeFilesystemCapacity) init() {
+ m.data.SetName("k8s.node.filesystem.capacity")
+ m.data.SetDescription("Node filesystem capacity")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends a data point with the given timestamps and value; no-op when the metric is disabled.
+func (m *metricK8sNodeFilesystemCapacity) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sNodeFilesystemCapacity) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeFilesystemCapacity) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeFilesystemCapacity returns a new metricK8sNodeFilesystemCapacity; the data buffer is allocated and initialized only when the metric is enabled.
+func newMetricK8sNodeFilesystemCapacity(settings MetricSettings) metricK8sNodeFilesystemCapacity {
+ m := metricK8sNodeFilesystemCapacity{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeFilesystemUsage buffers data points for the k8s.node.filesystem.usage metric.
+type metricK8sNodeFilesystemUsage struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.filesystem.usage metric with initial data.
+func (m *metricK8sNodeFilesystemUsage) init() {
+ m.data.SetName("k8s.node.filesystem.usage")
+ m.data.SetDescription("Node filesystem usage")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends a data point with the given timestamps and value; no-op when the metric is disabled.
+func (m *metricK8sNodeFilesystemUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sNodeFilesystemUsage) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeFilesystemUsage) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeFilesystemUsage returns a new metricK8sNodeFilesystemUsage; the data buffer is allocated and initialized only when the metric is enabled.
+func newMetricK8sNodeFilesystemUsage(settings MetricSettings) metricK8sNodeFilesystemUsage {
+ m := metricK8sNodeFilesystemUsage{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeMemoryAvailable buffers data points for the k8s.node.memory.available metric.
+type metricK8sNodeMemoryAvailable struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.memory.available metric with initial data.
+func (m *metricK8sNodeMemoryAvailable) init() {
+ m.data.SetName("k8s.node.memory.available")
+ m.data.SetDescription("Node memory available")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends a data point with the given timestamps and value; no-op when the metric is disabled.
+func (m *metricK8sNodeMemoryAvailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sNodeMemoryAvailable) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeMemoryAvailable) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeMemoryAvailable returns a new metricK8sNodeMemoryAvailable; the data buffer is allocated and initialized only when the metric is enabled.
+func newMetricK8sNodeMemoryAvailable(settings MetricSettings) metricK8sNodeMemoryAvailable {
+ m := metricK8sNodeMemoryAvailable{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeMemoryMajorPageFaults buffers data points for the k8s.node.memory.major_page_faults metric.
+type metricK8sNodeMemoryMajorPageFaults struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.memory.major_page_faults metric with initial data.
+func (m *metricK8sNodeMemoryMajorPageFaults) init() {
+ m.data.SetName("k8s.node.memory.major_page_faults")
+ m.data.SetDescription("Node memory major_page_faults")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends a data point with the given timestamps and value; no-op when the metric is disabled.
+func (m *metricK8sNodeMemoryMajorPageFaults) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sNodeMemoryMajorPageFaults) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeMemoryMajorPageFaults) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeMemoryMajorPageFaults returns a new metricK8sNodeMemoryMajorPageFaults; the data buffer is allocated and initialized only when the metric is enabled.
+func newMetricK8sNodeMemoryMajorPageFaults(settings MetricSettings) metricK8sNodeMemoryMajorPageFaults {
+ m := metricK8sNodeMemoryMajorPageFaults{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeMemoryPageFaults buffers data points for the k8s.node.memory.page_faults metric.
+type metricK8sNodeMemoryPageFaults struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.memory.page_faults metric with initial data.
+func (m *metricK8sNodeMemoryPageFaults) init() {
+ m.data.SetName("k8s.node.memory.page_faults")
+ m.data.SetDescription("Node memory page_faults")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends a data point with the given timestamps and value; no-op when the metric is disabled.
+func (m *metricK8sNodeMemoryPageFaults) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sNodeMemoryPageFaults) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeMemoryPageFaults) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeMemoryPageFaults returns a new metricK8sNodeMemoryPageFaults; the data buffer is allocated and initialized only when the metric is enabled.
+func newMetricK8sNodeMemoryPageFaults(settings MetricSettings) metricK8sNodeMemoryPageFaults {
+ m := metricK8sNodeMemoryPageFaults{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeMemoryRss buffers data points for the k8s.node.memory.rss metric.
+type metricK8sNodeMemoryRss struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.memory.rss metric with initial data.
+func (m *metricK8sNodeMemoryRss) init() {
+ m.data.SetName("k8s.node.memory.rss")
+ m.data.SetDescription("Node memory rss")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends a data point with the given timestamps and value; no-op when the metric is disabled.
+func (m *metricK8sNodeMemoryRss) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sNodeMemoryRss) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeMemoryRss) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeMemoryRss returns a new metricK8sNodeMemoryRss; the data buffer is allocated and initialized only when the metric is enabled.
+func newMetricK8sNodeMemoryRss(settings MetricSettings) metricK8sNodeMemoryRss {
+ m := metricK8sNodeMemoryRss{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeMemoryUsage buffers data points for the k8s.node.memory.usage metric.
+type metricK8sNodeMemoryUsage struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.memory.usage metric with initial data.
+func (m *metricK8sNodeMemoryUsage) init() {
+ m.data.SetName("k8s.node.memory.usage")
+ m.data.SetDescription("Node memory usage")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends a data point with the given timestamps and value; no-op when the metric is disabled.
+func (m *metricK8sNodeMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sNodeMemoryUsage) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeMemoryUsage) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeMemoryUsage returns a new metricK8sNodeMemoryUsage; the data buffer is allocated and initialized only when the metric is enabled.
+func newMetricK8sNodeMemoryUsage(settings MetricSettings) metricK8sNodeMemoryUsage {
+ m := metricK8sNodeMemoryUsage{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeMemoryWorkingSet buffers data points for the k8s.node.memory.working_set metric.
+type metricK8sNodeMemoryWorkingSet struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.memory.working_set metric with initial data.
+func (m *metricK8sNodeMemoryWorkingSet) init() {
+ m.data.SetName("k8s.node.memory.working_set")
+ m.data.SetDescription("Node memory working_set")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends a data point with the given timestamps and value; no-op when the metric is disabled.
+func (m *metricK8sNodeMemoryWorkingSet) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sNodeMemoryWorkingSet) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeMemoryWorkingSet) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeMemoryWorkingSet returns a new metricK8sNodeMemoryWorkingSet; the data buffer is allocated and initialized only when the metric is enabled.
+func newMetricK8sNodeMemoryWorkingSet(settings MetricSettings) metricK8sNodeMemoryWorkingSet {
+ m := metricK8sNodeMemoryWorkingSet{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeNetworkErrors buffers data points for the k8s.node.network.errors metric
+// (monotonic cumulative sum with interface and direction attributes).
+type metricK8sNodeNetworkErrors struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.network.errors metric with initial data.
+func (m *metricK8sNodeNetworkErrors) init() {
+ m.data.SetName("k8s.node.network.errors")
+ m.data.SetDescription("Node network errors")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+// recordDataPoint appends a data point with the given timestamps, value, and
+// interface/direction attributes; no-op when the metric is disabled.
+func (m *metricK8sNodeNetworkErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string, directionAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("interface", pcommon.NewValueString(interfaceAttributeValue))
+ dp.Attributes().Insert("direction", pcommon.NewValueString(directionAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sNodeNetworkErrors) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeNetworkErrors) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeNetworkErrors returns a new metricK8sNodeNetworkErrors; the data buffer is allocated and initialized only when the metric is enabled.
+func newMetricK8sNodeNetworkErrors(settings MetricSettings) metricK8sNodeNetworkErrors {
+ m := metricK8sNodeNetworkErrors{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sNodeNetworkIo buffers data points for the k8s.node.network.io metric
+// (monotonic cumulative sum with interface and direction attributes).
+type metricK8sNodeNetworkIo struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.node.network.io metric with initial data.
+func (m *metricK8sNodeNetworkIo) init() {
+ m.data.SetName("k8s.node.network.io")
+ m.data.SetDescription("Node network IO")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+// recordDataPoint appends a data point with the given timestamps, value, and
+// interface/direction attributes; no-op when the metric is disabled.
+func (m *metricK8sNodeNetworkIo) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string, directionAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("interface", pcommon.NewValueString(interfaceAttributeValue))
+ dp.Attributes().Insert("direction", pcommon.NewValueString(directionAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sNodeNetworkIo) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sNodeNetworkIo) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sNodeNetworkIo returns a new metricK8sNodeNetworkIo; the data buffer is allocated and initialized only when the metric is enabled.
+func newMetricK8sNodeNetworkIo(settings MetricSettings) metricK8sNodeNetworkIo {
+ m := metricK8sNodeNetworkIo{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sPodCPUTime buffers data points for the k8s.pod.cpu.time metric (monotonic cumulative sum).
+type metricK8sPodCPUTime struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.cpu.time metric with initial data.
+func (m *metricK8sPodCPUTime) init() {
+ m.data.SetName("k8s.pod.cpu.time")
+ m.data.SetDescription("Pod CPU time")
+ m.data.SetUnit("s")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+// recordDataPoint appends a data point with the given timestamps and value; no-op when the metric is disabled.
+func (m *metricK8sPodCPUTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetDoubleVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodCPUTime) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodCPUTime) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sPodCPUTime returns a new metricK8sPodCPUTime; the data buffer is allocated and initialized only when the metric is enabled.
+func newMetricK8sPodCPUTime(settings MetricSettings) metricK8sPodCPUTime {
+ m := metricK8sPodCPUTime{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sPodCPUUtilization buffers data points for the k8s.pod.cpu.utilization metric.
+type metricK8sPodCPUUtilization struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.cpu.utilization metric with initial data.
+func (m *metricK8sPodCPUUtilization) init() {
+ m.data.SetName("k8s.pod.cpu.utilization")
+ m.data.SetDescription("Pod CPU utilization")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends a data point with the given timestamps and value; no-op when the metric is disabled.
+func (m *metricK8sPodCPUUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetDoubleVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodCPUUtilization) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodCPUUtilization) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sPodCPUUtilization returns a new metricK8sPodCPUUtilization; the data buffer is allocated and initialized only when the metric is enabled.
+func newMetricK8sPodCPUUtilization(settings MetricSettings) metricK8sPodCPUUtilization {
+ m := metricK8sPodCPUUtilization{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sPodFilesystemAvailable buffers data points for the k8s.pod.filesystem.available metric.
+type metricK8sPodFilesystemAvailable struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.filesystem.available metric with initial data.
+func (m *metricK8sPodFilesystemAvailable) init() {
+ m.data.SetName("k8s.pod.filesystem.available")
+ m.data.SetDescription("Pod filesystem available")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends a data point with the given timestamps and value; no-op when the metric is disabled.
+func (m *metricK8sPodFilesystemAvailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodFilesystemAvailable) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodFilesystemAvailable) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sPodFilesystemAvailable returns a new metricK8sPodFilesystemAvailable; the data buffer is allocated and initialized only when the metric is enabled.
+func newMetricK8sPodFilesystemAvailable(settings MetricSettings) metricK8sPodFilesystemAvailable {
+ m := metricK8sPodFilesystemAvailable{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// metricK8sPodFilesystemCapacity buffers data points for the k8s.pod.filesystem.capacity metric.
+type metricK8sPodFilesystemCapacity struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.filesystem.capacity metric with initial data.
+func (m *metricK8sPodFilesystemCapacity) init() {
+ m.data.SetName("k8s.pod.filesystem.capacity")
+ m.data.SetDescription("Pod filesystem capacity")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+// recordDataPoint appends a data point with the given timestamps and value; no-op when the metric is disabled.
+func (m *metricK8sPodFilesystemCapacity) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodFilesystemCapacity) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodFilesystemCapacity) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+// newMetricK8sPodFilesystemCapacity returns a new metricK8sPodFilesystemCapacity; the data buffer is allocated and initialized only when the metric is enabled.
+func newMetricK8sPodFilesystemCapacity(settings MetricSettings) metricK8sPodFilesystemCapacity {
+ m := metricK8sPodFilesystemCapacity{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricK8sPodFilesystemUsage struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.filesystem.usage metric with initial data.
+func (m *metricK8sPodFilesystemUsage) init() {
+ m.data.SetName("k8s.pod.filesystem.usage")
+ m.data.SetDescription("Pod filesystem usage")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricK8sPodFilesystemUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodFilesystemUsage) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodFilesystemUsage) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricK8sPodFilesystemUsage(settings MetricSettings) metricK8sPodFilesystemUsage {
+ m := metricK8sPodFilesystemUsage{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricK8sPodMemoryAvailable struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.memory.available metric with initial data.
+func (m *metricK8sPodMemoryAvailable) init() {
+ m.data.SetName("k8s.pod.memory.available")
+ m.data.SetDescription("Pod memory available")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricK8sPodMemoryAvailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodMemoryAvailable) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodMemoryAvailable) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricK8sPodMemoryAvailable(settings MetricSettings) metricK8sPodMemoryAvailable {
+ m := metricK8sPodMemoryAvailable{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricK8sPodMemoryMajorPageFaults struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.memory.major_page_faults metric with initial data.
+func (m *metricK8sPodMemoryMajorPageFaults) init() {
+ m.data.SetName("k8s.pod.memory.major_page_faults")
+ m.data.SetDescription("Pod memory major_page_faults")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricK8sPodMemoryMajorPageFaults) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodMemoryMajorPageFaults) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodMemoryMajorPageFaults) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricK8sPodMemoryMajorPageFaults(settings MetricSettings) metricK8sPodMemoryMajorPageFaults {
+ m := metricK8sPodMemoryMajorPageFaults{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricK8sPodMemoryPageFaults struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.memory.page_faults metric with initial data.
+func (m *metricK8sPodMemoryPageFaults) init() {
+ m.data.SetName("k8s.pod.memory.page_faults")
+ m.data.SetDescription("Pod memory page_faults")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricK8sPodMemoryPageFaults) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodMemoryPageFaults) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodMemoryPageFaults) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricK8sPodMemoryPageFaults(settings MetricSettings) metricK8sPodMemoryPageFaults {
+ m := metricK8sPodMemoryPageFaults{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricK8sPodMemoryRss struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.memory.rss metric with initial data.
+func (m *metricK8sPodMemoryRss) init() {
+ m.data.SetName("k8s.pod.memory.rss")
+ m.data.SetDescription("Pod memory rss")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricK8sPodMemoryRss) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodMemoryRss) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodMemoryRss) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricK8sPodMemoryRss(settings MetricSettings) metricK8sPodMemoryRss {
+ m := metricK8sPodMemoryRss{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricK8sPodMemoryUsage struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.memory.usage metric with initial data.
+func (m *metricK8sPodMemoryUsage) init() {
+ m.data.SetName("k8s.pod.memory.usage")
+ m.data.SetDescription("Pod memory usage")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricK8sPodMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodMemoryUsage) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodMemoryUsage) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricK8sPodMemoryUsage(settings MetricSettings) metricK8sPodMemoryUsage {
+ m := metricK8sPodMemoryUsage{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricK8sPodMemoryWorkingSet struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.memory.working_set metric with initial data.
+func (m *metricK8sPodMemoryWorkingSet) init() {
+ m.data.SetName("k8s.pod.memory.working_set")
+ m.data.SetDescription("Pod memory working_set")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricK8sPodMemoryWorkingSet) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodMemoryWorkingSet) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodMemoryWorkingSet) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricK8sPodMemoryWorkingSet(settings MetricSettings) metricK8sPodMemoryWorkingSet {
+ m := metricK8sPodMemoryWorkingSet{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricK8sPodNetworkErrors struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.network.errors metric with initial data.
+func (m *metricK8sPodNetworkErrors) init() {
+ m.data.SetName("k8s.pod.network.errors")
+ m.data.SetDescription("Pod network errors")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricK8sPodNetworkErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string, directionAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("interface", pcommon.NewValueString(interfaceAttributeValue))
+ dp.Attributes().Insert("direction", pcommon.NewValueString(directionAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodNetworkErrors) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodNetworkErrors) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricK8sPodNetworkErrors(settings MetricSettings) metricK8sPodNetworkErrors {
+ m := metricK8sPodNetworkErrors{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricK8sPodNetworkIo struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.network.io metric with initial data.
+func (m *metricK8sPodNetworkIo) init() {
+ m.data.SetName("k8s.pod.network.io")
+ m.data.SetDescription("Pod network IO")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricK8sPodNetworkIo) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string, directionAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("interface", pcommon.NewValueString(interfaceAttributeValue))
+ dp.Attributes().Insert("direction", pcommon.NewValueString(directionAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodNetworkIo) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodNetworkIo) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricK8sPodNetworkIo(settings MetricSettings) metricK8sPodNetworkIo {
+ m := metricK8sPodNetworkIo{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricK8sVolumeAvailable struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.volume.available metric with initial data.
+func (m *metricK8sVolumeAvailable) init() {
+ m.data.SetName("k8s.volume.available")
+ m.data.SetDescription("The number of available bytes in the volume.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricK8sVolumeAvailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sVolumeAvailable) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sVolumeAvailable) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricK8sVolumeAvailable(settings MetricSettings) metricK8sVolumeAvailable {
+ m := metricK8sVolumeAvailable{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricK8sVolumeCapacity struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.volume.capacity metric with initial data.
+func (m *metricK8sVolumeCapacity) init() {
+ m.data.SetName("k8s.volume.capacity")
+ m.data.SetDescription("The total capacity in bytes of the volume.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricK8sVolumeCapacity) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sVolumeCapacity) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sVolumeCapacity) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricK8sVolumeCapacity(settings MetricSettings) metricK8sVolumeCapacity {
+ m := metricK8sVolumeCapacity{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricK8sVolumeInodes struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.volume.inodes metric with initial data.
+func (m *metricK8sVolumeInodes) init() {
+ m.data.SetName("k8s.volume.inodes")
+ m.data.SetDescription("The total inodes in the filesystem.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricK8sVolumeInodes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sVolumeInodes) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sVolumeInodes) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricK8sVolumeInodes(settings MetricSettings) metricK8sVolumeInodes {
+ m := metricK8sVolumeInodes{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricK8sVolumeInodesFree struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.volume.inodes.free metric with initial data.
+func (m *metricK8sVolumeInodesFree) init() {
+ m.data.SetName("k8s.volume.inodes.free")
+ m.data.SetDescription("The free inodes in the filesystem.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricK8sVolumeInodesFree) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sVolumeInodesFree) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sVolumeInodesFree) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricK8sVolumeInodesFree(settings MetricSettings) metricK8sVolumeInodesFree {
+ m := metricK8sVolumeInodesFree{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricK8sVolumeInodesUsed struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills k8s.volume.inodes.used metric with initial data.
+func (m *metricK8sVolumeInodesUsed) init() {
+ m.data.SetName("k8s.volume.inodes.used")
+ m.data.SetDescription("The inodes used by the filesystem. This may not equal inodes - free because filesystem may share inodes with other filesystems.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricK8sVolumeInodesUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sVolumeInodesUsed) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sVolumeInodesUsed) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricK8sVolumeInodesUsed(settings MetricSettings) metricK8sVolumeInodesUsed {
+ m := metricK8sVolumeInodesUsed{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
// required to produce metric representation defined in metadata and user settings.
type MetricsBuilder struct {
	startTime        pcommon.Timestamp // start time that will be applied to all recorded data points.
	metricsCapacity  int               // maximum observed number of metrics per resource.
	resourceCapacity int               // maximum observed number of resource attributes.
	metricsBuffer    pmetric.Metrics   // accumulates metrics data before emitting.
	// One recorder per generated metric; each buffers its own data points
	// until the builder emits them.
	metricContainerCPUTime               metricContainerCPUTime
	metricContainerCPUUtilization        metricContainerCPUUtilization
	metricContainerFilesystemAvailable   metricContainerFilesystemAvailable
	metricContainerFilesystemCapacity    metricContainerFilesystemCapacity
	metricContainerFilesystemUsage       metricContainerFilesystemUsage
	metricContainerMemoryAvailable       metricContainerMemoryAvailable
	metricContainerMemoryMajorPageFaults metricContainerMemoryMajorPageFaults
	metricContainerMemoryPageFaults      metricContainerMemoryPageFaults
	metricContainerMemoryRss             metricContainerMemoryRss
	metricContainerMemoryUsage           metricContainerMemoryUsage
	metricContainerMemoryWorkingSet      metricContainerMemoryWorkingSet
	metricK8sNodeCPUTime                 metricK8sNodeCPUTime
	metricK8sNodeCPUUtilization          metricK8sNodeCPUUtilization
	metricK8sNodeFilesystemAvailable     metricK8sNodeFilesystemAvailable
	metricK8sNodeFilesystemCapacity      metricK8sNodeFilesystemCapacity
	metricK8sNodeFilesystemUsage         metricK8sNodeFilesystemUsage
	metricK8sNodeMemoryAvailable         metricK8sNodeMemoryAvailable
	metricK8sNodeMemoryMajorPageFaults   metricK8sNodeMemoryMajorPageFaults
	metricK8sNodeMemoryPageFaults        metricK8sNodeMemoryPageFaults
	metricK8sNodeMemoryRss               metricK8sNodeMemoryRss
	metricK8sNodeMemoryUsage             metricK8sNodeMemoryUsage
	metricK8sNodeMemoryWorkingSet        metricK8sNodeMemoryWorkingSet
	metricK8sNodeNetworkErrors           metricK8sNodeNetworkErrors
	metricK8sNodeNetworkIo               metricK8sNodeNetworkIo
	metricK8sPodCPUTime                  metricK8sPodCPUTime
	metricK8sPodCPUUtilization           metricK8sPodCPUUtilization
	metricK8sPodFilesystemAvailable      metricK8sPodFilesystemAvailable
	metricK8sPodFilesystemCapacity       metricK8sPodFilesystemCapacity
	metricK8sPodFilesystemUsage          metricK8sPodFilesystemUsage
	metricK8sPodMemoryAvailable          metricK8sPodMemoryAvailable
	metricK8sPodMemoryMajorPageFaults    metricK8sPodMemoryMajorPageFaults
	metricK8sPodMemoryPageFaults         metricK8sPodMemoryPageFaults
	metricK8sPodMemoryRss                metricK8sPodMemoryRss
	metricK8sPodMemoryUsage              metricK8sPodMemoryUsage
	metricK8sPodMemoryWorkingSet         metricK8sPodMemoryWorkingSet
	metricK8sPodNetworkErrors            metricK8sPodNetworkErrors
	metricK8sPodNetworkIo                metricK8sPodNetworkIo
	metricK8sVolumeAvailable             metricK8sVolumeAvailable
	metricK8sVolumeCapacity              metricK8sVolumeCapacity
	metricK8sVolumeInodes                metricK8sVolumeInodes
	metricK8sVolumeInodesFree            metricK8sVolumeInodesFree
	metricK8sVolumeInodesUsed            metricK8sVolumeInodesUsed
}
+
// metricBuilderOption applies changes to default metrics builder.
// Options are passed to NewMetricsBuilder and run in the order given.
type metricBuilderOption func(*MetricsBuilder)
+
// WithStartTime sets startTime on the metrics builder. The start time is
// applied to all data points recorded afterwards, overriding the default of
// the builder's creation time.
func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption {
	return func(mb *MetricsBuilder) {
		mb.startTime = startTime
	}
}
+
// NewMetricsBuilder creates a MetricsBuilder with one recorder per metric,
// each configured from the corresponding field of the provided settings.
// The start time defaults to the current time and may be overridden via
// WithStartTime; any other options are applied in order before returning.
func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder {
	mb := &MetricsBuilder{
		startTime:                            pcommon.NewTimestampFromTime(time.Now()),
		metricsBuffer:                        pmetric.NewMetrics(),
		metricContainerCPUTime:               newMetricContainerCPUTime(settings.ContainerCPUTime),
		metricContainerCPUUtilization:        newMetricContainerCPUUtilization(settings.ContainerCPUUtilization),
		metricContainerFilesystemAvailable:   newMetricContainerFilesystemAvailable(settings.ContainerFilesystemAvailable),
		metricContainerFilesystemCapacity:    newMetricContainerFilesystemCapacity(settings.ContainerFilesystemCapacity),
		metricContainerFilesystemUsage:       newMetricContainerFilesystemUsage(settings.ContainerFilesystemUsage),
		metricContainerMemoryAvailable:       newMetricContainerMemoryAvailable(settings.ContainerMemoryAvailable),
		metricContainerMemoryMajorPageFaults: newMetricContainerMemoryMajorPageFaults(settings.ContainerMemoryMajorPageFaults),
		metricContainerMemoryPageFaults:      newMetricContainerMemoryPageFaults(settings.ContainerMemoryPageFaults),
		metricContainerMemoryRss:             newMetricContainerMemoryRss(settings.ContainerMemoryRss),
		metricContainerMemoryUsage:           newMetricContainerMemoryUsage(settings.ContainerMemoryUsage),
		metricContainerMemoryWorkingSet:      newMetricContainerMemoryWorkingSet(settings.ContainerMemoryWorkingSet),
		metricK8sNodeCPUTime:                 newMetricK8sNodeCPUTime(settings.K8sNodeCPUTime),
		metricK8sNodeCPUUtilization:          newMetricK8sNodeCPUUtilization(settings.K8sNodeCPUUtilization),
		metricK8sNodeFilesystemAvailable:     newMetricK8sNodeFilesystemAvailable(settings.K8sNodeFilesystemAvailable),
		metricK8sNodeFilesystemCapacity:      newMetricK8sNodeFilesystemCapacity(settings.K8sNodeFilesystemCapacity),
		metricK8sNodeFilesystemUsage:         newMetricK8sNodeFilesystemUsage(settings.K8sNodeFilesystemUsage),
		metricK8sNodeMemoryAvailable:         newMetricK8sNodeMemoryAvailable(settings.K8sNodeMemoryAvailable),
		metricK8sNodeMemoryMajorPageFaults:   newMetricK8sNodeMemoryMajorPageFaults(settings.K8sNodeMemoryMajorPageFaults),
		metricK8sNodeMemoryPageFaults:        newMetricK8sNodeMemoryPageFaults(settings.K8sNodeMemoryPageFaults),
		metricK8sNodeMemoryRss:               newMetricK8sNodeMemoryRss(settings.K8sNodeMemoryRss),
		metricK8sNodeMemoryUsage:             newMetricK8sNodeMemoryUsage(settings.K8sNodeMemoryUsage),
		metricK8sNodeMemoryWorkingSet:        newMetricK8sNodeMemoryWorkingSet(settings.K8sNodeMemoryWorkingSet),
		metricK8sNodeNetworkErrors:           newMetricK8sNodeNetworkErrors(settings.K8sNodeNetworkErrors),
		metricK8sNodeNetworkIo:               newMetricK8sNodeNetworkIo(settings.K8sNodeNetworkIo),
		metricK8sPodCPUTime:                  newMetricK8sPodCPUTime(settings.K8sPodCPUTime),
		metricK8sPodCPUUtilization:           newMetricK8sPodCPUUtilization(settings.K8sPodCPUUtilization),
		metricK8sPodFilesystemAvailable:      newMetricK8sPodFilesystemAvailable(settings.K8sPodFilesystemAvailable),
		metricK8sPodFilesystemCapacity:       newMetricK8sPodFilesystemCapacity(settings.K8sPodFilesystemCapacity),
		metricK8sPodFilesystemUsage:          newMetricK8sPodFilesystemUsage(settings.K8sPodFilesystemUsage),
		metricK8sPodMemoryAvailable:          newMetricK8sPodMemoryAvailable(settings.K8sPodMemoryAvailable),
		metricK8sPodMemoryMajorPageFaults:    newMetricK8sPodMemoryMajorPageFaults(settings.K8sPodMemoryMajorPageFaults),
		metricK8sPodMemoryPageFaults:         newMetricK8sPodMemoryPageFaults(settings.K8sPodMemoryPageFaults),
		metricK8sPodMemoryRss:                newMetricK8sPodMemoryRss(settings.K8sPodMemoryRss),
		metricK8sPodMemoryUsage:              newMetricK8sPodMemoryUsage(settings.K8sPodMemoryUsage),
		metricK8sPodMemoryWorkingSet:         newMetricK8sPodMemoryWorkingSet(settings.K8sPodMemoryWorkingSet),
		metricK8sPodNetworkErrors:            newMetricK8sPodNetworkErrors(settings.K8sPodNetworkErrors),
		metricK8sPodNetworkIo:                newMetricK8sPodNetworkIo(settings.K8sPodNetworkIo),
		metricK8sVolumeAvailable:             newMetricK8sVolumeAvailable(settings.K8sVolumeAvailable),
		metricK8sVolumeCapacity:              newMetricK8sVolumeCapacity(settings.K8sVolumeCapacity),
		metricK8sVolumeInodes:                newMetricK8sVolumeInodes(settings.K8sVolumeInodes),
		metricK8sVolumeInodesFree:            newMetricK8sVolumeInodesFree(settings.K8sVolumeInodesFree),
		metricK8sVolumeInodesUsed:            newMetricK8sVolumeInodesUsed(settings.K8sVolumeInodesUsed),
	}
	// Apply caller-provided options (e.g. WithStartTime) last so they can
	// override the defaults set above.
	for _, op := range options {
		op(mb)
	}
	return mb
}
+
+// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity.
+func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
+ if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
+ mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
+ }
+ if mb.resourceCapacity < rm.Resource().Attributes().Len() {
+ mb.resourceCapacity = rm.Resource().Attributes().Len()
+ }
+}
+
+// ResourceMetricsOption applies changes to provided resource metrics.
+type ResourceMetricsOption func(pmetric.ResourceMetrics)
+
+// WithAwsVolumeID sets provided value as "aws.volume.id" attribute for current resource.
+func WithAwsVolumeID(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("aws.volume.id", val)
+ }
+}
+
+// WithContainerID sets provided value as "container.id" attribute for current resource.
+func WithContainerID(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("container.id", val)
+ }
+}
+
+// WithContainerName sets provided value as "container.name" attribute for current resource.
+func WithContainerName(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("container.name", val)
+ }
+}
+
+// WithFsType sets provided value as "fs.type" attribute for current resource.
+func WithFsType(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("fs.type", val)
+ }
+}
+
+// WithGcePdName sets provided value as "gce.pd.name" attribute for current resource.
+func WithGcePdName(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("gce.pd.name", val)
+ }
+}
+
+// WithGlusterfsEndpointsName sets provided value as "glusterfs.endpoints.name" attribute for current resource.
+func WithGlusterfsEndpointsName(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("glusterfs.endpoints.name", val)
+ }
+}
+
+// WithGlusterfsPath sets provided value as "glusterfs.path" attribute for current resource.
+func WithGlusterfsPath(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("glusterfs.path", val)
+ }
+}
+
+// WithK8sNamespaceName sets provided value as "k8s.namespace.name" attribute for current resource.
+func WithK8sNamespaceName(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("k8s.namespace.name", val)
+ }
+}
+
+// WithK8sNodeName sets provided value as "k8s.node.name" attribute for current resource.
+func WithK8sNodeName(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("k8s.node.name", val)
+ }
+}
+
+// WithK8sPersistentvolumeclaimName sets provided value as "k8s.persistentvolumeclaim.name" attribute for current resource.
+func WithK8sPersistentvolumeclaimName(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("k8s.persistentvolumeclaim.name", val)
+ }
+}
+
+// WithK8sPodName sets provided value as "k8s.pod.name" attribute for current resource.
+func WithK8sPodName(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("k8s.pod.name", val)
+ }
+}
+
+// WithK8sPodUID sets provided value as "k8s.pod.uid" attribute for current resource.
+func WithK8sPodUID(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("k8s.pod.uid", val)
+ }
+}
+
+// WithK8sVolumeName sets provided value as "k8s.volume.name" attribute for current resource.
+func WithK8sVolumeName(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("k8s.volume.name", val)
+ }
+}
+
+// WithK8sVolumeType sets provided value as "k8s.volume.type" attribute for current resource.
+func WithK8sVolumeType(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("k8s.volume.type", val)
+ }
+}
+
+// WithPartition sets provided value as "partition" attribute for current resource.
+func WithPartition(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("partition", val)
+ }
+}
+
+// WithStartTimeOverride overrides start time for all the resource metrics data points.
+// This option should be only used if different start time has to be set on metrics coming from different resources.
+func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ metrics := rm.ScopeMetrics().At(0).Metrics()
+ for i := 0; i < metrics.Len(); i++ {
+ dps := pmetric.NewNumberDataPointSlice()
+ switch metrics.At(i).DataType() {
+ case pmetric.MetricDataTypeGauge:
+ dps = metrics.At(i).Gauge().DataPoints()
+ case pmetric.MetricDataTypeSum:
+ dps = metrics.At(i).Sum().DataPoints()
+ }
+ for j := 0; j < dps.Len(); j++ {
+ dps.At(j).SetStartTimestamp(start)
+ }
+ }
+ }
+}
+
+// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
+// recording another set of data points as part of another resource. This function can be helpful when one scraper
+// needs to emit metrics from several resources. Otherwise calling this function is not required,
+// just `Emit` function can be called instead.
+// Resource attributes should be provided as ResourceMetricsOption arguments.
+func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) {
+ rm := pmetric.NewResourceMetrics()
+ rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity)
+ ils := rm.ScopeMetrics().AppendEmpty()
+ ils.Scope().SetName("otelcol/kubeletstatsreceiver")
+ ils.Metrics().EnsureCapacity(mb.metricsCapacity)
+ mb.metricContainerCPUTime.emit(ils.Metrics())
+ mb.metricContainerCPUUtilization.emit(ils.Metrics())
+ mb.metricContainerFilesystemAvailable.emit(ils.Metrics())
+ mb.metricContainerFilesystemCapacity.emit(ils.Metrics())
+ mb.metricContainerFilesystemUsage.emit(ils.Metrics())
+ mb.metricContainerMemoryAvailable.emit(ils.Metrics())
+ mb.metricContainerMemoryMajorPageFaults.emit(ils.Metrics())
+ mb.metricContainerMemoryPageFaults.emit(ils.Metrics())
+ mb.metricContainerMemoryRss.emit(ils.Metrics())
+ mb.metricContainerMemoryUsage.emit(ils.Metrics())
+ mb.metricContainerMemoryWorkingSet.emit(ils.Metrics())
+ mb.metricK8sNodeCPUTime.emit(ils.Metrics())
+ mb.metricK8sNodeCPUUtilization.emit(ils.Metrics())
+ mb.metricK8sNodeFilesystemAvailable.emit(ils.Metrics())
+ mb.metricK8sNodeFilesystemCapacity.emit(ils.Metrics())
+ mb.metricK8sNodeFilesystemUsage.emit(ils.Metrics())
+ mb.metricK8sNodeMemoryAvailable.emit(ils.Metrics())
+ mb.metricK8sNodeMemoryMajorPageFaults.emit(ils.Metrics())
+ mb.metricK8sNodeMemoryPageFaults.emit(ils.Metrics())
+ mb.metricK8sNodeMemoryRss.emit(ils.Metrics())
+ mb.metricK8sNodeMemoryUsage.emit(ils.Metrics())
+ mb.metricK8sNodeMemoryWorkingSet.emit(ils.Metrics())
+ mb.metricK8sNodeNetworkErrors.emit(ils.Metrics())
+ mb.metricK8sNodeNetworkIo.emit(ils.Metrics())
+ mb.metricK8sPodCPUTime.emit(ils.Metrics())
+ mb.metricK8sPodCPUUtilization.emit(ils.Metrics())
+ mb.metricK8sPodFilesystemAvailable.emit(ils.Metrics())
+ mb.metricK8sPodFilesystemCapacity.emit(ils.Metrics())
+ mb.metricK8sPodFilesystemUsage.emit(ils.Metrics())
+ mb.metricK8sPodMemoryAvailable.emit(ils.Metrics())
+ mb.metricK8sPodMemoryMajorPageFaults.emit(ils.Metrics())
+ mb.metricK8sPodMemoryPageFaults.emit(ils.Metrics())
+ mb.metricK8sPodMemoryRss.emit(ils.Metrics())
+ mb.metricK8sPodMemoryUsage.emit(ils.Metrics())
+ mb.metricK8sPodMemoryWorkingSet.emit(ils.Metrics())
+ mb.metricK8sPodNetworkErrors.emit(ils.Metrics())
+ mb.metricK8sPodNetworkIo.emit(ils.Metrics())
+ mb.metricK8sVolumeAvailable.emit(ils.Metrics())
+ mb.metricK8sVolumeCapacity.emit(ils.Metrics())
+ mb.metricK8sVolumeInodes.emit(ils.Metrics())
+ mb.metricK8sVolumeInodesFree.emit(ils.Metrics())
+ mb.metricK8sVolumeInodesUsed.emit(ils.Metrics())
+ for _, op := range rmo {
+ op(rm)
+ }
+ if ils.Metrics().Len() > 0 {
+ mb.updateCapacity(rm)
+ rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
+ }
+}
+
+// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
+// recording another set of metrics. This function will be responsible for applying all the transformations required to
+// produce metric representation defined in metadata and user settings, e.g. delta or cumulative.
+func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics {
+ mb.EmitForResource(rmo...)
+ metrics := pmetric.NewMetrics()
+ mb.metricsBuffer.MoveTo(metrics)
+ return metrics
+}
+
+// RecordContainerCPUTimeDataPoint adds a data point to container.cpu.time metric.
+func (mb *MetricsBuilder) RecordContainerCPUTimeDataPoint(ts pcommon.Timestamp, val float64) {
+ mb.metricContainerCPUTime.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerCPUUtilizationDataPoint adds a data point to container.cpu.utilization metric.
+func (mb *MetricsBuilder) RecordContainerCPUUtilizationDataPoint(ts pcommon.Timestamp, val float64) {
+ mb.metricContainerCPUUtilization.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerFilesystemAvailableDataPoint adds a data point to container.filesystem.available metric.
+func (mb *MetricsBuilder) RecordContainerFilesystemAvailableDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerFilesystemAvailable.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerFilesystemCapacityDataPoint adds a data point to container.filesystem.capacity metric.
+func (mb *MetricsBuilder) RecordContainerFilesystemCapacityDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerFilesystemCapacity.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerFilesystemUsageDataPoint adds a data point to container.filesystem.usage metric.
+func (mb *MetricsBuilder) RecordContainerFilesystemUsageDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerFilesystemUsage.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryAvailableDataPoint adds a data point to container.memory.available metric.
+func (mb *MetricsBuilder) RecordContainerMemoryAvailableDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryAvailable.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryMajorPageFaultsDataPoint adds a data point to container.memory.major_page_faults metric.
+func (mb *MetricsBuilder) RecordContainerMemoryMajorPageFaultsDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryMajorPageFaults.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryPageFaultsDataPoint adds a data point to container.memory.page_faults metric.
+func (mb *MetricsBuilder) RecordContainerMemoryPageFaultsDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryPageFaults.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryRssDataPoint adds a data point to container.memory.rss metric.
+func (mb *MetricsBuilder) RecordContainerMemoryRssDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryRss.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryUsageDataPoint adds a data point to container.memory.usage metric.
+func (mb *MetricsBuilder) RecordContainerMemoryUsageDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryUsage.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryWorkingSetDataPoint adds a data point to container.memory.working_set metric.
+func (mb *MetricsBuilder) RecordContainerMemoryWorkingSetDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryWorkingSet.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeCPUTimeDataPoint adds a data point to k8s.node.cpu.time metric.
+func (mb *MetricsBuilder) RecordK8sNodeCPUTimeDataPoint(ts pcommon.Timestamp, val float64) {
+ mb.metricK8sNodeCPUTime.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeCPUUtilizationDataPoint adds a data point to k8s.node.cpu.utilization metric.
+func (mb *MetricsBuilder) RecordK8sNodeCPUUtilizationDataPoint(ts pcommon.Timestamp, val float64) {
+ mb.metricK8sNodeCPUUtilization.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeFilesystemAvailableDataPoint adds a data point to k8s.node.filesystem.available metric.
+func (mb *MetricsBuilder) RecordK8sNodeFilesystemAvailableDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sNodeFilesystemAvailable.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeFilesystemCapacityDataPoint adds a data point to k8s.node.filesystem.capacity metric.
+func (mb *MetricsBuilder) RecordK8sNodeFilesystemCapacityDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sNodeFilesystemCapacity.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeFilesystemUsageDataPoint adds a data point to k8s.node.filesystem.usage metric.
+func (mb *MetricsBuilder) RecordK8sNodeFilesystemUsageDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sNodeFilesystemUsage.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeMemoryAvailableDataPoint adds a data point to k8s.node.memory.available metric.
+func (mb *MetricsBuilder) RecordK8sNodeMemoryAvailableDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sNodeMemoryAvailable.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeMemoryMajorPageFaultsDataPoint adds a data point to k8s.node.memory.major_page_faults metric.
+func (mb *MetricsBuilder) RecordK8sNodeMemoryMajorPageFaultsDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sNodeMemoryMajorPageFaults.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeMemoryPageFaultsDataPoint adds a data point to k8s.node.memory.page_faults metric.
+func (mb *MetricsBuilder) RecordK8sNodeMemoryPageFaultsDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sNodeMemoryPageFaults.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeMemoryRssDataPoint adds a data point to k8s.node.memory.rss metric.
+func (mb *MetricsBuilder) RecordK8sNodeMemoryRssDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sNodeMemoryRss.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeMemoryUsageDataPoint adds a data point to k8s.node.memory.usage metric.
+func (mb *MetricsBuilder) RecordK8sNodeMemoryUsageDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sNodeMemoryUsage.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeMemoryWorkingSetDataPoint adds a data point to k8s.node.memory.working_set metric.
+func (mb *MetricsBuilder) RecordK8sNodeMemoryWorkingSetDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sNodeMemoryWorkingSet.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sNodeNetworkErrorsDataPoint adds a data point to k8s.node.network.errors metric.
+func (mb *MetricsBuilder) RecordK8sNodeNetworkErrorsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string, directionAttributeValue AttributeDirection) {
+ mb.metricK8sNodeNetworkErrors.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue, directionAttributeValue.String())
+}
+
+// RecordK8sNodeNetworkIoDataPoint adds a data point to k8s.node.network.io metric.
+func (mb *MetricsBuilder) RecordK8sNodeNetworkIoDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string, directionAttributeValue AttributeDirection) {
+ mb.metricK8sNodeNetworkIo.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue, directionAttributeValue.String())
+}
+
+// RecordK8sPodCPUTimeDataPoint adds a data point to k8s.pod.cpu.time metric.
+func (mb *MetricsBuilder) RecordK8sPodCPUTimeDataPoint(ts pcommon.Timestamp, val float64) {
+ mb.metricK8sPodCPUTime.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodCPUUtilizationDataPoint adds a data point to k8s.pod.cpu.utilization metric.
+func (mb *MetricsBuilder) RecordK8sPodCPUUtilizationDataPoint(ts pcommon.Timestamp, val float64) {
+ mb.metricK8sPodCPUUtilization.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodFilesystemAvailableDataPoint adds a data point to k8s.pod.filesystem.available metric.
+func (mb *MetricsBuilder) RecordK8sPodFilesystemAvailableDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sPodFilesystemAvailable.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodFilesystemCapacityDataPoint adds a data point to k8s.pod.filesystem.capacity metric.
+func (mb *MetricsBuilder) RecordK8sPodFilesystemCapacityDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sPodFilesystemCapacity.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodFilesystemUsageDataPoint adds a data point to k8s.pod.filesystem.usage metric.
+func (mb *MetricsBuilder) RecordK8sPodFilesystemUsageDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sPodFilesystemUsage.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodMemoryAvailableDataPoint adds a data point to k8s.pod.memory.available metric.
+func (mb *MetricsBuilder) RecordK8sPodMemoryAvailableDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sPodMemoryAvailable.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodMemoryMajorPageFaultsDataPoint adds a data point to k8s.pod.memory.major_page_faults metric.
+func (mb *MetricsBuilder) RecordK8sPodMemoryMajorPageFaultsDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sPodMemoryMajorPageFaults.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodMemoryPageFaultsDataPoint adds a data point to k8s.pod.memory.page_faults metric.
+func (mb *MetricsBuilder) RecordK8sPodMemoryPageFaultsDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sPodMemoryPageFaults.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodMemoryRssDataPoint adds a data point to k8s.pod.memory.rss metric.
+func (mb *MetricsBuilder) RecordK8sPodMemoryRssDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sPodMemoryRss.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodMemoryUsageDataPoint adds a data point to k8s.pod.memory.usage metric.
+func (mb *MetricsBuilder) RecordK8sPodMemoryUsageDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sPodMemoryUsage.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodMemoryWorkingSetDataPoint adds a data point to k8s.pod.memory.working_set metric.
+func (mb *MetricsBuilder) RecordK8sPodMemoryWorkingSetDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sPodMemoryWorkingSet.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodNetworkErrorsDataPoint adds a data point to k8s.pod.network.errors metric.
+func (mb *MetricsBuilder) RecordK8sPodNetworkErrorsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string, directionAttributeValue AttributeDirection) {
+ mb.metricK8sPodNetworkErrors.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue, directionAttributeValue.String())
+}
+
+// RecordK8sPodNetworkIoDataPoint adds a data point to k8s.pod.network.io metric.
+func (mb *MetricsBuilder) RecordK8sPodNetworkIoDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string, directionAttributeValue AttributeDirection) {
+ mb.metricK8sPodNetworkIo.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue, directionAttributeValue.String())
+}
+
+// RecordK8sVolumeAvailableDataPoint adds a data point to k8s.volume.available metric.
+func (mb *MetricsBuilder) RecordK8sVolumeAvailableDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sVolumeAvailable.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sVolumeCapacityDataPoint adds a data point to k8s.volume.capacity metric.
+func (mb *MetricsBuilder) RecordK8sVolumeCapacityDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sVolumeCapacity.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sVolumeInodesDataPoint adds a data point to k8s.volume.inodes metric.
+func (mb *MetricsBuilder) RecordK8sVolumeInodesDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sVolumeInodes.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sVolumeInodesFreeDataPoint adds a data point to k8s.volume.inodes.free metric.
+func (mb *MetricsBuilder) RecordK8sVolumeInodesFreeDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sVolumeInodesFree.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sVolumeInodesUsedDataPoint adds a data point to k8s.volume.inodes.used metric.
+func (mb *MetricsBuilder) RecordK8sVolumeInodesUsedDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricK8sVolumeInodesUsed.recordDataPoint(mb.startTime, ts, val)
+}
+
+// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
+// and metrics builder should update its startTime and reset its internal state accordingly.
+func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {
+ mb.startTime = pcommon.NewTimestampFromTime(time.Now())
+ for _, op := range options {
+ op(mb)
+ }
+}
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
index 3ccc516674c2..590e1cf44c56 100644
--- a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
+++ b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
@@ -14,113 +14,128 @@
package metadata // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
+import "go.opentelemetry.io/collector/pdata/pcommon"
+
+type RecordDoubleDataPointFunc func(*MetricsBuilder, pcommon.Timestamp, float64)
+
+type RecordIntDataPointFunc func(*MetricsBuilder, pcommon.Timestamp, int64)
+
+type RecordIntDataPointWithDirectionFunc func(*MetricsBuilder, pcommon.Timestamp, int64, string, AttributeDirection)
+
+type MetricsBuilders struct {
+ NodeMetricsBuilder *MetricsBuilder
+ PodMetricsBuilder *MetricsBuilder
+ ContainerMetricsBuilder *MetricsBuilder
+ OtherMetricsBuilder *MetricsBuilder
+}
+
type CPUMetrics struct {
- Time MetricIntf
- Utilization MetricIntf
+ Time RecordDoubleDataPointFunc
+ Utilization RecordDoubleDataPointFunc
}
var NodeCPUMetrics = CPUMetrics{
- Time: M.K8sNodeCPUTime,
- Utilization: M.K8sNodeCPUUtilization,
+ Time: (*MetricsBuilder).RecordK8sNodeCPUTimeDataPoint,
+ Utilization: (*MetricsBuilder).RecordK8sNodeCPUUtilizationDataPoint,
}
var PodCPUMetrics = CPUMetrics{
- Time: M.K8sPodCPUTime,
- Utilization: M.K8sPodCPUUtilization,
+ Time: (*MetricsBuilder).RecordK8sPodCPUTimeDataPoint,
+ Utilization: (*MetricsBuilder).RecordK8sPodCPUUtilizationDataPoint,
}
var ContainerCPUMetrics = CPUMetrics{
- Time: M.ContainerCPUTime,
- Utilization: M.ContainerCPUUtilization,
+ Time: (*MetricsBuilder).RecordContainerCPUTimeDataPoint,
+ Utilization: (*MetricsBuilder).RecordContainerCPUUtilizationDataPoint,
}
type MemoryMetrics struct {
- Available MetricIntf
- Usage MetricIntf
- Rss MetricIntf
- WorkingSet MetricIntf
- PageFaults MetricIntf
- MajorPageFaults MetricIntf
+ Available RecordIntDataPointFunc
+ Usage RecordIntDataPointFunc
+ Rss RecordIntDataPointFunc
+ WorkingSet RecordIntDataPointFunc
+ PageFaults RecordIntDataPointFunc
+ MajorPageFaults RecordIntDataPointFunc
}
var NodeMemoryMetrics = MemoryMetrics{
- Available: M.K8sNodeMemoryAvailable,
- Usage: M.K8sNodeMemoryUsage,
- Rss: M.K8sNodeMemoryRss,
- WorkingSet: M.K8sNodeMemoryWorkingSet,
- PageFaults: M.K8sNodeMemoryPageFaults,
- MajorPageFaults: M.K8sNodeMemoryMajorPageFaults,
+ Available: (*MetricsBuilder).RecordK8sNodeMemoryAvailableDataPoint,
+ Usage: (*MetricsBuilder).RecordK8sNodeMemoryUsageDataPoint,
+ Rss: (*MetricsBuilder).RecordK8sNodeMemoryRssDataPoint,
+ WorkingSet: (*MetricsBuilder).RecordK8sNodeMemoryWorkingSetDataPoint,
+ PageFaults: (*MetricsBuilder).RecordK8sNodeMemoryPageFaultsDataPoint,
+ MajorPageFaults: (*MetricsBuilder).RecordK8sNodeMemoryMajorPageFaultsDataPoint,
}
var PodMemoryMetrics = MemoryMetrics{
- Available: M.K8sPodMemoryAvailable,
- Usage: M.K8sPodMemoryUsage,
- Rss: M.K8sPodMemoryRss,
- WorkingSet: M.K8sPodMemoryWorkingSet,
- PageFaults: M.K8sPodMemoryPageFaults,
- MajorPageFaults: M.K8sPodMemoryMajorPageFaults,
+ Available: (*MetricsBuilder).RecordK8sPodMemoryAvailableDataPoint,
+ Usage: (*MetricsBuilder).RecordK8sPodMemoryUsageDataPoint,
+ Rss: (*MetricsBuilder).RecordK8sPodMemoryRssDataPoint,
+ WorkingSet: (*MetricsBuilder).RecordK8sPodMemoryWorkingSetDataPoint,
+ PageFaults: (*MetricsBuilder).RecordK8sPodMemoryPageFaultsDataPoint,
+ MajorPageFaults: (*MetricsBuilder).RecordK8sPodMemoryMajorPageFaultsDataPoint,
}
var ContainerMemoryMetrics = MemoryMetrics{
- Available: M.ContainerMemoryAvailable,
- Usage: M.ContainerMemoryUsage,
- Rss: M.ContainerMemoryRss,
- WorkingSet: M.ContainerMemoryWorkingSet,
- PageFaults: M.ContainerMemoryPageFaults,
- MajorPageFaults: M.ContainerMemoryMajorPageFaults,
+ Available: (*MetricsBuilder).RecordContainerMemoryAvailableDataPoint,
+ Usage: (*MetricsBuilder).RecordContainerMemoryUsageDataPoint,
+ Rss: (*MetricsBuilder).RecordContainerMemoryRssDataPoint,
+ WorkingSet: (*MetricsBuilder).RecordContainerMemoryWorkingSetDataPoint,
+ PageFaults: (*MetricsBuilder).RecordContainerMemoryPageFaultsDataPoint,
+ MajorPageFaults: (*MetricsBuilder).RecordContainerMemoryMajorPageFaultsDataPoint,
}
type FilesystemMetrics struct {
- Available MetricIntf
- Capacity MetricIntf
- Usage MetricIntf
+ Available RecordIntDataPointFunc
+ Capacity RecordIntDataPointFunc
+ Usage RecordIntDataPointFunc
}
var NodeFilesystemMetrics = FilesystemMetrics{
- Available: M.K8sNodeFilesystemAvailable,
- Capacity: M.K8sNodeFilesystemCapacity,
- Usage: M.K8sNodeFilesystemUsage,
+ Available: (*MetricsBuilder).RecordK8sNodeFilesystemAvailableDataPoint,
+ Capacity: (*MetricsBuilder).RecordK8sNodeFilesystemCapacityDataPoint,
+ Usage: (*MetricsBuilder).RecordK8sNodeFilesystemUsageDataPoint,
}
var PodFilesystemMetrics = FilesystemMetrics{
- Available: M.K8sPodFilesystemAvailable,
- Capacity: M.K8sPodFilesystemCapacity,
- Usage: M.K8sPodFilesystemUsage,
+ Available: (*MetricsBuilder).RecordK8sPodFilesystemAvailableDataPoint,
+ Capacity: (*MetricsBuilder).RecordK8sPodFilesystemCapacityDataPoint,
+ Usage: (*MetricsBuilder).RecordK8sPodFilesystemUsageDataPoint,
}
var ContainerFilesystemMetrics = FilesystemMetrics{
- Available: M.ContainerFilesystemAvailable,
- Capacity: M.ContainerFilesystemCapacity,
- Usage: M.ContainerFilesystemUsage,
+ Available: (*MetricsBuilder).RecordContainerFilesystemAvailableDataPoint,
+ Capacity: (*MetricsBuilder).RecordContainerFilesystemCapacityDataPoint,
+ Usage: (*MetricsBuilder).RecordContainerFilesystemUsageDataPoint,
}
type NetworkMetrics struct {
- IO MetricIntf
- Errors MetricIntf
+ IO RecordIntDataPointWithDirectionFunc
+ Errors RecordIntDataPointWithDirectionFunc
}
var NodeNetworkMetrics = NetworkMetrics{
- IO: M.K8sNodeNetworkIo,
- Errors: M.K8sNodeNetworkErrors,
+ IO: (*MetricsBuilder).RecordK8sNodeNetworkIoDataPoint,
+ Errors: (*MetricsBuilder).RecordK8sNodeNetworkErrorsDataPoint,
}
var PodNetworkMetrics = NetworkMetrics{
- IO: M.K8sPodNetworkIo,
- Errors: M.K8sPodNetworkErrors,
+ IO: (*MetricsBuilder).RecordK8sPodNetworkIoDataPoint,
+ Errors: (*MetricsBuilder).RecordK8sPodNetworkErrorsDataPoint,
}
type VolumeMetrics struct {
- Available MetricIntf
- Capacity MetricIntf
- Inodes MetricIntf
- InodesFree MetricIntf
- InodesUsed MetricIntf
+ Available RecordIntDataPointFunc
+ Capacity RecordIntDataPointFunc
+ Inodes RecordIntDataPointFunc
+ InodesFree RecordIntDataPointFunc
+ InodesUsed RecordIntDataPointFunc
}
var K8sVolumeMetrics = VolumeMetrics{
- Available: M.K8sVolumeAvailable,
- Capacity: M.K8sVolumeCapacity,
- Inodes: M.K8sVolumeInodes,
- InodesFree: M.K8sVolumeInodesFree,
- InodesUsed: M.K8sVolumeInodesUsed,
+ Available: (*MetricsBuilder).RecordK8sVolumeAvailableDataPoint,
+ Capacity: (*MetricsBuilder).RecordK8sVolumeCapacityDataPoint,
+ Inodes: (*MetricsBuilder).RecordK8sVolumeInodesDataPoint,
+ InodesFree: (*MetricsBuilder).RecordK8sVolumeInodesFreeDataPoint,
+ InodesUsed: (*MetricsBuilder).RecordK8sVolumeInodesUsedDataPoint,
}
diff --git a/receiver/kubeletstatsreceiver/scraper.go b/receiver/kubeletstatsreceiver/scraper.go
index cf4ab41a83d2..5d2d6ebac362 100644
--- a/receiver/kubeletstatsreceiver/scraper.go
+++ b/receiver/kubeletstatsreceiver/scraper.go
@@ -29,6 +29,7 @@ import (
"k8s.io/client-go/kubernetes"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
type scraperOptions struct {
@@ -46,13 +47,15 @@ type kubletScraper struct {
extraMetadataLabels []kubelet.MetadataLabel
metricGroupsToCollect map[kubelet.MetricGroup]bool
k8sAPIClient kubernetes.Interface
- cachedVolumeLabels map[string]map[string]string
+ cachedVolumeLabels map[string][]metadata.ResourceMetricsOption
+ mbs *metadata.MetricsBuilders
}
func newKubletScraper(
restClient kubelet.RestClient,
set component.ReceiverCreateSettings,
rOptions *scraperOptions,
+ metricsConfig metadata.MetricsSettings,
) (scraperhelper.Scraper, error) {
ks := &kubletScraper{
statsProvider: kubelet.NewStatsProvider(restClient),
@@ -61,7 +64,13 @@ func newKubletScraper(
extraMetadataLabels: rOptions.extraMetadataLabels,
metricGroupsToCollect: rOptions.metricGroupsToCollect,
k8sAPIClient: rOptions.k8sAPIClient,
- cachedVolumeLabels: make(map[string]map[string]string),
+ cachedVolumeLabels: make(map[string][]metadata.ResourceMetricsOption),
+ mbs: &metadata.MetricsBuilders{
+ NodeMetricsBuilder: metadata.NewMetricsBuilder(metricsConfig),
+ PodMetricsBuilder: metadata.NewMetricsBuilder(metricsConfig),
+ ContainerMetricsBuilder: metadata.NewMetricsBuilder(metricsConfig),
+ OtherMetricsBuilder: metadata.NewMetricsBuilder(metricsConfig),
+ },
}
return scraperhelper.NewScraper(typeStr, ks.scrape)
}
@@ -84,7 +93,7 @@ func (r *kubletScraper) scrape(context.Context) (pmetric.Metrics, error) {
}
metadata := kubelet.NewMetadata(r.extraMetadataLabels, podsMetadata, r.detailedPVCLabelsSetter())
- mds := kubelet.MetricsData(r.logger, summary, metadata, r.metricGroupsToCollect)
+ mds := kubelet.MetricsData(r.logger, summary, metadata, r.metricGroupsToCollect, r.mbs)
md := pmetric.NewMetrics()
for i := range mds {
mds[i].ResourceMetrics().MoveAndAppendTo(md.ResourceMetrics())
@@ -92,39 +101,34 @@ func (r *kubletScraper) scrape(context.Context) (pmetric.Metrics, error) {
return md, nil
}
-func (r *kubletScraper) detailedPVCLabelsSetter() func(volCacheID, volumeClaim, namespace string, labels map[string]string) error {
- return func(volCacheID, volumeClaim, namespace string, labels map[string]string) error {
+func (r *kubletScraper) detailedPVCLabelsSetter() func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error) {
+ return func(volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error) {
if r.k8sAPIClient == nil {
- return nil
+ return nil, nil
}
if r.cachedVolumeLabels[volCacheID] == nil {
ctx := context.Background()
pvc, err := r.k8sAPIClient.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, volumeClaim, metav1.GetOptions{})
if err != nil {
- return err
+ return nil, err
}
volName := pvc.Spec.VolumeName
if volName == "" {
- return fmt.Errorf("PersistentVolumeClaim %s does not have a volume name", pvc.Name)
+ return nil, fmt.Errorf("PersistentVolumeClaim %s does not have a volume name", pvc.Name)
}
pv, err := r.k8sAPIClient.CoreV1().PersistentVolumes().Get(ctx, volName, metav1.GetOptions{})
if err != nil {
- return err
+ return nil, err
}
- labelsToCache := make(map[string]string)
- kubelet.GetPersistentVolumeLabels(pv.Spec.PersistentVolumeSource, labelsToCache)
+ ro := kubelet.GetPersistentVolumeLabels(pv.Spec.PersistentVolumeSource)
// Cache collected labels.
- r.cachedVolumeLabels[volCacheID] = labelsToCache
+ r.cachedVolumeLabels[volCacheID] = ro
}
-
- for k, v := range r.cachedVolumeLabels[volCacheID] {
- labels[k] = v
- }
- return nil
+ return r.cachedVolumeLabels[volCacheID], nil
}
}
diff --git a/receiver/kubeletstatsreceiver/scraper_test.go b/receiver/kubeletstatsreceiver/scraper_test.go
index c4828055fae8..617c066a3901 100644
--- a/receiver/kubeletstatsreceiver/scraper_test.go
+++ b/receiver/kubeletstatsreceiver/scraper_test.go
@@ -30,6 +30,7 @@ import (
"k8s.io/client-go/kubernetes/fake"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
const (
@@ -63,6 +64,7 @@ func TestScraper(t *testing.T) {
&fakeRestClient{},
componenttest.NewNopReceiverCreateSettings(),
options,
+ metadata.DefaultMetricsSettings(),
)
require.NoError(t, err)
@@ -112,6 +114,7 @@ func TestScraperWithMetadata(t *testing.T) {
&fakeRestClient{},
componenttest.NewNopReceiverCreateSettings(),
options,
+ metadata.DefaultMetricsSettings(),
)
require.NoError(t, err)
@@ -195,6 +198,7 @@ func TestScraperWithMetricGroups(t *testing.T) {
extraMetadataLabels: []kubelet.MetadataLabel{kubelet.MetadataLabelContainerID},
metricGroupsToCollect: test.metricGroups,
},
+ metadata.DefaultMetricsSettings(),
)
require.NoError(t, err)
@@ -344,6 +348,7 @@ func TestScraperWithPVCDetailedLabels(t *testing.T) {
},
k8sAPIClient: test.k8sAPIClient,
},
+ metadata.DefaultMetricsSettings(),
)
require.NoError(t, err)
@@ -456,6 +461,7 @@ func TestClientErrors(t *testing.T) {
},
settings,
options,
+ metadata.DefaultMetricsSettings(),
)
require.NoError(t, err)