diff --git a/go.mod b/go.mod
index d94eac1977f3..de8d9201b7f2 100644
--- a/go.mod
+++ b/go.mod
@@ -949,6 +949,9 @@ replace github.com/googleapis/gnostic v0.5.6 => github.com/googleapis/gnostic v0
// see https://github.com/DataDog/dd-trace-go/issues/1220
replace github.com/go-chi/chi/v4 => github.com/go-chi/chi v4.0.0+incompatible
+// see https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/12322#issuecomment-1185029670
+replace github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 => github.com/docker/go-connections v0.4.0
+
// see https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/4433
exclude github.com/StackExchange/wmi v1.2.0
diff --git a/go.sum b/go.sum
index 3be9c97b0707..505dc07390f7 100644
--- a/go.sum
+++ b/go.sum
@@ -606,9 +606,8 @@ github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05
github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
-github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y=
-github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q=
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
diff --git a/internal/docker/docker.go b/internal/docker/docker.go
index 1b5c258a5d75..94d040f2285b 100644
--- a/internal/docker/docker.go
+++ b/internal/docker/docker.go
@@ -56,11 +56,13 @@ type Client struct {
logger *zap.Logger
}
-func NewDockerClient(config *Config, logger *zap.Logger) (*Client, error) {
+func NewDockerClient(config *Config, logger *zap.Logger, opts ...docker.Opt) (*Client, error) {
client, err := docker.NewClientWithOpts(
- docker.WithHost(config.Endpoint),
- docker.WithVersion(fmt.Sprintf("v%v", config.DockerAPIVersion)),
- docker.WithHTTPHeaders(map[string]string{"User-Agent": userAgent}),
+ append([]docker.Opt{
+ docker.WithHost(config.Endpoint),
+ docker.WithVersion(fmt.Sprintf("v%v", config.DockerAPIVersion)),
+ docker.WithHTTPHeaders(map[string]string{"User-Agent": userAgent}),
+ }, opts...)...,
)
if err != nil {
return nil, fmt.Errorf("could not create docker client: %w", err)
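The variadic `opts` parameter lets callers layer extra client options on top of the defaults; because the extras are appended after the built-in options, they are applied last and can override them. A minimal sketch of a call site, assuming a package that imports `internal/docker` as `dockerutil` and the upstream client as `docker` (the config values are illustrative; `docker.WithAPIVersionNegotiation` is an existing upstream option):

```go
// Hypothetical call site; cfg comes from the surrounding code.
client, err := dockerutil.NewDockerClient(cfg, zap.NewNop(),
	// Applied after WithHost/WithVersion/WithHTTPHeaders above.
	docker.WithAPIVersionNegotiation(),
)
if err != nil {
	return nil, err
}
_ = client
```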
diff --git a/receiver/dockerstatsreceiver/config.go b/receiver/dockerstatsreceiver/config.go
index a3e7b5626c40..25f0817fe371 100644
--- a/receiver/dockerstatsreceiver/config.go
+++ b/receiver/dockerstatsreceiver/config.go
@@ -21,6 +21,8 @@ import (
"go.opentelemetry.io/collector/config"
"go.opentelemetry.io/collector/receiver/scraperhelper"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver/internal/metadata"
)
var _ config.Receiver = (*Config)(nil)
@@ -56,6 +58,9 @@ type Config struct {
// Docker client API version. Default is 1.22
DockerAPIVersion float64 `mapstructure:"api_version"`
+
+ // Metrics config. Enable or disable stats by name.
+ MetricsConfig metadata.MetricsSettings `mapstructure:"stats"`
}
func (config Config) Validate() error {
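Since `MetricsConfig` is mapped to the `stats` key, individual metrics become toggleable from collector configuration. A hedged sketch of the resulting YAML, assuming the receiver's `docker_stats` type key (the specific metrics chosen here are only illustrative):

```yaml
receivers:
  docker_stats:
    endpoint: unix:///var/run/docker.sock
    api_version: 1.22
    stats:
      container.cpu.usage.percpu:
        enabled: true   # optional metric, disabled by default
      container.memory.total_rss:
        enabled: false  # emitted by default, turned off here
```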
diff --git a/receiver/dockerstatsreceiver/doc.go b/receiver/dockerstatsreceiver/doc.go
new file mode 100644
index 000000000000..1d61e7250425
--- /dev/null
+++ b/receiver/dockerstatsreceiver/doc.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate mdatagen --experimental-gen metadata.yaml
+
+package dockerstatsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver"
diff --git a/receiver/dockerstatsreceiver/documentation.md b/receiver/dockerstatsreceiver/documentation.md
new file mode 100644
index 000000000000..8e6c9453caf5
--- /dev/null
+++ b/receiver/dockerstatsreceiver/documentation.md
@@ -0,0 +1,141 @@
+[comment]: <> (Code generated by mdatagen. DO NOT EDIT.)
+
+# dockerstatsreceiver
+
+## Metrics
+
+These are the metrics available for this scraper.
+
+| Name | Description | Unit | Type | Attributes |
+| ---- | ----------- | ---- | ---- | ---------- |
+| **container.blockio.io_merged_recursive.async** | Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_merged_recursive.discard** | Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_merged_recursive.read** | Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_merged_recursive.sync** | Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_merged_recursive.total** | Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_merged_recursive.write** | Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_queued_recursive.async** | Number of requests queued up for this cgroup and its descendant cgroups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_queued_recursive.discard** | Number of requests queued up for this cgroup and its descendant cgroups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_queued_recursive.read** | Number of requests queued up for this cgroup and its descendant cgroups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_queued_recursive.sync** | Number of requests queued up for this cgroup and its descendant cgroups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_queued_recursive.total** | Number of requests queued up for this cgroup and its descendant cgroups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_queued_recursive.write** | Number of requests queued up for this cgroup and its descendant cgroups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_service_bytes_recursive.async** | Number of bytes transferred to/from the disk by the group and descendant groups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | By | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_service_bytes_recursive.discard** | Number of bytes transferred to/from the disk by the group and descendant groups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | By | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_service_bytes_recursive.read** | Number of bytes transferred to/from the disk by the group and descendant groups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | By | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_service_bytes_recursive.sync** | Number of bytes transferred to/from the disk by the group and descendant groups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | By | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_service_bytes_recursive.total** | Number of bytes transferred to/from the disk by the group and descendant groups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | By | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_service_bytes_recursive.write** | Number of bytes transferred to/from the disk by the group and descendant groups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | By | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_service_time_recursive.async** | Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_service_time_recursive.discard** | Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_service_time_recursive.read** | Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_service_time_recursive.sync** | Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_service_time_recursive.total** | Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_service_time_recursive.write** | Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_serviced_recursive.async** | Number of IOs (bio) issued to the disk by the group and descendant groups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_serviced_recursive.discard** | Number of IOs (bio) issued to the disk by the group and descendant groups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_serviced_recursive.read** | Number of IOs (bio) issued to the disk by the group and descendant groups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_serviced_recursive.sync** | Number of IOs (bio) issued to the disk by the group and descendant groups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_serviced_recursive.total** | Number of IOs (bio) issued to the disk by the group and descendant groups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_serviced_recursive.write** | Number of IOs (bio) issued to the disk by the group and descendant groups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_time_recursive.async** | Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | ms | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_time_recursive.discard** | Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | ms | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_time_recursive.read** | Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | ms | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_time_recursive.sync** | Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | ms | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_time_recursive.total** | Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | ms | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_time_recursive.write** | Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | ms | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_wait_time_recursive.async** | Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_wait_time_recursive.discard** | Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_wait_time_recursive.read** | Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_wait_time_recursive.sync** | Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_wait_time_recursive.total** | Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.io_wait_time_recursive.write** | Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.sectors_recursive.async** | Number of sectors transferred to/from disk by the group and descendant groups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.sectors_recursive.discard** | Number of sectors transferred to/from disk by the group and descendant groups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.sectors_recursive.read** | Number of sectors transferred to/from disk by the group and descendant groups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.sectors_recursive.sync** | Number of sectors transferred to/from disk by the group and descendant groups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.sectors_recursive.total** | Number of sectors transferred to/from disk by the group and descendant groups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.blockio.sectors_recursive.write** | Number of sectors transferred to/from disk by the group and descendant groups. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt). | 1 | Sum(Int) | - device_major<br>- device_minor |
+| **container.cpu.percent** | Percent of CPU used by the container. | 1 | Gauge(Double) | |
+| **container.cpu.throttling_data.periods** | Number of periods with throttling active. | 1 | Sum(Int) | |
+| **container.cpu.throttling_data.throttled_periods** | Number of periods when the container hits its throttling limit. | 1 | Sum(Int) | |
+| **container.cpu.throttling_data.throttled_time** | Aggregate time the container was throttled. | ns | Sum(Int) | |
+| **container.cpu.usage.kernelmode** | Time spent by tasks of the cgroup in kernel mode (Linux). Time spent by all container processes in kernel mode (Windows). | ns | Sum(Int) | |
+| container.cpu.usage.percpu | Per-core CPU usage by the container. | ns | Sum(Int) | |
+| **container.cpu.usage.system** | System CPU usage. | ns | Sum(Int) | |
+| **container.cpu.usage.total** | Total CPU time consumed. | ns | Sum(Int) | |
+| **container.cpu.usage.usermode** | Time spent by tasks of the cgroup in user mode (Linux). Time spent by all container processes in user mode (Windows). | ns | Sum(Int) | |
+| **container.memory.active_anon** | The amount of anonymous memory that has been identified as active by the kernel. | By | Gauge(Int) | |
+| **container.memory.active_file** | Cache memory that has been identified as active by the kernel. [More docs](https://docs.docker.com/config/containers/runmetrics/) | By | Gauge(Int) | |
+| **container.memory.cache** | The amount of memory used by the processes of this control group that can be associated precisely with a block on a block device. | By | Gauge(Int) | |
+| **container.memory.dirty** | Bytes that are waiting to get written back to the disk, from this cgroup. | By | Gauge(Int) | |
+| **container.memory.hierarchical_memory_limit** | The maximum amount of physical memory that can be used by the processes of this control group. | By | Gauge(Int) | |
+| **container.memory.hierarchical_memsw_limit** | The maximum amount of RAM + swap that can be used by the processes of this control group. | By | Gauge(Int) | |
+| **container.memory.inactive_anon** | The amount of anonymous memory that has been identified as inactive by the kernel. | By | Gauge(Int) | |
+| **container.memory.inactive_file** | Cache memory that has been identified as inactive by the kernel. [More docs](https://docs.docker.com/config/containers/runmetrics/) | By | Gauge(Int) | |
+| **container.memory.mapped_file** | Indicates the amount of memory mapped by the processes in the control group. | By | Gauge(Int) | |
+| **container.memory.percent** | Percentage of memory used. | 1 | Gauge(Double) | |
+| **container.memory.pgfault** | Indicate the number of times that a process of the cgroup triggered a page fault. | 1 | Sum(Int) | |
+| **container.memory.pgmajfault** | Indicate the number of times that a process of the cgroup triggered a major fault. | 1 | Sum(Int) | |
+| **container.memory.pgpgin** | Number of pages read from disk by the cgroup. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt). | 1 | Sum(Int) | |
+| **container.memory.pgpgout** | Number of pages written to disk by the cgroup. [More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt). | 1 | Sum(Int) | |
+| **container.memory.rss** | The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps. | By | Gauge(Int) | |
+| **container.memory.rss_huge** | Number of bytes of anonymous transparent hugepages in this cgroup. | By | Gauge(Int) | |
+| **container.memory.swap** | The amount of swap currently used by the processes in this cgroup. | By | Gauge(Int) | |
+| **container.memory.total_active_anon** | The amount of anonymous memory that has been identified as active by the kernel. Includes descendant cgroups. | By | Gauge(Int) | |
+| **container.memory.total_active_file** | Cache memory that has been identified as active by the kernel. Includes descendant cgroups. [More docs](https://docs.docker.com/config/containers/runmetrics/). | By | Gauge(Int) | |
+| **container.memory.total_cache** | Total amount of memory used by the processes of this cgroup (and descendants) that can be associated with a block on a block device. Also accounts for memory used by tmpfs. | By | Gauge(Int) | |
+| **container.memory.total_dirty** | Bytes that are waiting to get written back to the disk, from this cgroup and descendants. | By | Gauge(Int) | |
+| **container.memory.total_inactive_anon** | The amount of anonymous memory that has been identified as inactive by the kernel. Includes descendant cgroups. | By | Gauge(Int) | |
+| **container.memory.total_inactive_file** | Cache memory that has been identified as inactive by the kernel. Includes descendant cgroups. [More docs](https://docs.docker.com/config/containers/runmetrics/). | By | Gauge(Int) | |
+| **container.memory.total_mapped_file** | Indicates the amount of memory mapped by the processes in the control group and descendant groups. | By | Gauge(Int) | |
+| **container.memory.total_pgfault** | Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a page fault. | 1 | Sum(Int) | |
+| **container.memory.total_pgmajfault** | Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a major fault. | 1 | Sum(Int) | |
+| **container.memory.total_pgpgin** | Number of pages read from disk by the cgroup and descendant groups. | 1 | Sum(Int) | |
+| **container.memory.total_pgpgout** | Number of pages written to disk by the cgroup and descendant groups. | 1 | Sum(Int) | |
+| **container.memory.total_rss** | The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps. Includes descendant cgroups. | By | Gauge(Int) | |
+| **container.memory.total_rss_huge** | Number of bytes of anonymous transparent hugepages in this cgroup and descendant cgroups. | By | Gauge(Int) | |
+| **container.memory.total_swap** | The amount of swap currently used by the processes in this cgroup and descendant groups. | By | Gauge(Int) | |
+| **container.memory.total_unevictable** | The amount of memory that cannot be reclaimed. Includes descendant cgroups. | By | Gauge(Int) | |
+| **container.memory.total_writeback** | Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup and descendants. | By | Gauge(Int) | |
+| **container.memory.unevictable** | The amount of memory that cannot be reclaimed. | By | Gauge(Int) | |
+| **container.memory.usage.limit** | Memory limit of the container. | By | Gauge(Int) | |
+| **container.memory.usage.max** | Maximum memory usage. | By | Gauge(Int) | |
+| **container.memory.usage.total** | Memory usage of the container. This excludes the total cache. | By | Gauge(Int) | |
+| **container.memory.writeback** | Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup. | By | Gauge(Int) | |
+| **container.network.io.usage.rx_bytes** | Bytes received by the container. | By | Sum(Int) | |
+| **container.network.io.usage.rx_dropped** | Incoming packets dropped. | 1 | Sum(Int) | |
+| **container.network.io.usage.rx_errors** | Received errors. | 1 | Sum(Int) | |
+| **container.network.io.usage.rx_packets** | Packets received. | 1 | Sum(Int) | |
+| **container.network.io.usage.tx_bytes** | Bytes sent. | By | Sum(Int) | |
+| **container.network.io.usage.tx_dropped** | Outgoing packets dropped. | 1 | Sum(Int) | |
+| **container.network.io.usage.tx_errors** | Sent errors. | 1 | Sum(Int) | |
+| **container.network.io.usage.tx_packets** | Packets sent. | 1 | Sum(Int) | |
+
+**Highlighted metrics** are emitted by default. Other metrics are optional and not emitted by default.
+Any metric can be enabled or disabled with the following scraper configuration:
+
+```yaml
+metrics:
+  <metric_name>:
+    enabled: <true|false>
+```
+
+## Resource attributes
+
+| Name | Description | Type |
+| ---- | ----------- | ---- |
+| container.hostname | The hostname of the container. | String |
+| container.id | The ID of the container. | String |
+| container.image.name | The name of the docker image in use by the container. | String |
+| container.name | The name of the container. | String |
+| container.runtime | The runtime of the container. For this receiver, it will always be 'docker'. | String |
+
+## Metric attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| core | The CPU core number when utilising per-CPU metrics. | |
+| device_major | Device major number for block IO operations. | |
+| device_minor | Device minor number for block IO operations. | |
+| interface | Network interface. | |
diff --git a/receiver/dockerstatsreceiver/factory.go b/receiver/dockerstatsreceiver/factory.go
index cc0a4275e013..1b8aec323119 100644
--- a/receiver/dockerstatsreceiver/factory.go
+++ b/receiver/dockerstatsreceiver/factory.go
@@ -22,6 +22,8 @@ import (
"go.opentelemetry.io/collector/config"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/receiver/scraperhelper"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver/internal/metadata"
)
const (
@@ -44,21 +46,23 @@ func createDefaultConfig() config.Receiver {
Endpoint: "unix:///var/run/docker.sock",
Timeout: 5 * time.Second,
DockerAPIVersion: defaultDockerAPIVersion,
+ MetricsConfig: metadata.DefaultMetricsSettings(),
}
}
func createMetricsReceiver(
- ctx context.Context,
+ _ context.Context,
params component.ReceiverCreateSettings,
config config.Receiver,
consumer consumer.Metrics,
) (component.MetricsReceiver, error) {
dockerConfig := config.(*Config)
+ dsr := newReceiver(params, dockerConfig)
- dsr, err := NewReceiver(ctx, params, dockerConfig, consumer)
+ scrp, err := scraperhelper.NewScraper(typeStr, dsr.scrape, scraperhelper.WithStart(dsr.start))
if err != nil {
return nil, err
}
- return dsr, nil
+ return scraperhelper.NewScraperControllerReceiver(&dsr.config.ScraperControllerSettings, params, consumer, scraperhelper.AddScraper(scrp))
}
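The factory now builds the receiver through `scraperhelper` rather than a bespoke receiver type: `newReceiver` constructs the scraper state, `NewScraper` wraps its `scrape`/`start` methods, and `NewScraperControllerReceiver` drives them on the configured interval. A minimal test-style sketch of exercising this wiring, assuming the collector v0.56.0 test helpers `componenttest.NewNopReceiverCreateSettings` and `consumertest.NewNop`:

```go
package dockerstatsreceiver

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
	"go.opentelemetry.io/collector/component/componenttest"
	"go.opentelemetry.io/collector/consumer/consumertest"
)

func TestCreateMetricsReceiverWiring(t *testing.T) {
	factory := NewFactory()
	cfg := factory.CreateDefaultConfig() // includes metadata.DefaultMetricsSettings()

	rcvr, err := factory.CreateMetricsReceiver(
		context.Background(),
		componenttest.NewNopReceiverCreateSettings(),
		cfg,
		consumertest.NewNop(),
	)
	require.NoError(t, err)
	require.NotNil(t, rcvr)
}
```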
diff --git a/receiver/dockerstatsreceiver/go.mod b/receiver/dockerstatsreceiver/go.mod
index 31de3ea7750d..8d2efdfebeda 100644
--- a/receiver/dockerstatsreceiver/go.mod
+++ b/receiver/dockerstatsreceiver/go.mod
@@ -6,6 +6,7 @@ require (
github.com/docker/docker v20.10.17+incompatible
github.com/open-telemetry/opentelemetry-collector-contrib/internal/containertest v0.56.0
github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.56.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest v0.56.0
github.com/stretchr/testify v1.8.0
go.opentelemetry.io/collector v0.56.0
go.opentelemetry.io/collector/pdata v0.56.0
@@ -21,7 +22,7 @@ require (
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.8.0-beta.1+incompatible // indirect
- github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 // indirect
+ github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/gobwas/glob v0.2.3 // indirect
@@ -29,7 +30,7 @@ require (
github.com/golang/protobuf v1.5.2 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/knadh/koanf v1.4.2 // indirect
- github.com/kr/pretty v0.3.0 // indirect
+ github.com/kr/text v0.2.0 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
@@ -40,6 +41,7 @@ require (
github.com/pelletier/go-toml v1.9.4 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/rogpeppe/go-internal v1.6.1 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
go.opencensus.io v0.23.0 // indirect
go.opentelemetry.io/otel v1.8.0 // indirect
@@ -60,5 +62,7 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/conta
replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker => ../../internal/docker
+replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest => ../../internal/scrapertest
+
// see https://github.com/distribution/distribution/issues/3590
exclude github.com/docker/distribution v2.8.0+incompatible
diff --git a/receiver/dockerstatsreceiver/go.sum b/receiver/dockerstatsreceiver/go.sum
index 044c1d89b3e7..14b6a2da4753 100644
--- a/receiver/dockerstatsreceiver/go.sum
+++ b/receiver/dockerstatsreceiver/go.sum
@@ -3,7 +3,6 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@@ -45,8 +44,8 @@ github.com/docker/distribution v2.8.0-beta.1+incompatible h1:9MjVa+OTMHm4C0kKZB6
github.com/docker/distribution v2.8.0-beta.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y=
-github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -135,11 +134,9 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/knadh/koanf v1.4.2 h1:2itp+cdC6miId4pO4Jw7c/3eiYD26Z/Sz3ATJMwHxIs=
github.com/knadh/koanf v1.4.2/go.mod h1:4NCo0q4pmU398vF9vq2jStF9MWQZ8JEDcDMHlDCr4h0=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -192,7 +189,6 @@ github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBO
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
@@ -200,7 +196,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -276,12 +271,10 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/receiver/dockerstatsreceiver/internal/metadata/generated_metrics_v2.go b/receiver/dockerstatsreceiver/internal/metadata/generated_metrics_v2.go
new file mode 100644
index 000000000000..c8ae1cbd4516
--- /dev/null
+++ b/receiver/dockerstatsreceiver/internal/metadata/generated_metrics_v2.go
@@ -0,0 +1,6756 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "time"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/pmetric"
+ conventions "go.opentelemetry.io/collector/semconv/v1.6.1"
+)
+
+// MetricSettings provides common settings for a particular metric.
+type MetricSettings struct {
+ Enabled bool `mapstructure:"enabled"`
+}
+
+// MetricsSettings provides settings for dockerstatsreceiver metrics.
+type MetricsSettings struct {
+ ContainerBlockioIoMergedRecursiveAsync MetricSettings `mapstructure:"container.blockio.io_merged_recursive.async"`
+ ContainerBlockioIoMergedRecursiveDiscard MetricSettings `mapstructure:"container.blockio.io_merged_recursive.discard"`
+ ContainerBlockioIoMergedRecursiveRead MetricSettings `mapstructure:"container.blockio.io_merged_recursive.read"`
+ ContainerBlockioIoMergedRecursiveSync MetricSettings `mapstructure:"container.blockio.io_merged_recursive.sync"`
+ ContainerBlockioIoMergedRecursiveTotal MetricSettings `mapstructure:"container.blockio.io_merged_recursive.total"`
+ ContainerBlockioIoMergedRecursiveWrite MetricSettings `mapstructure:"container.blockio.io_merged_recursive.write"`
+ ContainerBlockioIoQueuedRecursiveAsync MetricSettings `mapstructure:"container.blockio.io_queued_recursive.async"`
+ ContainerBlockioIoQueuedRecursiveDiscard MetricSettings `mapstructure:"container.blockio.io_queued_recursive.discard"`
+ ContainerBlockioIoQueuedRecursiveRead MetricSettings `mapstructure:"container.blockio.io_queued_recursive.read"`
+ ContainerBlockioIoQueuedRecursiveSync MetricSettings `mapstructure:"container.blockio.io_queued_recursive.sync"`
+ ContainerBlockioIoQueuedRecursiveTotal MetricSettings `mapstructure:"container.blockio.io_queued_recursive.total"`
+ ContainerBlockioIoQueuedRecursiveWrite MetricSettings `mapstructure:"container.blockio.io_queued_recursive.write"`
+ ContainerBlockioIoServiceBytesRecursiveAsync MetricSettings `mapstructure:"container.blockio.io_service_bytes_recursive.async"`
+ ContainerBlockioIoServiceBytesRecursiveDiscard MetricSettings `mapstructure:"container.blockio.io_service_bytes_recursive.discard"`
+ ContainerBlockioIoServiceBytesRecursiveRead MetricSettings `mapstructure:"container.blockio.io_service_bytes_recursive.read"`
+ ContainerBlockioIoServiceBytesRecursiveSync MetricSettings `mapstructure:"container.blockio.io_service_bytes_recursive.sync"`
+ ContainerBlockioIoServiceBytesRecursiveTotal MetricSettings `mapstructure:"container.blockio.io_service_bytes_recursive.total"`
+ ContainerBlockioIoServiceBytesRecursiveWrite MetricSettings `mapstructure:"container.blockio.io_service_bytes_recursive.write"`
+ ContainerBlockioIoServiceTimeRecursiveAsync MetricSettings `mapstructure:"container.blockio.io_service_time_recursive.async"`
+ ContainerBlockioIoServiceTimeRecursiveDiscard MetricSettings `mapstructure:"container.blockio.io_service_time_recursive.discard"`
+ ContainerBlockioIoServiceTimeRecursiveRead MetricSettings `mapstructure:"container.blockio.io_service_time_recursive.read"`
+ ContainerBlockioIoServiceTimeRecursiveSync MetricSettings `mapstructure:"container.blockio.io_service_time_recursive.sync"`
+ ContainerBlockioIoServiceTimeRecursiveTotal MetricSettings `mapstructure:"container.blockio.io_service_time_recursive.total"`
+ ContainerBlockioIoServiceTimeRecursiveWrite MetricSettings `mapstructure:"container.blockio.io_service_time_recursive.write"`
+ ContainerBlockioIoServicedRecursiveAsync MetricSettings `mapstructure:"container.blockio.io_serviced_recursive.async"`
+ ContainerBlockioIoServicedRecursiveDiscard MetricSettings `mapstructure:"container.blockio.io_serviced_recursive.discard"`
+ ContainerBlockioIoServicedRecursiveRead MetricSettings `mapstructure:"container.blockio.io_serviced_recursive.read"`
+ ContainerBlockioIoServicedRecursiveSync MetricSettings `mapstructure:"container.blockio.io_serviced_recursive.sync"`
+ ContainerBlockioIoServicedRecursiveTotal MetricSettings `mapstructure:"container.blockio.io_serviced_recursive.total"`
+ ContainerBlockioIoServicedRecursiveWrite MetricSettings `mapstructure:"container.blockio.io_serviced_recursive.write"`
+ ContainerBlockioIoTimeRecursiveAsync MetricSettings `mapstructure:"container.blockio.io_time_recursive.async"`
+ ContainerBlockioIoTimeRecursiveDiscard MetricSettings `mapstructure:"container.blockio.io_time_recursive.discard"`
+ ContainerBlockioIoTimeRecursiveRead MetricSettings `mapstructure:"container.blockio.io_time_recursive.read"`
+ ContainerBlockioIoTimeRecursiveSync MetricSettings `mapstructure:"container.blockio.io_time_recursive.sync"`
+ ContainerBlockioIoTimeRecursiveTotal MetricSettings `mapstructure:"container.blockio.io_time_recursive.total"`
+ ContainerBlockioIoTimeRecursiveWrite MetricSettings `mapstructure:"container.blockio.io_time_recursive.write"`
+ ContainerBlockioIoWaitTimeRecursiveAsync MetricSettings `mapstructure:"container.blockio.io_wait_time_recursive.async"`
+ ContainerBlockioIoWaitTimeRecursiveDiscard MetricSettings `mapstructure:"container.blockio.io_wait_time_recursive.discard"`
+ ContainerBlockioIoWaitTimeRecursiveRead MetricSettings `mapstructure:"container.blockio.io_wait_time_recursive.read"`
+ ContainerBlockioIoWaitTimeRecursiveSync MetricSettings `mapstructure:"container.blockio.io_wait_time_recursive.sync"`
+ ContainerBlockioIoWaitTimeRecursiveTotal MetricSettings `mapstructure:"container.blockio.io_wait_time_recursive.total"`
+ ContainerBlockioIoWaitTimeRecursiveWrite MetricSettings `mapstructure:"container.blockio.io_wait_time_recursive.write"`
+ ContainerBlockioSectorsRecursiveAsync MetricSettings `mapstructure:"container.blockio.sectors_recursive.async"`
+ ContainerBlockioSectorsRecursiveDiscard MetricSettings `mapstructure:"container.blockio.sectors_recursive.discard"`
+ ContainerBlockioSectorsRecursiveRead MetricSettings `mapstructure:"container.blockio.sectors_recursive.read"`
+ ContainerBlockioSectorsRecursiveSync MetricSettings `mapstructure:"container.blockio.sectors_recursive.sync"`
+ ContainerBlockioSectorsRecursiveTotal MetricSettings `mapstructure:"container.blockio.sectors_recursive.total"`
+ ContainerBlockioSectorsRecursiveWrite MetricSettings `mapstructure:"container.blockio.sectors_recursive.write"`
+ ContainerCPUPercent MetricSettings `mapstructure:"container.cpu.percent"`
+ ContainerCPUThrottlingDataPeriods MetricSettings `mapstructure:"container.cpu.throttling_data.periods"`
+ ContainerCPUThrottlingDataThrottledPeriods MetricSettings `mapstructure:"container.cpu.throttling_data.throttled_periods"`
+ ContainerCPUThrottlingDataThrottledTime MetricSettings `mapstructure:"container.cpu.throttling_data.throttled_time"`
+ ContainerCPUUsageKernelmode MetricSettings `mapstructure:"container.cpu.usage.kernelmode"`
+ ContainerCPUUsagePercpu MetricSettings `mapstructure:"container.cpu.usage.percpu"`
+ ContainerCPUUsageSystem MetricSettings `mapstructure:"container.cpu.usage.system"`
+ ContainerCPUUsageTotal MetricSettings `mapstructure:"container.cpu.usage.total"`
+ ContainerCPUUsageUsermode MetricSettings `mapstructure:"container.cpu.usage.usermode"`
+ ContainerMemoryActiveAnon MetricSettings `mapstructure:"container.memory.active_anon"`
+ ContainerMemoryActiveFile MetricSettings `mapstructure:"container.memory.active_file"`
+ ContainerMemoryCache MetricSettings `mapstructure:"container.memory.cache"`
+ ContainerMemoryDirty MetricSettings `mapstructure:"container.memory.dirty"`
+ ContainerMemoryHierarchicalMemoryLimit MetricSettings `mapstructure:"container.memory.hierarchical_memory_limit"`
+ ContainerMemoryHierarchicalMemswLimit MetricSettings `mapstructure:"container.memory.hierarchical_memsw_limit"`
+ ContainerMemoryInactiveAnon MetricSettings `mapstructure:"container.memory.inactive_anon"`
+ ContainerMemoryInactiveFile MetricSettings `mapstructure:"container.memory.inactive_file"`
+ ContainerMemoryMappedFile MetricSettings `mapstructure:"container.memory.mapped_file"`
+ ContainerMemoryPercent MetricSettings `mapstructure:"container.memory.percent"`
+ ContainerMemoryPgfault MetricSettings `mapstructure:"container.memory.pgfault"`
+ ContainerMemoryPgmajfault MetricSettings `mapstructure:"container.memory.pgmajfault"`
+ ContainerMemoryPgpgin MetricSettings `mapstructure:"container.memory.pgpgin"`
+ ContainerMemoryPgpgout MetricSettings `mapstructure:"container.memory.pgpgout"`
+ ContainerMemoryRss MetricSettings `mapstructure:"container.memory.rss"`
+ ContainerMemoryRssHuge MetricSettings `mapstructure:"container.memory.rss_huge"`
+ ContainerMemorySwap MetricSettings `mapstructure:"container.memory.swap"`
+ ContainerMemoryTotalActiveAnon MetricSettings `mapstructure:"container.memory.total_active_anon"`
+ ContainerMemoryTotalActiveFile MetricSettings `mapstructure:"container.memory.total_active_file"`
+ ContainerMemoryTotalCache MetricSettings `mapstructure:"container.memory.total_cache"`
+ ContainerMemoryTotalDirty MetricSettings `mapstructure:"container.memory.total_dirty"`
+ ContainerMemoryTotalInactiveAnon MetricSettings `mapstructure:"container.memory.total_inactive_anon"`
+ ContainerMemoryTotalInactiveFile MetricSettings `mapstructure:"container.memory.total_inactive_file"`
+ ContainerMemoryTotalMappedFile MetricSettings `mapstructure:"container.memory.total_mapped_file"`
+ ContainerMemoryTotalPgfault MetricSettings `mapstructure:"container.memory.total_pgfault"`
+ ContainerMemoryTotalPgmajfault MetricSettings `mapstructure:"container.memory.total_pgmajfault"`
+ ContainerMemoryTotalPgpgin MetricSettings `mapstructure:"container.memory.total_pgpgin"`
+ ContainerMemoryTotalPgpgout MetricSettings `mapstructure:"container.memory.total_pgpgout"`
+ ContainerMemoryTotalRss MetricSettings `mapstructure:"container.memory.total_rss"`
+ ContainerMemoryTotalRssHuge MetricSettings `mapstructure:"container.memory.total_rss_huge"`
+ ContainerMemoryTotalSwap MetricSettings `mapstructure:"container.memory.total_swap"`
+ ContainerMemoryTotalUnevictable MetricSettings `mapstructure:"container.memory.total_unevictable"`
+ ContainerMemoryTotalWriteback MetricSettings `mapstructure:"container.memory.total_writeback"`
+ ContainerMemoryUnevictable MetricSettings `mapstructure:"container.memory.unevictable"`
+ ContainerMemoryUsageLimit MetricSettings `mapstructure:"container.memory.usage.limit"`
+ ContainerMemoryUsageMax MetricSettings `mapstructure:"container.memory.usage.max"`
+ ContainerMemoryUsageTotal MetricSettings `mapstructure:"container.memory.usage.total"`
+ ContainerMemoryWriteback MetricSettings `mapstructure:"container.memory.writeback"`
+ ContainerNetworkIoUsageRxBytes MetricSettings `mapstructure:"container.network.io.usage.rx_bytes"`
+ ContainerNetworkIoUsageRxDropped MetricSettings `mapstructure:"container.network.io.usage.rx_dropped"`
+ ContainerNetworkIoUsageRxErrors MetricSettings `mapstructure:"container.network.io.usage.rx_errors"`
+ ContainerNetworkIoUsageRxPackets MetricSettings `mapstructure:"container.network.io.usage.rx_packets"`
+ ContainerNetworkIoUsageTxBytes MetricSettings `mapstructure:"container.network.io.usage.tx_bytes"`
+ ContainerNetworkIoUsageTxDropped MetricSettings `mapstructure:"container.network.io.usage.tx_dropped"`
+ ContainerNetworkIoUsageTxErrors MetricSettings `mapstructure:"container.network.io.usage.tx_errors"`
+ ContainerNetworkIoUsageTxPackets MetricSettings `mapstructure:"container.network.io.usage.tx_packets"`
+}
+
+func DefaultMetricsSettings() MetricsSettings {
+ return MetricsSettings{
+ ContainerBlockioIoMergedRecursiveAsync: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoMergedRecursiveDiscard: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoMergedRecursiveRead: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoMergedRecursiveSync: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoMergedRecursiveTotal: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoMergedRecursiveWrite: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoQueuedRecursiveAsync: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoQueuedRecursiveDiscard: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoQueuedRecursiveRead: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoQueuedRecursiveSync: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoQueuedRecursiveTotal: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoQueuedRecursiveWrite: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoServiceBytesRecursiveAsync: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoServiceBytesRecursiveDiscard: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoServiceBytesRecursiveRead: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoServiceBytesRecursiveSync: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoServiceBytesRecursiveTotal: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoServiceBytesRecursiveWrite: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoServiceTimeRecursiveAsync: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoServiceTimeRecursiveDiscard: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoServiceTimeRecursiveRead: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoServiceTimeRecursiveSync: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoServiceTimeRecursiveTotal: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoServiceTimeRecursiveWrite: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoServicedRecursiveAsync: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoServicedRecursiveDiscard: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoServicedRecursiveRead: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoServicedRecursiveSync: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoServicedRecursiveTotal: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoServicedRecursiveWrite: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoTimeRecursiveAsync: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoTimeRecursiveDiscard: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoTimeRecursiveRead: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoTimeRecursiveSync: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoTimeRecursiveTotal: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoTimeRecursiveWrite: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoWaitTimeRecursiveAsync: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoWaitTimeRecursiveDiscard: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoWaitTimeRecursiveRead: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoWaitTimeRecursiveSync: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoWaitTimeRecursiveTotal: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioIoWaitTimeRecursiveWrite: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioSectorsRecursiveAsync: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioSectorsRecursiveDiscard: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioSectorsRecursiveRead: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioSectorsRecursiveSync: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioSectorsRecursiveTotal: MetricSettings{
+ Enabled: true,
+ },
+ ContainerBlockioSectorsRecursiveWrite: MetricSettings{
+ Enabled: true,
+ },
+ ContainerCPUPercent: MetricSettings{
+ Enabled: true,
+ },
+ ContainerCPUThrottlingDataPeriods: MetricSettings{
+ Enabled: true,
+ },
+ ContainerCPUThrottlingDataThrottledPeriods: MetricSettings{
+ Enabled: true,
+ },
+ ContainerCPUThrottlingDataThrottledTime: MetricSettings{
+ Enabled: true,
+ },
+ ContainerCPUUsageKernelmode: MetricSettings{
+ Enabled: true,
+ },
+ ContainerCPUUsagePercpu: MetricSettings{
+ Enabled: false,
+ },
+ ContainerCPUUsageSystem: MetricSettings{
+ Enabled: true,
+ },
+ ContainerCPUUsageTotal: MetricSettings{
+ Enabled: true,
+ },
+ ContainerCPUUsageUsermode: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryActiveAnon: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryActiveFile: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryCache: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryDirty: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryHierarchicalMemoryLimit: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryHierarchicalMemswLimit: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryInactiveAnon: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryInactiveFile: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryMappedFile: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryPercent: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryPgfault: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryPgmajfault: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryPgpgin: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryPgpgout: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryRss: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryRssHuge: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemorySwap: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryTotalActiveAnon: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryTotalActiveFile: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryTotalCache: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryTotalDirty: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryTotalInactiveAnon: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryTotalInactiveFile: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryTotalMappedFile: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryTotalPgfault: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryTotalPgmajfault: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryTotalPgpgin: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryTotalPgpgout: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryTotalRss: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryTotalRssHuge: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryTotalSwap: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryTotalUnevictable: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryTotalWriteback: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryUnevictable: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryUsageLimit: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryUsageMax: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryUsageTotal: MetricSettings{
+ Enabled: true,
+ },
+ ContainerMemoryWriteback: MetricSettings{
+ Enabled: true,
+ },
+ ContainerNetworkIoUsageRxBytes: MetricSettings{
+ Enabled: true,
+ },
+ ContainerNetworkIoUsageRxDropped: MetricSettings{
+ Enabled: true,
+ },
+ ContainerNetworkIoUsageRxErrors: MetricSettings{
+ Enabled: true,
+ },
+ ContainerNetworkIoUsageRxPackets: MetricSettings{
+ Enabled: true,
+ },
+ ContainerNetworkIoUsageTxBytes: MetricSettings{
+ Enabled: true,
+ },
+ ContainerNetworkIoUsageTxDropped: MetricSettings{
+ Enabled: true,
+ },
+ ContainerNetworkIoUsageTxErrors: MetricSettings{
+ Enabled: true,
+ },
+ ContainerNetworkIoUsageTxPackets: MetricSettings{
+ Enabled: true,
+ },
+ }
+}
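Every metric defaults to enabled except `container.cpu.usage.percpu`. A small sketch of adjusting the generated defaults programmatically, using field names exactly as generated above (the two metrics chosen are illustrative):

```go
// Start from the generated defaults, then flip individual metrics.
settings := metadata.DefaultMetricsSettings()
settings.ContainerCPUUsagePercpu.Enabled = true  // off by default
settings.ContainerMemoryTotalRss.Enabled = false // on by default
```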
+
+type metricContainerBlockioIoMergedRecursiveAsync struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_merged_recursive.async metric with initial data.
+func (m *metricContainerBlockioIoMergedRecursiveAsync) init() {
+ m.data.SetName("container.blockio.io_merged_recursive.async")
+ m.data.SetDescription("Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoMergedRecursiveAsync) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoMergedRecursiveAsync) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoMergedRecursiveAsync) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoMergedRecursiveAsync(settings MetricSettings) metricContainerBlockioIoMergedRecursiveAsync {
+ m := metricContainerBlockioIoMergedRecursiveAsync{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
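+
+// The same four-step lifecycle repeats for every generated metric below:
+// the constructor allocates the pmetric buffer only when the metric is
+// enabled, init seeds the name/description/unit/data type, recordDataPoint
+// appends one point per scrape, and emit moves the accumulated points into
+// the caller's slice before re-initializing the buffer. A minimal usage
+// sketch (the variable names are illustrative, not actual scraper code):
+//
+//	m := newMetricContainerBlockioIoMergedRecursiveAsync(MetricSettings{Enabled: true})
+//	m.recordDataPoint(startTs, nowTs, 42, "8", "0") // device 8:0
+//	m.emit(scopeMetrics.Metrics())                  // flush into a pmetric.MetricSlice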
+
+type metricContainerBlockioIoMergedRecursiveDiscard struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_merged_recursive.discard metric with initial data.
+func (m *metricContainerBlockioIoMergedRecursiveDiscard) init() {
+ m.data.SetName("container.blockio.io_merged_recursive.discard")
+ m.data.SetDescription("Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoMergedRecursiveDiscard) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoMergedRecursiveDiscard) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoMergedRecursiveDiscard) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoMergedRecursiveDiscard(settings MetricSettings) metricContainerBlockioIoMergedRecursiveDiscard {
+ m := metricContainerBlockioIoMergedRecursiveDiscard{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoMergedRecursiveRead struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_merged_recursive.read metric with initial data.
+func (m *metricContainerBlockioIoMergedRecursiveRead) init() {
+ m.data.SetName("container.blockio.io_merged_recursive.read")
+ m.data.SetDescription("Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoMergedRecursiveRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoMergedRecursiveRead) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoMergedRecursiveRead) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoMergedRecursiveRead(settings MetricSettings) metricContainerBlockioIoMergedRecursiveRead {
+ m := metricContainerBlockioIoMergedRecursiveRead{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoMergedRecursiveSync struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_merged_recursive.sync metric with initial data.
+func (m *metricContainerBlockioIoMergedRecursiveSync) init() {
+ m.data.SetName("container.blockio.io_merged_recursive.sync")
+ m.data.SetDescription("Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoMergedRecursiveSync) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoMergedRecursiveSync) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoMergedRecursiveSync) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoMergedRecursiveSync(settings MetricSettings) metricContainerBlockioIoMergedRecursiveSync {
+ m := metricContainerBlockioIoMergedRecursiveSync{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoMergedRecursiveTotal struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_merged_recursive.total metric with initial data.
+func (m *metricContainerBlockioIoMergedRecursiveTotal) init() {
+ m.data.SetName("container.blockio.io_merged_recursive.total")
+ m.data.SetDescription("Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoMergedRecursiveTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoMergedRecursiveTotal) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoMergedRecursiveTotal) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoMergedRecursiveTotal(settings MetricSettings) metricContainerBlockioIoMergedRecursiveTotal {
+ m := metricContainerBlockioIoMergedRecursiveTotal{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoMergedRecursiveWrite struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_merged_recursive.write metric with initial data.
+func (m *metricContainerBlockioIoMergedRecursiveWrite) init() {
+ m.data.SetName("container.blockio.io_merged_recursive.write")
+ m.data.SetDescription("Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoMergedRecursiveWrite) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoMergedRecursiveWrite) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoMergedRecursiveWrite) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoMergedRecursiveWrite(settings MetricSettings) metricContainerBlockioIoMergedRecursiveWrite {
+ m := metricContainerBlockioIoMergedRecursiveWrite{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoQueuedRecursiveAsync struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_queued_recursive.async metric with initial data.
+func (m *metricContainerBlockioIoQueuedRecursiveAsync) init() {
+ m.data.SetName("container.blockio.io_queued_recursive.async")
+ m.data.SetDescription("Number of requests queued up for this cgroup and its descendant cgroups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoQueuedRecursiveAsync) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoQueuedRecursiveAsync) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoQueuedRecursiveAsync) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoQueuedRecursiveAsync(settings MetricSettings) metricContainerBlockioIoQueuedRecursiveAsync {
+ m := metricContainerBlockioIoQueuedRecursiveAsync{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoQueuedRecursiveDiscard struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_queued_recursive.discard metric with initial data.
+func (m *metricContainerBlockioIoQueuedRecursiveDiscard) init() {
+ m.data.SetName("container.blockio.io_queued_recursive.discard")
+ m.data.SetDescription("Number of requests queued up for this cgroup and its descendant cgroups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoQueuedRecursiveDiscard) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoQueuedRecursiveDiscard) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoQueuedRecursiveDiscard) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoQueuedRecursiveDiscard(settings MetricSettings) metricContainerBlockioIoQueuedRecursiveDiscard {
+ m := metricContainerBlockioIoQueuedRecursiveDiscard{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoQueuedRecursiveRead struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_queued_recursive.read metric with initial data.
+func (m *metricContainerBlockioIoQueuedRecursiveRead) init() {
+ m.data.SetName("container.blockio.io_queued_recursive.read")
+ m.data.SetDescription("Number of requests queued up for this cgroup and its descendant cgroups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoQueuedRecursiveRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoQueuedRecursiveRead) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoQueuedRecursiveRead) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoQueuedRecursiveRead(settings MetricSettings) metricContainerBlockioIoQueuedRecursiveRead {
+ m := metricContainerBlockioIoQueuedRecursiveRead{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoQueuedRecursiveSync struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_queued_recursive.sync metric with initial data.
+func (m *metricContainerBlockioIoQueuedRecursiveSync) init() {
+ m.data.SetName("container.blockio.io_queued_recursive.sync")
+ m.data.SetDescription("Number of requests queued up for this cgroup and its descendant cgroups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoQueuedRecursiveSync) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoQueuedRecursiveSync) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoQueuedRecursiveSync) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoQueuedRecursiveSync(settings MetricSettings) metricContainerBlockioIoQueuedRecursiveSync {
+ m := metricContainerBlockioIoQueuedRecursiveSync{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoQueuedRecursiveTotal struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_queued_recursive.total metric with initial data.
+func (m *metricContainerBlockioIoQueuedRecursiveTotal) init() {
+ m.data.SetName("container.blockio.io_queued_recursive.total")
+ m.data.SetDescription("Number of requests queued up for this cgroup and its descendant cgroups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoQueuedRecursiveTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoQueuedRecursiveTotal) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoQueuedRecursiveTotal) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoQueuedRecursiveTotal(settings MetricSettings) metricContainerBlockioIoQueuedRecursiveTotal {
+ m := metricContainerBlockioIoQueuedRecursiveTotal{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoQueuedRecursiveWrite struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_queued_recursive.write metric with initial data.
+func (m *metricContainerBlockioIoQueuedRecursiveWrite) init() {
+ m.data.SetName("container.blockio.io_queued_recursive.write")
+ m.data.SetDescription("Number of requests queued up for this cgroup and its descendant cgroups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoQueuedRecursiveWrite) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoQueuedRecursiveWrite) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoQueuedRecursiveWrite) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoQueuedRecursiveWrite(settings MetricSettings) metricContainerBlockioIoQueuedRecursiveWrite {
+ m := metricContainerBlockioIoQueuedRecursiveWrite{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoServiceBytesRecursiveAsync struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_service_bytes_recursive.async metric with initial data.
+func (m *metricContainerBlockioIoServiceBytesRecursiveAsync) init() {
+ m.data.SetName("container.blockio.io_service_bytes_recursive.async")
+ m.data.SetDescription("Number of bytes transferred to/from the disk by the group and descendant groups.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoServiceBytesRecursiveAsync) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoServiceBytesRecursiveAsync) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoServiceBytesRecursiveAsync) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoServiceBytesRecursiveAsync(settings MetricSettings) metricContainerBlockioIoServiceBytesRecursiveAsync {
+ m := metricContainerBlockioIoServiceBytesRecursiveAsync{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
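+
+// Unlike the dimensionless request counters above (UCUM unit "1"), the
+// io_service_bytes_recursive metrics count transferred bytes, hence the
+// unit "By".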
+
+type metricContainerBlockioIoServiceBytesRecursiveDiscard struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_service_bytes_recursive.discard metric with initial data.
+func (m *metricContainerBlockioIoServiceBytesRecursiveDiscard) init() {
+ m.data.SetName("container.blockio.io_service_bytes_recursive.discard")
+ m.data.SetDescription("Number of bytes transferred to/from the disk by the group and descendant groups.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoServiceBytesRecursiveDiscard) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoServiceBytesRecursiveDiscard) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoServiceBytesRecursiveDiscard) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoServiceBytesRecursiveDiscard(settings MetricSettings) metricContainerBlockioIoServiceBytesRecursiveDiscard {
+ m := metricContainerBlockioIoServiceBytesRecursiveDiscard{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoServiceBytesRecursiveRead struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_service_bytes_recursive.read metric with initial data.
+func (m *metricContainerBlockioIoServiceBytesRecursiveRead) init() {
+ m.data.SetName("container.blockio.io_service_bytes_recursive.read")
+ m.data.SetDescription("Number of bytes transferred to/from the disk by the group and descendant groups.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoServiceBytesRecursiveRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoServiceBytesRecursiveRead) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoServiceBytesRecursiveRead) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoServiceBytesRecursiveRead(settings MetricSettings) metricContainerBlockioIoServiceBytesRecursiveRead {
+ m := metricContainerBlockioIoServiceBytesRecursiveRead{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoServiceBytesRecursiveSync struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_service_bytes_recursive.sync metric with initial data.
+func (m *metricContainerBlockioIoServiceBytesRecursiveSync) init() {
+ m.data.SetName("container.blockio.io_service_bytes_recursive.sync")
+ m.data.SetDescription("Number of bytes transferred to/from the disk by the group and descendant groups.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoServiceBytesRecursiveSync) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoServiceBytesRecursiveSync) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoServiceBytesRecursiveSync) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoServiceBytesRecursiveSync(settings MetricSettings) metricContainerBlockioIoServiceBytesRecursiveSync {
+ m := metricContainerBlockioIoServiceBytesRecursiveSync{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoServiceBytesRecursiveTotal struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_service_bytes_recursive.total metric with initial data.
+func (m *metricContainerBlockioIoServiceBytesRecursiveTotal) init() {
+ m.data.SetName("container.blockio.io_service_bytes_recursive.total")
+ m.data.SetDescription("Number of bytes transferred to/from the disk by the group and descendant groups.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoServiceBytesRecursiveTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoServiceBytesRecursiveTotal) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoServiceBytesRecursiveTotal) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoServiceBytesRecursiveTotal(settings MetricSettings) metricContainerBlockioIoServiceBytesRecursiveTotal {
+ m := metricContainerBlockioIoServiceBytesRecursiveTotal{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoServiceBytesRecursiveWrite struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_service_bytes_recursive.write metric with initial data.
+func (m *metricContainerBlockioIoServiceBytesRecursiveWrite) init() {
+ m.data.SetName("container.blockio.io_service_bytes_recursive.write")
+ m.data.SetDescription("Number of bytes transferred to/from the disk by the group and descendant groups.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoServiceBytesRecursiveWrite) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoServiceBytesRecursiveWrite) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoServiceBytesRecursiveWrite) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoServiceBytesRecursiveWrite(settings MetricSettings) metricContainerBlockioIoServiceBytesRecursiveWrite {
+ m := metricContainerBlockioIoServiceBytesRecursiveWrite{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoServiceTimeRecursiveAsync struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_service_time_recursive.async metric with initial data.
+func (m *metricContainerBlockioIoServiceTimeRecursiveAsync) init() {
+ m.data.SetName("container.blockio.io_service_time_recursive.async")
+ m.data.SetDescription("Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups.")
+	m.data.SetUnit("ns")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoServiceTimeRecursiveAsync) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoServiceTimeRecursiveAsync) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoServiceTimeRecursiveAsync) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoServiceTimeRecursiveAsync(settings MetricSettings) metricContainerBlockioIoServiceTimeRecursiveAsync {
+ m := metricContainerBlockioIoServiceTimeRecursiveAsync{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
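+
+// The io_service_time_recursive metrics accumulate elapsed time rather
+// than a request count, so they carry the UCUM unit "ns" to match the
+// nanosecond values in their descriptions.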
+
+type metricContainerBlockioIoServiceTimeRecursiveDiscard struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_service_time_recursive.discard metric with initial data.
+func (m *metricContainerBlockioIoServiceTimeRecursiveDiscard) init() {
+ m.data.SetName("container.blockio.io_service_time_recursive.discard")
+ m.data.SetDescription("Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups.")
+	m.data.SetUnit("ns")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoServiceTimeRecursiveDiscard) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoServiceTimeRecursiveDiscard) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoServiceTimeRecursiveDiscard) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoServiceTimeRecursiveDiscard(settings MetricSettings) metricContainerBlockioIoServiceTimeRecursiveDiscard {
+ m := metricContainerBlockioIoServiceTimeRecursiveDiscard{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoServiceTimeRecursiveRead struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_service_time_recursive.read metric with initial data.
+func (m *metricContainerBlockioIoServiceTimeRecursiveRead) init() {
+ m.data.SetName("container.blockio.io_service_time_recursive.read")
+ m.data.SetDescription("Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups.")
+	m.data.SetUnit("ns")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoServiceTimeRecursiveRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoServiceTimeRecursiveRead) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoServiceTimeRecursiveRead) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoServiceTimeRecursiveRead(settings MetricSettings) metricContainerBlockioIoServiceTimeRecursiveRead {
+ m := metricContainerBlockioIoServiceTimeRecursiveRead{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoServiceTimeRecursiveSync struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_service_time_recursive.sync metric with initial data.
+func (m *metricContainerBlockioIoServiceTimeRecursiveSync) init() {
+ m.data.SetName("container.blockio.io_service_time_recursive.sync")
+ m.data.SetDescription("Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups.")
+	m.data.SetUnit("ns")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoServiceTimeRecursiveSync) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoServiceTimeRecursiveSync) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoServiceTimeRecursiveSync) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoServiceTimeRecursiveSync(settings MetricSettings) metricContainerBlockioIoServiceTimeRecursiveSync {
+ m := metricContainerBlockioIoServiceTimeRecursiveSync{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoServiceTimeRecursiveTotal struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_service_time_recursive.total metric with initial data.
+func (m *metricContainerBlockioIoServiceTimeRecursiveTotal) init() {
+ m.data.SetName("container.blockio.io_service_time_recursive.total")
+ m.data.SetDescription("Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups.")
+	m.data.SetUnit("ns")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoServiceTimeRecursiveTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoServiceTimeRecursiveTotal) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoServiceTimeRecursiveTotal) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoServiceTimeRecursiveTotal(settings MetricSettings) metricContainerBlockioIoServiceTimeRecursiveTotal {
+ m := metricContainerBlockioIoServiceTimeRecursiveTotal{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoServiceTimeRecursiveWrite struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_service_time_recursive.write metric with initial data.
+func (m *metricContainerBlockioIoServiceTimeRecursiveWrite) init() {
+ m.data.SetName("container.blockio.io_service_time_recursive.write")
+ m.data.SetDescription("Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups.")
+	m.data.SetUnit("ns")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoServiceTimeRecursiveWrite) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoServiceTimeRecursiveWrite) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoServiceTimeRecursiveWrite) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoServiceTimeRecursiveWrite(settings MetricSettings) metricContainerBlockioIoServiceTimeRecursiveWrite {
+ m := metricContainerBlockioIoServiceTimeRecursiveWrite{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoServicedRecursiveAsync struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_serviced_recursive.async metric with initial data.
+func (m *metricContainerBlockioIoServicedRecursiveAsync) init() {
+ m.data.SetName("container.blockio.io_serviced_recursive.async")
+ m.data.SetDescription("Number of IOs (bio) issued to the disk by the group and descendant groups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoServicedRecursiveAsync) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoServicedRecursiveAsync) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoServicedRecursiveAsync) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoServicedRecursiveAsync(settings MetricSettings) metricContainerBlockioIoServicedRecursiveAsync {
+ m := metricContainerBlockioIoServicedRecursiveAsync{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoServicedRecursiveDiscard struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_serviced_recursive.discard metric with initial data.
+func (m *metricContainerBlockioIoServicedRecursiveDiscard) init() {
+ m.data.SetName("container.blockio.io_serviced_recursive.discard")
+ m.data.SetDescription("Number of IOs (bio) issued to the disk by the group and descendant groups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoServicedRecursiveDiscard) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoServicedRecursiveDiscard) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoServicedRecursiveDiscard) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoServicedRecursiveDiscard(settings MetricSettings) metricContainerBlockioIoServicedRecursiveDiscard {
+ m := metricContainerBlockioIoServicedRecursiveDiscard{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoServicedRecursiveRead struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_serviced_recursive.read metric with initial data.
+func (m *metricContainerBlockioIoServicedRecursiveRead) init() {
+ m.data.SetName("container.blockio.io_serviced_recursive.read")
+ m.data.SetDescription("Number of IOs (bio) issued to the disk by the group and descendant groups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoServicedRecursiveRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoServicedRecursiveRead) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoServicedRecursiveRead) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoServicedRecursiveRead(settings MetricSettings) metricContainerBlockioIoServicedRecursiveRead {
+ m := metricContainerBlockioIoServicedRecursiveRead{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoServicedRecursiveSync struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_serviced_recursive.sync metric with initial data.
+func (m *metricContainerBlockioIoServicedRecursiveSync) init() {
+ m.data.SetName("container.blockio.io_serviced_recursive.sync")
+ m.data.SetDescription("Number of IOs (bio) issued to the disk by the group and descendant groups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoServicedRecursiveSync) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoServicedRecursiveSync) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoServicedRecursiveSync) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoServicedRecursiveSync(settings MetricSettings) metricContainerBlockioIoServicedRecursiveSync {
+ m := metricContainerBlockioIoServicedRecursiveSync{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoServicedRecursiveTotal struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_serviced_recursive.total metric with initial data.
+func (m *metricContainerBlockioIoServicedRecursiveTotal) init() {
+ m.data.SetName("container.blockio.io_serviced_recursive.total")
+ m.data.SetDescription("Number of IOs (bio) issued to the disk by the group and descendant groups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoServicedRecursiveTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoServicedRecursiveTotal) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoServicedRecursiveTotal) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoServicedRecursiveTotal(settings MetricSettings) metricContainerBlockioIoServicedRecursiveTotal {
+ m := metricContainerBlockioIoServicedRecursiveTotal{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoServicedRecursiveWrite struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_serviced_recursive.write metric with initial data.
+func (m *metricContainerBlockioIoServicedRecursiveWrite) init() {
+ m.data.SetName("container.blockio.io_serviced_recursive.write")
+ m.data.SetDescription("Number of IOs (bio) issued to the disk by the group and descendant groups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoServicedRecursiveWrite) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoServicedRecursiveWrite) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoServicedRecursiveWrite) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoServicedRecursiveWrite(settings MetricSettings) metricContainerBlockioIoServicedRecursiveWrite {
+ m := metricContainerBlockioIoServicedRecursiveWrite{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoTimeRecursiveAsync struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_time_recursive.async metric with initial data.
+func (m *metricContainerBlockioIoTimeRecursiveAsync) init() {
+ m.data.SetName("container.blockio.io_time_recursive.async")
+ m.data.SetDescription("Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds.")
+ m.data.SetUnit("ms")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoTimeRecursiveAsync) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoTimeRecursiveAsync) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoTimeRecursiveAsync) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoTimeRecursiveAsync(settings MetricSettings) metricContainerBlockioIoTimeRecursiveAsync {
+ m := metricContainerBlockioIoTimeRecursiveAsync{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoTimeRecursiveDiscard struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_time_recursive.discard metric with initial data.
+func (m *metricContainerBlockioIoTimeRecursiveDiscard) init() {
+ m.data.SetName("container.blockio.io_time_recursive.discard")
+ m.data.SetDescription("Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds.")
+ m.data.SetUnit("ms")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoTimeRecursiveDiscard) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoTimeRecursiveDiscard) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoTimeRecursiveDiscard) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoTimeRecursiveDiscard(settings MetricSettings) metricContainerBlockioIoTimeRecursiveDiscard {
+ m := metricContainerBlockioIoTimeRecursiveDiscard{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoTimeRecursiveRead struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_time_recursive.read metric with initial data.
+func (m *metricContainerBlockioIoTimeRecursiveRead) init() {
+ m.data.SetName("container.blockio.io_time_recursive.read")
+ m.data.SetDescription("Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds.")
+ m.data.SetUnit("ms")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoTimeRecursiveRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoTimeRecursiveRead) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoTimeRecursiveRead) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoTimeRecursiveRead(settings MetricSettings) metricContainerBlockioIoTimeRecursiveRead {
+ m := metricContainerBlockioIoTimeRecursiveRead{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoTimeRecursiveSync struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_time_recursive.sync metric with initial data.
+func (m *metricContainerBlockioIoTimeRecursiveSync) init() {
+ m.data.SetName("container.blockio.io_time_recursive.sync")
+ m.data.SetDescription("Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds.")
+ m.data.SetUnit("ms")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoTimeRecursiveSync) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoTimeRecursiveSync) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoTimeRecursiveSync) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoTimeRecursiveSync(settings MetricSettings) metricContainerBlockioIoTimeRecursiveSync {
+ m := metricContainerBlockioIoTimeRecursiveSync{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoTimeRecursiveTotal struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_time_recursive.total metric with initial data.
+func (m *metricContainerBlockioIoTimeRecursiveTotal) init() {
+ m.data.SetName("container.blockio.io_time_recursive.total")
+ m.data.SetDescription("Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds.")
+ m.data.SetUnit("ms")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoTimeRecursiveTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoTimeRecursiveTotal) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoTimeRecursiveTotal) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoTimeRecursiveTotal(settings MetricSettings) metricContainerBlockioIoTimeRecursiveTotal {
+ m := metricContainerBlockioIoTimeRecursiveTotal{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoTimeRecursiveWrite struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_time_recursive.write metric with initial data.
+func (m *metricContainerBlockioIoTimeRecursiveWrite) init() {
+ m.data.SetName("container.blockio.io_time_recursive.write")
+ m.data.SetDescription("Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds.")
+ m.data.SetUnit("ms")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoTimeRecursiveWrite) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoTimeRecursiveWrite) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoTimeRecursiveWrite) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoTimeRecursiveWrite(settings MetricSettings) metricContainerBlockioIoTimeRecursiveWrite {
+ m := metricContainerBlockioIoTimeRecursiveWrite{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoWaitTimeRecursiveAsync struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_wait_time_recursive.async metric with initial data.
+func (m *metricContainerBlockioIoWaitTimeRecursiveAsync) init() {
+ m.data.SetName("container.blockio.io_wait_time_recursive.async")
+ m.data.SetDescription("Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoWaitTimeRecursiveAsync) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoWaitTimeRecursiveAsync) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoWaitTimeRecursiveAsync) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoWaitTimeRecursiveAsync(settings MetricSettings) metricContainerBlockioIoWaitTimeRecursiveAsync {
+ m := metricContainerBlockioIoWaitTimeRecursiveAsync{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoWaitTimeRecursiveDiscard struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_wait_time_recursive.discard metric with initial data.
+func (m *metricContainerBlockioIoWaitTimeRecursiveDiscard) init() {
+ m.data.SetName("container.blockio.io_wait_time_recursive.discard")
+ m.data.SetDescription("Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoWaitTimeRecursiveDiscard) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoWaitTimeRecursiveDiscard) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoWaitTimeRecursiveDiscard) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoWaitTimeRecursiveDiscard(settings MetricSettings) metricContainerBlockioIoWaitTimeRecursiveDiscard {
+ m := metricContainerBlockioIoWaitTimeRecursiveDiscard{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoWaitTimeRecursiveRead struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_wait_time_recursive.read metric with initial data.
+func (m *metricContainerBlockioIoWaitTimeRecursiveRead) init() {
+ m.data.SetName("container.blockio.io_wait_time_recursive.read")
+ m.data.SetDescription("Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoWaitTimeRecursiveRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoWaitTimeRecursiveRead) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoWaitTimeRecursiveRead) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoWaitTimeRecursiveRead(settings MetricSettings) metricContainerBlockioIoWaitTimeRecursiveRead {
+ m := metricContainerBlockioIoWaitTimeRecursiveRead{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoWaitTimeRecursiveSync struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_wait_time_recursive.sync metric with initial data.
+func (m *metricContainerBlockioIoWaitTimeRecursiveSync) init() {
+ m.data.SetName("container.blockio.io_wait_time_recursive.sync")
+ m.data.SetDescription("Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoWaitTimeRecursiveSync) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoWaitTimeRecursiveSync) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoWaitTimeRecursiveSync) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoWaitTimeRecursiveSync(settings MetricSettings) metricContainerBlockioIoWaitTimeRecursiveSync {
+ m := metricContainerBlockioIoWaitTimeRecursiveSync{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoWaitTimeRecursiveTotal struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_wait_time_recursive.total metric with initial data.
+func (m *metricContainerBlockioIoWaitTimeRecursiveTotal) init() {
+ m.data.SetName("container.blockio.io_wait_time_recursive.total")
+ m.data.SetDescription("Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoWaitTimeRecursiveTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoWaitTimeRecursiveTotal) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoWaitTimeRecursiveTotal) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoWaitTimeRecursiveTotal(settings MetricSettings) metricContainerBlockioIoWaitTimeRecursiveTotal {
+ m := metricContainerBlockioIoWaitTimeRecursiveTotal{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioIoWaitTimeRecursiveWrite struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.io_wait_time_recursive.write metric with initial data.
+func (m *metricContainerBlockioIoWaitTimeRecursiveWrite) init() {
+ m.data.SetName("container.blockio.io_wait_time_recursive.write")
+ m.data.SetDescription("Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioIoWaitTimeRecursiveWrite) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioIoWaitTimeRecursiveWrite) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioIoWaitTimeRecursiveWrite) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioIoWaitTimeRecursiveWrite(settings MetricSettings) metricContainerBlockioIoWaitTimeRecursiveWrite {
+ m := metricContainerBlockioIoWaitTimeRecursiveWrite{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioSectorsRecursiveAsync struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.sectors_recursive.async metric with initial data.
+func (m *metricContainerBlockioSectorsRecursiveAsync) init() {
+ m.data.SetName("container.blockio.sectors_recursive.async")
+ m.data.SetDescription("Number of sectors transferred to/from disk by the group and descendant groups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioSectorsRecursiveAsync) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioSectorsRecursiveAsync) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioSectorsRecursiveAsync) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioSectorsRecursiveAsync(settings MetricSettings) metricContainerBlockioSectorsRecursiveAsync {
+ m := metricContainerBlockioSectorsRecursiveAsync{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioSectorsRecursiveDiscard struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.sectors_recursive.discard metric with initial data.
+func (m *metricContainerBlockioSectorsRecursiveDiscard) init() {
+ m.data.SetName("container.blockio.sectors_recursive.discard")
+ m.data.SetDescription("Number of sectors transferred to/from disk by the group and descendant groups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioSectorsRecursiveDiscard) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioSectorsRecursiveDiscard) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioSectorsRecursiveDiscard) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioSectorsRecursiveDiscard(settings MetricSettings) metricContainerBlockioSectorsRecursiveDiscard {
+ m := metricContainerBlockioSectorsRecursiveDiscard{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioSectorsRecursiveRead struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.sectors_recursive.read metric with initial data.
+func (m *metricContainerBlockioSectorsRecursiveRead) init() {
+ m.data.SetName("container.blockio.sectors_recursive.read")
+ m.data.SetDescription("Number of sectors transferred to/from disk by the group and descendant groups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioSectorsRecursiveRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioSectorsRecursiveRead) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioSectorsRecursiveRead) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioSectorsRecursiveRead(settings MetricSettings) metricContainerBlockioSectorsRecursiveRead {
+ m := metricContainerBlockioSectorsRecursiveRead{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioSectorsRecursiveSync struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.sectors_recursive.sync metric with initial data.
+func (m *metricContainerBlockioSectorsRecursiveSync) init() {
+ m.data.SetName("container.blockio.sectors_recursive.sync")
+ m.data.SetDescription("Number of sectors transferred to/from disk by the group and descendant groups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioSectorsRecursiveSync) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioSectorsRecursiveSync) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioSectorsRecursiveSync) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioSectorsRecursiveSync(settings MetricSettings) metricContainerBlockioSectorsRecursiveSync {
+ m := metricContainerBlockioSectorsRecursiveSync{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioSectorsRecursiveTotal struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.sectors_recursive.total metric with initial data.
+func (m *metricContainerBlockioSectorsRecursiveTotal) init() {
+ m.data.SetName("container.blockio.sectors_recursive.total")
+ m.data.SetDescription("Number of sectors transferred to/from disk by the group and descendant groups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioSectorsRecursiveTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioSectorsRecursiveTotal) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioSectorsRecursiveTotal) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioSectorsRecursiveTotal(settings MetricSettings) metricContainerBlockioSectorsRecursiveTotal {
+ m := metricContainerBlockioSectorsRecursiveTotal{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerBlockioSectorsRecursiveWrite struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.blockio.sectors_recursive.write metric with initial data.
+func (m *metricContainerBlockioSectorsRecursiveWrite) init() {
+ m.data.SetName("container.blockio.sectors_recursive.write")
+ m.data.SetDescription("Number of sectors transferred to/from disk by the group and descendant groups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerBlockioSectorsRecursiveWrite) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("device_major", pcommon.NewValueString(deviceMajorAttributeValue))
+ dp.Attributes().Insert("device_minor", pcommon.NewValueString(deviceMinorAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerBlockioSectorsRecursiveWrite) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerBlockioSectorsRecursiveWrite) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerBlockioSectorsRecursiveWrite(settings MetricSettings) metricContainerBlockioSectorsRecursiveWrite {
+ m := metricContainerBlockioSectorsRecursiveWrite{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
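+
+// MetricSettings gates every metric end to end (sketch): with Enabled false,
+// the constructor allocates no pmetric.Metric, recordDataPoint returns
+// immediately, and emit appends nothing to the slice.
+//
+//	m := newMetricContainerBlockioSectorsRecursiveWrite(MetricSettings{Enabled: false})
+//	m.recordDataPoint(start, now, 1, "8", "0") // no-op
+//	m.emit(ms)                                 // ms is left unchanged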
+
+type metricContainerCPUPercent struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.cpu.percent metric with initial data.
+func (m *metricContainerCPUPercent) init() {
+ m.data.SetName("container.cpu.percent")
+ m.data.SetDescription("Percent of CPU used by the container.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerCPUPercent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetDoubleVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerCPUPercent) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerCPUPercent) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerCPUPercent(settings MetricSettings) metricContainerCPUPercent {
+ m := metricContainerCPUPercent{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
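+
+// container.cpu.percent is a Gauge rather than a cumulative Sum, so its
+// recordDataPoint takes a float64 and init sets no monotonicity or
+// aggregation temporality (sketch; start, now, and ms as above):
+//
+//	m := newMetricContainerCPUPercent(MetricSettings{Enabled: true})
+//	m.recordDataPoint(start, now, 12.5)
+//	m.emit(ms)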
+
+type metricContainerCPUThrottlingDataPeriods struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.cpu.throttling_data.periods metric with initial data.
+func (m *metricContainerCPUThrottlingDataPeriods) init() {
+ m.data.SetName("container.cpu.throttling_data.periods")
+ m.data.SetDescription("Number of periods with throttling active.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricContainerCPUThrottlingDataPeriods) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerCPUThrottlingDataPeriods) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerCPUThrottlingDataPeriods) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerCPUThrottlingDataPeriods(settings MetricSettings) metricContainerCPUThrottlingDataPeriods {
+ m := metricContainerCPUThrottlingDataPeriods{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerCPUThrottlingDataThrottledPeriods struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.cpu.throttling_data.throttled_periods metric with initial data.
+func (m *metricContainerCPUThrottlingDataThrottledPeriods) init() {
+ m.data.SetName("container.cpu.throttling_data.throttled_periods")
+ m.data.SetDescription("Number of periods when the container hits its throttling limit.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricContainerCPUThrottlingDataThrottledPeriods) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerCPUThrottlingDataThrottledPeriods) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerCPUThrottlingDataThrottledPeriods) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerCPUThrottlingDataThrottledPeriods(settings MetricSettings) metricContainerCPUThrottlingDataThrottledPeriods {
+ m := metricContainerCPUThrottlingDataThrottledPeriods{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerCPUThrottlingDataThrottledTime struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.cpu.throttling_data.throttled_time metric with initial data.
+func (m *metricContainerCPUThrottlingDataThrottledTime) init() {
+ m.data.SetName("container.cpu.throttling_data.throttled_time")
+ m.data.SetDescription("Aggregate time the container was throttled.")
+ m.data.SetUnit("ns")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricContainerCPUThrottlingDataThrottledTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerCPUThrottlingDataThrottledTime) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerCPUThrottlingDataThrottledTime) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerCPUThrottlingDataThrottledTime(settings MetricSettings) metricContainerCPUThrottlingDataThrottledTime {
+ m := metricContainerCPUThrottlingDataThrottledTime{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerCPUUsageKernelmode struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.cpu.usage.kernelmode metric with initial data.
+func (m *metricContainerCPUUsageKernelmode) init() {
+ m.data.SetName("container.cpu.usage.kernelmode")
+ m.data.SetDescription("Time spent by tasks of the cgroup in kernel mode (Linux). Time spent by all container processes in kernel mode (Windows).")
+ m.data.SetUnit("ns")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricContainerCPUUsageKernelmode) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerCPUUsageKernelmode) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerCPUUsageKernelmode) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerCPUUsageKernelmode(settings MetricSettings) metricContainerCPUUsageKernelmode {
+ m := metricContainerCPUUsageKernelmode{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerCPUUsagePercpu struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.cpu.usage.percpu metric with initial data.
+func (m *metricContainerCPUUsagePercpu) init() {
+ m.data.SetName("container.cpu.usage.percpu")
+ m.data.SetDescription("Per-core CPU usage by the container.")
+ m.data.SetUnit("ns")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
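+// The EnsureCapacity call in init above is fed by m.capacity, which
+// updateCapacity raises to the largest data-point count observed before each
+// emit. Because this metric carries a per-core attribute, the next cycle can
+// pre-allocate one slot per previously seen core instead of growing the
+// data-point slice append by append.
+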
+func (m *metricContainerCPUUsagePercpu) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, coreAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("core", pcommon.NewValueString(coreAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerCPUUsagePercpu) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerCPUUsagePercpu) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerCPUUsagePercpu(settings MetricSettings) metricContainerCPUUsagePercpu {
+ m := metricContainerCPUUsagePercpu{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerCPUUsageSystem struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.cpu.usage.system metric with initial data.
+func (m *metricContainerCPUUsageSystem) init() {
+ m.data.SetName("container.cpu.usage.system")
+ m.data.SetDescription("System CPU usage.")
+ m.data.SetUnit("ns")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricContainerCPUUsageSystem) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerCPUUsageSystem) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerCPUUsageSystem) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerCPUUsageSystem(settings MetricSettings) metricContainerCPUUsageSystem {
+ m := metricContainerCPUUsageSystem{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerCPUUsageTotal struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.cpu.usage.total metric with initial data.
+func (m *metricContainerCPUUsageTotal) init() {
+ m.data.SetName("container.cpu.usage.total")
+ m.data.SetDescription("Total CPU time consumed.")
+ m.data.SetUnit("ns")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricContainerCPUUsageTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerCPUUsageTotal) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerCPUUsageTotal) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerCPUUsageTotal(settings MetricSettings) metricContainerCPUUsageTotal {
+ m := metricContainerCPUUsageTotal{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerCPUUsageUsermode struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.cpu.usage.usermode metric with initial data.
+func (m *metricContainerCPUUsageUsermode) init() {
+ m.data.SetName("container.cpu.usage.usermode")
+ m.data.SetDescription("Time spent by tasks of the cgroup in user mode (Linux). Time spent by all container processes in user mode (Windows).")
+ m.data.SetUnit("ns")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricContainerCPUUsageUsermode) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerCPUUsageUsermode) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerCPUUsageUsermode) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerCPUUsageUsermode(settings MetricSettings) metricContainerCPUUsageUsermode {
+ m := metricContainerCPUUsageUsermode{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryActiveAnon struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.active_anon metric with initial data.
+func (m *metricContainerMemoryActiveAnon) init() {
+ m.data.SetName("container.memory.active_anon")
+ m.data.SetDescription("The amount of anonymous memory that has been identified as active by the kernel.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryActiveAnon) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryActiveAnon) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryActiveAnon) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryActiveAnon(settings MetricSettings) metricContainerMemoryActiveAnon {
+ m := metricContainerMemoryActiveAnon{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
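+// The container.memory.* gauges in this stretch mirror fields of the cgroups
+// v1 memory.stat file (active_anon, active_file, cache, dirty, ...), which
+// the Docker stats API exposes via MemoryStats.Stats; the exact mapping
+// lives in the scraper, outside this generated file, so treat it here as an
+// assumption.
+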
+type metricContainerMemoryActiveFile struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.active_file metric with initial data.
+func (m *metricContainerMemoryActiveFile) init() {
+ m.data.SetName("container.memory.active_file")
+ m.data.SetDescription("Cache memory that has been identified as active by the kernel.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryActiveFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryActiveFile) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryActiveFile) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryActiveFile(settings MetricSettings) metricContainerMemoryActiveFile {
+ m := metricContainerMemoryActiveFile{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryCache struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.cache metric with initial data.
+func (m *metricContainerMemoryCache) init() {
+ m.data.SetName("container.memory.cache")
+ m.data.SetDescription("The amount of memory used by the processes of this control group that can be associated precisely with a block on a block device.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryCache) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryCache) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryCache) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryCache(settings MetricSettings) metricContainerMemoryCache {
+ m := metricContainerMemoryCache{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryDirty struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.dirty metric with initial data.
+func (m *metricContainerMemoryDirty) init() {
+ m.data.SetName("container.memory.dirty")
+ m.data.SetDescription("Bytes that are waiting to get written back to the disk, from this cgroup.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryDirty) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryDirty) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryDirty) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryDirty(settings MetricSettings) metricContainerMemoryDirty {
+ m := metricContainerMemoryDirty{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryHierarchicalMemoryLimit struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.hierarchical_memory_limit metric with initial data.
+func (m *metricContainerMemoryHierarchicalMemoryLimit) init() {
+ m.data.SetName("container.memory.hierarchical_memory_limit")
+ m.data.SetDescription("The maximum amount of physical memory that can be used by the processes of this control group.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryHierarchicalMemoryLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryHierarchicalMemoryLimit) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryHierarchicalMemoryLimit) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryHierarchicalMemoryLimit(settings MetricSettings) metricContainerMemoryHierarchicalMemoryLimit {
+ m := metricContainerMemoryHierarchicalMemoryLimit{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryHierarchicalMemswLimit struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.hierarchical_memsw_limit metric with initial data.
+func (m *metricContainerMemoryHierarchicalMemswLimit) init() {
+ m.data.SetName("container.memory.hierarchical_memsw_limit")
+ m.data.SetDescription("The maximum amount of RAM + swap that can be used by the processes of this control group.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryHierarchicalMemswLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryHierarchicalMemswLimit) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryHierarchicalMemswLimit) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryHierarchicalMemswLimit(settings MetricSettings) metricContainerMemoryHierarchicalMemswLimit {
+ m := metricContainerMemoryHierarchicalMemswLimit{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryInactiveAnon struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.inactive_anon metric with initial data.
+func (m *metricContainerMemoryInactiveAnon) init() {
+ m.data.SetName("container.memory.inactive_anon")
+ m.data.SetDescription("The amount of anonymous memory that has been identified as inactive by the kernel.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryInactiveAnon) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryInactiveAnon) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryInactiveAnon) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryInactiveAnon(settings MetricSettings) metricContainerMemoryInactiveAnon {
+ m := metricContainerMemoryInactiveAnon{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryInactiveFile struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.inactive_file metric with initial data.
+func (m *metricContainerMemoryInactiveFile) init() {
+ m.data.SetName("container.memory.inactive_file")
+ m.data.SetDescription("Cache memory that has been identified as inactive by the kernel.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryInactiveFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryInactiveFile) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryInactiveFile) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryInactiveFile(settings MetricSettings) metricContainerMemoryInactiveFile {
+ m := metricContainerMemoryInactiveFile{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryMappedFile struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.mapped_file metric with initial data.
+func (m *metricContainerMemoryMappedFile) init() {
+ m.data.SetName("container.memory.mapped_file")
+ m.data.SetDescription("Indicates the amount of memory mapped by the processes in the control group.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryMappedFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryMappedFile) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryMappedFile) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryMappedFile(settings MetricSettings) metricContainerMemoryMappedFile {
+ m := metricContainerMemoryMappedFile{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryPercent struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.percent metric with initial data.
+func (m *metricContainerMemoryPercent) init() {
+ m.data.SetName("container.memory.percent")
+ m.data.SetDescription("Percentage of memory used.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryPercent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetDoubleVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryPercent) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryPercent) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryPercent(settings MetricSettings) metricContainerMemoryPercent {
+ m := metricContainerMemoryPercent{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
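+// Like container.cpu.percent, the container.memory.percent gauge above takes
+// a float64. A plausible derivation (an assumption here; `docker stats`
+// computes something similar) is usage relative to the limit, with "usage"
+// and "limit" as assumed names:
+//
+//	percent := float64(usage) / float64(limit) * 100.0
+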
+type metricContainerMemoryPgfault struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.pgfault metric with initial data.
+func (m *metricContainerMemoryPgfault) init() {
+ m.data.SetName("container.memory.pgfault")
+	m.data.SetDescription("Indicates the number of times that a process of the cgroup triggered a page fault.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricContainerMemoryPgfault) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryPgfault) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryPgfault) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryPgfault(settings MetricSettings) metricContainerMemoryPgfault {
+ m := metricContainerMemoryPgfault{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryPgmajfault struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.pgmajfault metric with initial data.
+func (m *metricContainerMemoryPgmajfault) init() {
+ m.data.SetName("container.memory.pgmajfault")
+	m.data.SetDescription("Indicates the number of times that a process of the cgroup triggered a major fault.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricContainerMemoryPgmajfault) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryPgmajfault) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryPgmajfault) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryPgmajfault(settings MetricSettings) metricContainerMemoryPgmajfault {
+ m := metricContainerMemoryPgmajfault{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryPgpgin struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.pgpgin metric with initial data.
+func (m *metricContainerMemoryPgpgin) init() {
+ m.data.SetName("container.memory.pgpgin")
+ m.data.SetDescription("Number of pages read from disk by the cgroup.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricContainerMemoryPgpgin) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryPgpgin) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryPgpgin) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryPgpgin(settings MetricSettings) metricContainerMemoryPgpgin {
+ m := metricContainerMemoryPgpgin{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryPgpgout struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.pgpgout metric with initial data.
+func (m *metricContainerMemoryPgpgout) init() {
+ m.data.SetName("container.memory.pgpgout")
+ m.data.SetDescription("Number of pages written to disk by the cgroup.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricContainerMemoryPgpgout) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryPgpgout) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryPgpgout) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryPgpgout(settings MetricSettings) metricContainerMemoryPgpgout {
+ m := metricContainerMemoryPgpgout{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryRss struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.rss metric with initial data.
+func (m *metricContainerMemoryRss) init() {
+ m.data.SetName("container.memory.rss")
+ m.data.SetDescription("The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryRss) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryRss) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryRss) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryRss(settings MetricSettings) metricContainerMemoryRss {
+ m := metricContainerMemoryRss{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryRssHuge struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.rss_huge metric with initial data.
+func (m *metricContainerMemoryRssHuge) init() {
+ m.data.SetName("container.memory.rss_huge")
+ m.data.SetDescription("Number of bytes of anonymous transparent hugepages in this cgroup.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryRssHuge) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryRssHuge) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryRssHuge) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryRssHuge(settings MetricSettings) metricContainerMemoryRssHuge {
+ m := metricContainerMemoryRssHuge{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemorySwap struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.swap metric with initial data.
+func (m *metricContainerMemorySwap) init() {
+ m.data.SetName("container.memory.swap")
+ m.data.SetDescription("The amount of swap currently used by the processes in this cgroup.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemorySwap) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemorySwap) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemorySwap) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemorySwap(settings MetricSettings) metricContainerMemorySwap {
+ m := metricContainerMemorySwap{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryTotalActiveAnon struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.total_active_anon metric with initial data.
+func (m *metricContainerMemoryTotalActiveAnon) init() {
+ m.data.SetName("container.memory.total_active_anon")
+ m.data.SetDescription("The amount of anonymous memory that has been identified as active by the kernel. Includes descendant cgroups.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryTotalActiveAnon) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryTotalActiveAnon) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryTotalActiveAnon) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryTotalActiveAnon(settings MetricSettings) metricContainerMemoryTotalActiveAnon {
+ m := metricContainerMemoryTotalActiveAnon{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryTotalActiveFile struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.total_active_file metric with initial data.
+func (m *metricContainerMemoryTotalActiveFile) init() {
+ m.data.SetName("container.memory.total_active_file")
+ m.data.SetDescription("Cache memory that has been identified as active by the kernel. Includes descendant cgroups.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryTotalActiveFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryTotalActiveFile) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryTotalActiveFile) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryTotalActiveFile(settings MetricSettings) metricContainerMemoryTotalActiveFile {
+ m := metricContainerMemoryTotalActiveFile{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryTotalCache struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.total_cache metric with initial data.
+func (m *metricContainerMemoryTotalCache) init() {
+ m.data.SetName("container.memory.total_cache")
+ m.data.SetDescription("Total amount of memory used by the processes of this cgroup (and descendants) that can be associated with a block on a block device. Also accounts for memory used by tmpfs.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryTotalCache) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryTotalCache) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryTotalCache) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryTotalCache(settings MetricSettings) metricContainerMemoryTotalCache {
+ m := metricContainerMemoryTotalCache{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryTotalDirty struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.total_dirty metric with initial data.
+func (m *metricContainerMemoryTotalDirty) init() {
+ m.data.SetName("container.memory.total_dirty")
+ m.data.SetDescription("Bytes that are waiting to get written back to the disk, from this cgroup and descendants.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryTotalDirty) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryTotalDirty) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryTotalDirty) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryTotalDirty(settings MetricSettings) metricContainerMemoryTotalDirty {
+ m := metricContainerMemoryTotalDirty{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryTotalInactiveAnon struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.total_inactive_anon metric with initial data.
+func (m *metricContainerMemoryTotalInactiveAnon) init() {
+ m.data.SetName("container.memory.total_inactive_anon")
+ m.data.SetDescription("The amount of anonymous memory that has been identified as inactive by the kernel. Includes descendant cgroups.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryTotalInactiveAnon) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryTotalInactiveAnon) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryTotalInactiveAnon) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryTotalInactiveAnon(settings MetricSettings) metricContainerMemoryTotalInactiveAnon {
+ m := metricContainerMemoryTotalInactiveAnon{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryTotalInactiveFile struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.total_inactive_file metric with initial data.
+func (m *metricContainerMemoryTotalInactiveFile) init() {
+ m.data.SetName("container.memory.total_inactive_file")
+ m.data.SetDescription("Cache memory that has been identified as inactive by the kernel. Includes descendant cgroups.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryTotalInactiveFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryTotalInactiveFile) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryTotalInactiveFile) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryTotalInactiveFile(settings MetricSettings) metricContainerMemoryTotalInactiveFile {
+ m := metricContainerMemoryTotalInactiveFile{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryTotalMappedFile struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.total_mapped_file metric with initial data.
+func (m *metricContainerMemoryTotalMappedFile) init() {
+ m.data.SetName("container.memory.total_mapped_file")
+ m.data.SetDescription("Indicates the amount of memory mapped by the processes in the control group and descendant groups.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryTotalMappedFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryTotalMappedFile) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryTotalMappedFile) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryTotalMappedFile(settings MetricSettings) metricContainerMemoryTotalMappedFile {
+ m := metricContainerMemoryTotalMappedFile{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryTotalPgfault struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.total_pgfault metric with initial data.
+func (m *metricContainerMemoryTotalPgfault) init() {
+ m.data.SetName("container.memory.total_pgfault")
+	m.data.SetDescription("Indicates the number of times that a process of the cgroup (or descendant cgroups) triggered a page fault.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricContainerMemoryTotalPgfault) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryTotalPgfault) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryTotalPgfault) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryTotalPgfault(settings MetricSettings) metricContainerMemoryTotalPgfault {
+ m := metricContainerMemoryTotalPgfault{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
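+
+// Note (illustrative): unlike the gauge-typed memory metrics above, the
+// page-fault counters are cumulative monotonic sums, so a consumer derives a
+// per-interval count from the delta of two successive cumulative points:
+//
+//	delta := curr.IntVal() - prev.IntVal() // curr, prev: pmetric.NumberDataPoint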
+
+type metricContainerMemoryTotalPgmajfault struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.total_pgmajfault metric with initial data.
+func (m *metricContainerMemoryTotalPgmajfault) init() {
+ m.data.SetName("container.memory.total_pgmajfault")
+	m.data.SetDescription("Indicates the number of times that a process of the cgroup (or descendant cgroups) triggered a major fault.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricContainerMemoryTotalPgmajfault) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryTotalPgmajfault) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryTotalPgmajfault) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryTotalPgmajfault(settings MetricSettings) metricContainerMemoryTotalPgmajfault {
+ m := metricContainerMemoryTotalPgmajfault{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryTotalPgpgin struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.total_pgpgin metric with initial data.
+func (m *metricContainerMemoryTotalPgpgin) init() {
+ m.data.SetName("container.memory.total_pgpgin")
+ m.data.SetDescription("Number of pages read from disk by the cgroup and descendant groups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricContainerMemoryTotalPgpgin) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryTotalPgpgin) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryTotalPgpgin) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryTotalPgpgin(settings MetricSettings) metricContainerMemoryTotalPgpgin {
+ m := metricContainerMemoryTotalPgpgin{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryTotalPgpgout struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.total_pgpgout metric with initial data.
+func (m *metricContainerMemoryTotalPgpgout) init() {
+ m.data.SetName("container.memory.total_pgpgout")
+ m.data.SetDescription("Number of pages written to disk by the cgroup and descendant groups.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricContainerMemoryTotalPgpgout) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryTotalPgpgout) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryTotalPgpgout) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryTotalPgpgout(settings MetricSettings) metricContainerMemoryTotalPgpgout {
+ m := metricContainerMemoryTotalPgpgout{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryTotalRss struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.total_rss metric with initial data.
+func (m *metricContainerMemoryTotalRss) init() {
+ m.data.SetName("container.memory.total_rss")
+ m.data.SetDescription("The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps. Includes descendant cgroups.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryTotalRss) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryTotalRss) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryTotalRss) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryTotalRss(settings MetricSettings) metricContainerMemoryTotalRss {
+ m := metricContainerMemoryTotalRss{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryTotalRssHuge struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.total_rss_huge metric with initial data.
+func (m *metricContainerMemoryTotalRssHuge) init() {
+ m.data.SetName("container.memory.total_rss_huge")
+ m.data.SetDescription("Number of bytes of anonymous transparent hugepages in this cgroup and descendant cgroups.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryTotalRssHuge) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryTotalRssHuge) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryTotalRssHuge) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryTotalRssHuge(settings MetricSettings) metricContainerMemoryTotalRssHuge {
+ m := metricContainerMemoryTotalRssHuge{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryTotalSwap struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.total_swap metric with initial data.
+func (m *metricContainerMemoryTotalSwap) init() {
+ m.data.SetName("container.memory.total_swap")
+ m.data.SetDescription("The amount of swap currently used by the processes in this cgroup and descendant groups.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryTotalSwap) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryTotalSwap) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryTotalSwap) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryTotalSwap(settings MetricSettings) metricContainerMemoryTotalSwap {
+ m := metricContainerMemoryTotalSwap{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryTotalUnevictable struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.total_unevictable metric with initial data.
+func (m *metricContainerMemoryTotalUnevictable) init() {
+ m.data.SetName("container.memory.total_unevictable")
+ m.data.SetDescription("The amount of memory that cannot be reclaimed. Includes descendant cgroups.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryTotalUnevictable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryTotalUnevictable) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryTotalUnevictable) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryTotalUnevictable(settings MetricSettings) metricContainerMemoryTotalUnevictable {
+ m := metricContainerMemoryTotalUnevictable{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryTotalWriteback struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.total_writeback metric with initial data.
+func (m *metricContainerMemoryTotalWriteback) init() {
+ m.data.SetName("container.memory.total_writeback")
+ m.data.SetDescription("Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup and descendants.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryTotalWriteback) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryTotalWriteback) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryTotalWriteback) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryTotalWriteback(settings MetricSettings) metricContainerMemoryTotalWriteback {
+ m := metricContainerMemoryTotalWriteback{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryUnevictable struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.unevictable metric with initial data.
+func (m *metricContainerMemoryUnevictable) init() {
+ m.data.SetName("container.memory.unevictable")
+ m.data.SetDescription("The amount of memory that cannot be reclaimed.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryUnevictable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryUnevictable) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryUnevictable) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryUnevictable(settings MetricSettings) metricContainerMemoryUnevictable {
+ m := metricContainerMemoryUnevictable{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryUsageLimit struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.usage.limit metric with initial data.
+func (m *metricContainerMemoryUsageLimit) init() {
+ m.data.SetName("container.memory.usage.limit")
+ m.data.SetDescription("Memory limit of the container.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryUsageLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryUsageLimit) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryUsageLimit) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryUsageLimit(settings MetricSettings) metricContainerMemoryUsageLimit {
+ m := metricContainerMemoryUsageLimit{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryUsageMax struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.usage.max metric with initial data.
+func (m *metricContainerMemoryUsageMax) init() {
+ m.data.SetName("container.memory.usage.max")
+ m.data.SetDescription("Maximum memory usage.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryUsageMax) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryUsageMax) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryUsageMax(settings MetricSettings) metricContainerMemoryUsageMax {
+ m := metricContainerMemoryUsageMax{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryUsageTotal struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.usage.total metric with initial data.
+func (m *metricContainerMemoryUsageTotal) init() {
+ m.data.SetName("container.memory.usage.total")
+ m.data.SetDescription("Memory usage of the container. This excludes the total cache.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryUsageTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryUsageTotal) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryUsageTotal) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryUsageTotal(settings MetricSettings) metricContainerMemoryUsageTotal {
+ m := metricContainerMemoryUsageTotal{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerMemoryWriteback struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.memory.writeback metric with initial data.
+func (m *metricContainerMemoryWriteback) init() {
+ m.data.SetName("container.memory.writeback")
+ m.data.SetDescription("Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricContainerMemoryWriteback) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerMemoryWriteback) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerMemoryWriteback) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerMemoryWriteback(settings MetricSettings) metricContainerMemoryWriteback {
+ m := metricContainerMemoryWriteback{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerNetworkIoUsageRxBytes struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.network.io.usage.rx_bytes metric with initial data.
+func (m *metricContainerNetworkIoUsageRxBytes) init() {
+ m.data.SetName("container.network.io.usage.rx_bytes")
+ m.data.SetDescription("Bytes received by the container.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
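+
+// Note: init pre-sizes the fresh data-point slice with EnsureCapacity using
+// the high-water mark tracked by updateCapacity below, so steady-state
+// scrapes avoid re-growing the slice on every cycle.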
+
+func (m *metricContainerNetworkIoUsageRxBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("interface", pcommon.NewValueString(interfaceAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerNetworkIoUsageRxBytes) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerNetworkIoUsageRxBytes) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerNetworkIoUsageRxBytes(settings MetricSettings) metricContainerNetworkIoUsageRxBytes {
+ m := metricContainerNetworkIoUsageRxBytes{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
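+
+// Usage sketch (illustrative): the network metrics take an extra argument
+// that becomes the "interface" data point attribute, so one point is
+// recorded per interface per scrape. `m`, `start`, and `now` are assumed
+// to be in scope:
+//
+//	m.recordDataPoint(start, now, 1024, "eth0")
+//	m.recordDataPoint(start, now, 256, "lo")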
+
+type metricContainerNetworkIoUsageRxDropped struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.network.io.usage.rx_dropped metric with initial data.
+func (m *metricContainerNetworkIoUsageRxDropped) init() {
+ m.data.SetName("container.network.io.usage.rx_dropped")
+ m.data.SetDescription("Incoming packets dropped.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerNetworkIoUsageRxDropped) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("interface", pcommon.NewValueString(interfaceAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerNetworkIoUsageRxDropped) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerNetworkIoUsageRxDropped) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerNetworkIoUsageRxDropped(settings MetricSettings) metricContainerNetworkIoUsageRxDropped {
+ m := metricContainerNetworkIoUsageRxDropped{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerNetworkIoUsageRxErrors struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.network.io.usage.rx_errors metric with initial data.
+func (m *metricContainerNetworkIoUsageRxErrors) init() {
+ m.data.SetName("container.network.io.usage.rx_errors")
+ m.data.SetDescription("Received errors.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerNetworkIoUsageRxErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("interface", pcommon.NewValueString(interfaceAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerNetworkIoUsageRxErrors) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerNetworkIoUsageRxErrors) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerNetworkIoUsageRxErrors(settings MetricSettings) metricContainerNetworkIoUsageRxErrors {
+ m := metricContainerNetworkIoUsageRxErrors{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerNetworkIoUsageRxPackets struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.network.io.usage.rx_packets metric with initial data.
+func (m *metricContainerNetworkIoUsageRxPackets) init() {
+ m.data.SetName("container.network.io.usage.rx_packets")
+ m.data.SetDescription("Packets received.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerNetworkIoUsageRxPackets) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("interface", pcommon.NewValueString(interfaceAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerNetworkIoUsageRxPackets) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerNetworkIoUsageRxPackets) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerNetworkIoUsageRxPackets(settings MetricSettings) metricContainerNetworkIoUsageRxPackets {
+ m := metricContainerNetworkIoUsageRxPackets{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerNetworkIoUsageTxBytes struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.network.io.usage.tx_bytes metric with initial data.
+func (m *metricContainerNetworkIoUsageTxBytes) init() {
+ m.data.SetName("container.network.io.usage.tx_bytes")
+ m.data.SetDescription("Bytes sent.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerNetworkIoUsageTxBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("interface", pcommon.NewValueString(interfaceAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerNetworkIoUsageTxBytes) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerNetworkIoUsageTxBytes) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerNetworkIoUsageTxBytes(settings MetricSettings) metricContainerNetworkIoUsageTxBytes {
+ m := metricContainerNetworkIoUsageTxBytes{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerNetworkIoUsageTxDropped struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.network.io.usage.tx_dropped metric with initial data.
+func (m *metricContainerNetworkIoUsageTxDropped) init() {
+ m.data.SetName("container.network.io.usage.tx_dropped")
+ m.data.SetDescription("Outgoing packets dropped.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerNetworkIoUsageTxDropped) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("interface", pcommon.NewValueString(interfaceAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerNetworkIoUsageTxDropped) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerNetworkIoUsageTxDropped) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerNetworkIoUsageTxDropped(settings MetricSettings) metricContainerNetworkIoUsageTxDropped {
+ m := metricContainerNetworkIoUsageTxDropped{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerNetworkIoUsageTxErrors struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.network.io.usage.tx_errors metric with initial data.
+func (m *metricContainerNetworkIoUsageTxErrors) init() {
+ m.data.SetName("container.network.io.usage.tx_errors")
+ m.data.SetDescription("Sent errors.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerNetworkIoUsageTxErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("interface", pcommon.NewValueString(interfaceAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerNetworkIoUsageTxErrors) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerNetworkIoUsageTxErrors) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerNetworkIoUsageTxErrors(settings MetricSettings) metricContainerNetworkIoUsageTxErrors {
+ m := metricContainerNetworkIoUsageTxErrors{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricContainerNetworkIoUsageTxPackets struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills container.network.io.usage.tx_packets metric with initial data.
+func (m *metricContainerNetworkIoUsageTxPackets) init() {
+ m.data.SetName("container.network.io.usage.tx_packets")
+ m.data.SetDescription("Packets sent.")
+ m.data.SetUnit("1")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricContainerNetworkIoUsageTxPackets) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert("interface", pcommon.NewValueString(interfaceAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricContainerNetworkIoUsageTxPackets) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricContainerNetworkIoUsageTxPackets) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricContainerNetworkIoUsageTxPackets(settings MetricSettings) metricContainerNetworkIoUsageTxPackets {
+ m := metricContainerNetworkIoUsageTxPackets{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
+// required to produce the metric representation defined in the metadata and user settings.
+type MetricsBuilder struct {
+ startTime pcommon.Timestamp // start time that will be applied to all recorded data points.
+ metricsCapacity int // maximum observed number of metrics per resource.
+ resourceCapacity int // maximum observed number of resource attributes.
+ metricsBuffer pmetric.Metrics // accumulates metrics data before emitting.
+	buildInfo                                             component.BuildInfo // contains version information.
+ metricContainerBlockioIoMergedRecursiveAsync metricContainerBlockioIoMergedRecursiveAsync
+ metricContainerBlockioIoMergedRecursiveDiscard metricContainerBlockioIoMergedRecursiveDiscard
+ metricContainerBlockioIoMergedRecursiveRead metricContainerBlockioIoMergedRecursiveRead
+ metricContainerBlockioIoMergedRecursiveSync metricContainerBlockioIoMergedRecursiveSync
+ metricContainerBlockioIoMergedRecursiveTotal metricContainerBlockioIoMergedRecursiveTotal
+ metricContainerBlockioIoMergedRecursiveWrite metricContainerBlockioIoMergedRecursiveWrite
+ metricContainerBlockioIoQueuedRecursiveAsync metricContainerBlockioIoQueuedRecursiveAsync
+ metricContainerBlockioIoQueuedRecursiveDiscard metricContainerBlockioIoQueuedRecursiveDiscard
+ metricContainerBlockioIoQueuedRecursiveRead metricContainerBlockioIoQueuedRecursiveRead
+ metricContainerBlockioIoQueuedRecursiveSync metricContainerBlockioIoQueuedRecursiveSync
+ metricContainerBlockioIoQueuedRecursiveTotal metricContainerBlockioIoQueuedRecursiveTotal
+ metricContainerBlockioIoQueuedRecursiveWrite metricContainerBlockioIoQueuedRecursiveWrite
+ metricContainerBlockioIoServiceBytesRecursiveAsync metricContainerBlockioIoServiceBytesRecursiveAsync
+ metricContainerBlockioIoServiceBytesRecursiveDiscard metricContainerBlockioIoServiceBytesRecursiveDiscard
+ metricContainerBlockioIoServiceBytesRecursiveRead metricContainerBlockioIoServiceBytesRecursiveRead
+ metricContainerBlockioIoServiceBytesRecursiveSync metricContainerBlockioIoServiceBytesRecursiveSync
+ metricContainerBlockioIoServiceBytesRecursiveTotal metricContainerBlockioIoServiceBytesRecursiveTotal
+ metricContainerBlockioIoServiceBytesRecursiveWrite metricContainerBlockioIoServiceBytesRecursiveWrite
+ metricContainerBlockioIoServiceTimeRecursiveAsync metricContainerBlockioIoServiceTimeRecursiveAsync
+ metricContainerBlockioIoServiceTimeRecursiveDiscard metricContainerBlockioIoServiceTimeRecursiveDiscard
+ metricContainerBlockioIoServiceTimeRecursiveRead metricContainerBlockioIoServiceTimeRecursiveRead
+ metricContainerBlockioIoServiceTimeRecursiveSync metricContainerBlockioIoServiceTimeRecursiveSync
+ metricContainerBlockioIoServiceTimeRecursiveTotal metricContainerBlockioIoServiceTimeRecursiveTotal
+ metricContainerBlockioIoServiceTimeRecursiveWrite metricContainerBlockioIoServiceTimeRecursiveWrite
+ metricContainerBlockioIoServicedRecursiveAsync metricContainerBlockioIoServicedRecursiveAsync
+ metricContainerBlockioIoServicedRecursiveDiscard metricContainerBlockioIoServicedRecursiveDiscard
+ metricContainerBlockioIoServicedRecursiveRead metricContainerBlockioIoServicedRecursiveRead
+ metricContainerBlockioIoServicedRecursiveSync metricContainerBlockioIoServicedRecursiveSync
+ metricContainerBlockioIoServicedRecursiveTotal metricContainerBlockioIoServicedRecursiveTotal
+ metricContainerBlockioIoServicedRecursiveWrite metricContainerBlockioIoServicedRecursiveWrite
+ metricContainerBlockioIoTimeRecursiveAsync metricContainerBlockioIoTimeRecursiveAsync
+ metricContainerBlockioIoTimeRecursiveDiscard metricContainerBlockioIoTimeRecursiveDiscard
+ metricContainerBlockioIoTimeRecursiveRead metricContainerBlockioIoTimeRecursiveRead
+ metricContainerBlockioIoTimeRecursiveSync metricContainerBlockioIoTimeRecursiveSync
+ metricContainerBlockioIoTimeRecursiveTotal metricContainerBlockioIoTimeRecursiveTotal
+ metricContainerBlockioIoTimeRecursiveWrite metricContainerBlockioIoTimeRecursiveWrite
+ metricContainerBlockioIoWaitTimeRecursiveAsync metricContainerBlockioIoWaitTimeRecursiveAsync
+ metricContainerBlockioIoWaitTimeRecursiveDiscard metricContainerBlockioIoWaitTimeRecursiveDiscard
+ metricContainerBlockioIoWaitTimeRecursiveRead metricContainerBlockioIoWaitTimeRecursiveRead
+ metricContainerBlockioIoWaitTimeRecursiveSync metricContainerBlockioIoWaitTimeRecursiveSync
+ metricContainerBlockioIoWaitTimeRecursiveTotal metricContainerBlockioIoWaitTimeRecursiveTotal
+ metricContainerBlockioIoWaitTimeRecursiveWrite metricContainerBlockioIoWaitTimeRecursiveWrite
+ metricContainerBlockioSectorsRecursiveAsync metricContainerBlockioSectorsRecursiveAsync
+ metricContainerBlockioSectorsRecursiveDiscard metricContainerBlockioSectorsRecursiveDiscard
+ metricContainerBlockioSectorsRecursiveRead metricContainerBlockioSectorsRecursiveRead
+ metricContainerBlockioSectorsRecursiveSync metricContainerBlockioSectorsRecursiveSync
+ metricContainerBlockioSectorsRecursiveTotal metricContainerBlockioSectorsRecursiveTotal
+ metricContainerBlockioSectorsRecursiveWrite metricContainerBlockioSectorsRecursiveWrite
+ metricContainerCPUPercent metricContainerCPUPercent
+ metricContainerCPUThrottlingDataPeriods metricContainerCPUThrottlingDataPeriods
+ metricContainerCPUThrottlingDataThrottledPeriods metricContainerCPUThrottlingDataThrottledPeriods
+ metricContainerCPUThrottlingDataThrottledTime metricContainerCPUThrottlingDataThrottledTime
+ metricContainerCPUUsageKernelmode metricContainerCPUUsageKernelmode
+ metricContainerCPUUsagePercpu metricContainerCPUUsagePercpu
+ metricContainerCPUUsageSystem metricContainerCPUUsageSystem
+ metricContainerCPUUsageTotal metricContainerCPUUsageTotal
+ metricContainerCPUUsageUsermode metricContainerCPUUsageUsermode
+ metricContainerMemoryActiveAnon metricContainerMemoryActiveAnon
+ metricContainerMemoryActiveFile metricContainerMemoryActiveFile
+ metricContainerMemoryCache metricContainerMemoryCache
+ metricContainerMemoryDirty metricContainerMemoryDirty
+ metricContainerMemoryHierarchicalMemoryLimit metricContainerMemoryHierarchicalMemoryLimit
+ metricContainerMemoryHierarchicalMemswLimit metricContainerMemoryHierarchicalMemswLimit
+ metricContainerMemoryInactiveAnon metricContainerMemoryInactiveAnon
+ metricContainerMemoryInactiveFile metricContainerMemoryInactiveFile
+ metricContainerMemoryMappedFile metricContainerMemoryMappedFile
+ metricContainerMemoryPercent metricContainerMemoryPercent
+ metricContainerMemoryPgfault metricContainerMemoryPgfault
+ metricContainerMemoryPgmajfault metricContainerMemoryPgmajfault
+ metricContainerMemoryPgpgin metricContainerMemoryPgpgin
+ metricContainerMemoryPgpgout metricContainerMemoryPgpgout
+ metricContainerMemoryRss metricContainerMemoryRss
+ metricContainerMemoryRssHuge metricContainerMemoryRssHuge
+ metricContainerMemorySwap metricContainerMemorySwap
+ metricContainerMemoryTotalActiveAnon metricContainerMemoryTotalActiveAnon
+ metricContainerMemoryTotalActiveFile metricContainerMemoryTotalActiveFile
+ metricContainerMemoryTotalCache metricContainerMemoryTotalCache
+ metricContainerMemoryTotalDirty metricContainerMemoryTotalDirty
+ metricContainerMemoryTotalInactiveAnon metricContainerMemoryTotalInactiveAnon
+ metricContainerMemoryTotalInactiveFile metricContainerMemoryTotalInactiveFile
+ metricContainerMemoryTotalMappedFile metricContainerMemoryTotalMappedFile
+ metricContainerMemoryTotalPgfault metricContainerMemoryTotalPgfault
+ metricContainerMemoryTotalPgmajfault metricContainerMemoryTotalPgmajfault
+ metricContainerMemoryTotalPgpgin metricContainerMemoryTotalPgpgin
+ metricContainerMemoryTotalPgpgout metricContainerMemoryTotalPgpgout
+ metricContainerMemoryTotalRss metricContainerMemoryTotalRss
+ metricContainerMemoryTotalRssHuge metricContainerMemoryTotalRssHuge
+ metricContainerMemoryTotalSwap metricContainerMemoryTotalSwap
+ metricContainerMemoryTotalUnevictable metricContainerMemoryTotalUnevictable
+ metricContainerMemoryTotalWriteback metricContainerMemoryTotalWriteback
+ metricContainerMemoryUnevictable metricContainerMemoryUnevictable
+ metricContainerMemoryUsageLimit metricContainerMemoryUsageLimit
+ metricContainerMemoryUsageMax metricContainerMemoryUsageMax
+ metricContainerMemoryUsageTotal metricContainerMemoryUsageTotal
+ metricContainerMemoryWriteback metricContainerMemoryWriteback
+ metricContainerNetworkIoUsageRxBytes metricContainerNetworkIoUsageRxBytes
+ metricContainerNetworkIoUsageRxDropped metricContainerNetworkIoUsageRxDropped
+ metricContainerNetworkIoUsageRxErrors metricContainerNetworkIoUsageRxErrors
+ metricContainerNetworkIoUsageRxPackets metricContainerNetworkIoUsageRxPackets
+ metricContainerNetworkIoUsageTxBytes metricContainerNetworkIoUsageTxBytes
+ metricContainerNetworkIoUsageTxDropped metricContainerNetworkIoUsageTxDropped
+ metricContainerNetworkIoUsageTxErrors metricContainerNetworkIoUsageTxErrors
+ metricContainerNetworkIoUsageTxPackets metricContainerNetworkIoUsageTxPackets
+}
+
+// metricBuilderOption applies changes to the default metrics builder.
+type metricBuilderOption func(*MetricsBuilder)
+
+// WithStartTime sets startTime on the metrics builder.
+func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption {
+ return func(mb *MetricsBuilder) {
+ mb.startTime = startTime
+ }
+}
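+
+// Usage sketch (illustrative): the receiver constructs a single builder per
+// scraper and may override the default start time via the functional option
+// above. `settings`, `buildInfo`, and `containerStartTime` (a time.Time) are
+// assumed to be in scope:
+//
+//	mb := NewMetricsBuilder(settings, buildInfo,
+//		WithStartTime(pcommon.NewTimestampFromTime(containerStartTime)))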
+
+func NewMetricsBuilder(settings MetricsSettings, buildInfo component.BuildInfo, options ...metricBuilderOption) *MetricsBuilder {
+ mb := &MetricsBuilder{
+ startTime: pcommon.NewTimestampFromTime(time.Now()),
+ metricsBuffer: pmetric.NewMetrics(),
+ buildInfo: buildInfo,
+ metricContainerBlockioIoMergedRecursiveAsync: newMetricContainerBlockioIoMergedRecursiveAsync(settings.ContainerBlockioIoMergedRecursiveAsync),
+ metricContainerBlockioIoMergedRecursiveDiscard: newMetricContainerBlockioIoMergedRecursiveDiscard(settings.ContainerBlockioIoMergedRecursiveDiscard),
+ metricContainerBlockioIoMergedRecursiveRead: newMetricContainerBlockioIoMergedRecursiveRead(settings.ContainerBlockioIoMergedRecursiveRead),
+ metricContainerBlockioIoMergedRecursiveSync: newMetricContainerBlockioIoMergedRecursiveSync(settings.ContainerBlockioIoMergedRecursiveSync),
+ metricContainerBlockioIoMergedRecursiveTotal: newMetricContainerBlockioIoMergedRecursiveTotal(settings.ContainerBlockioIoMergedRecursiveTotal),
+ metricContainerBlockioIoMergedRecursiveWrite: newMetricContainerBlockioIoMergedRecursiveWrite(settings.ContainerBlockioIoMergedRecursiveWrite),
+ metricContainerBlockioIoQueuedRecursiveAsync: newMetricContainerBlockioIoQueuedRecursiveAsync(settings.ContainerBlockioIoQueuedRecursiveAsync),
+ metricContainerBlockioIoQueuedRecursiveDiscard: newMetricContainerBlockioIoQueuedRecursiveDiscard(settings.ContainerBlockioIoQueuedRecursiveDiscard),
+ metricContainerBlockioIoQueuedRecursiveRead: newMetricContainerBlockioIoQueuedRecursiveRead(settings.ContainerBlockioIoQueuedRecursiveRead),
+ metricContainerBlockioIoQueuedRecursiveSync: newMetricContainerBlockioIoQueuedRecursiveSync(settings.ContainerBlockioIoQueuedRecursiveSync),
+ metricContainerBlockioIoQueuedRecursiveTotal: newMetricContainerBlockioIoQueuedRecursiveTotal(settings.ContainerBlockioIoQueuedRecursiveTotal),
+ metricContainerBlockioIoQueuedRecursiveWrite: newMetricContainerBlockioIoQueuedRecursiveWrite(settings.ContainerBlockioIoQueuedRecursiveWrite),
+ metricContainerBlockioIoServiceBytesRecursiveAsync: newMetricContainerBlockioIoServiceBytesRecursiveAsync(settings.ContainerBlockioIoServiceBytesRecursiveAsync),
+ metricContainerBlockioIoServiceBytesRecursiveDiscard: newMetricContainerBlockioIoServiceBytesRecursiveDiscard(settings.ContainerBlockioIoServiceBytesRecursiveDiscard),
+ metricContainerBlockioIoServiceBytesRecursiveRead: newMetricContainerBlockioIoServiceBytesRecursiveRead(settings.ContainerBlockioIoServiceBytesRecursiveRead),
+ metricContainerBlockioIoServiceBytesRecursiveSync: newMetricContainerBlockioIoServiceBytesRecursiveSync(settings.ContainerBlockioIoServiceBytesRecursiveSync),
+ metricContainerBlockioIoServiceBytesRecursiveTotal: newMetricContainerBlockioIoServiceBytesRecursiveTotal(settings.ContainerBlockioIoServiceBytesRecursiveTotal),
+ metricContainerBlockioIoServiceBytesRecursiveWrite: newMetricContainerBlockioIoServiceBytesRecursiveWrite(settings.ContainerBlockioIoServiceBytesRecursiveWrite),
+ metricContainerBlockioIoServiceTimeRecursiveAsync: newMetricContainerBlockioIoServiceTimeRecursiveAsync(settings.ContainerBlockioIoServiceTimeRecursiveAsync),
+ metricContainerBlockioIoServiceTimeRecursiveDiscard: newMetricContainerBlockioIoServiceTimeRecursiveDiscard(settings.ContainerBlockioIoServiceTimeRecursiveDiscard),
+ metricContainerBlockioIoServiceTimeRecursiveRead: newMetricContainerBlockioIoServiceTimeRecursiveRead(settings.ContainerBlockioIoServiceTimeRecursiveRead),
+ metricContainerBlockioIoServiceTimeRecursiveSync: newMetricContainerBlockioIoServiceTimeRecursiveSync(settings.ContainerBlockioIoServiceTimeRecursiveSync),
+ metricContainerBlockioIoServiceTimeRecursiveTotal: newMetricContainerBlockioIoServiceTimeRecursiveTotal(settings.ContainerBlockioIoServiceTimeRecursiveTotal),
+ metricContainerBlockioIoServiceTimeRecursiveWrite: newMetricContainerBlockioIoServiceTimeRecursiveWrite(settings.ContainerBlockioIoServiceTimeRecursiveWrite),
+ metricContainerBlockioIoServicedRecursiveAsync: newMetricContainerBlockioIoServicedRecursiveAsync(settings.ContainerBlockioIoServicedRecursiveAsync),
+ metricContainerBlockioIoServicedRecursiveDiscard: newMetricContainerBlockioIoServicedRecursiveDiscard(settings.ContainerBlockioIoServicedRecursiveDiscard),
+ metricContainerBlockioIoServicedRecursiveRead: newMetricContainerBlockioIoServicedRecursiveRead(settings.ContainerBlockioIoServicedRecursiveRead),
+ metricContainerBlockioIoServicedRecursiveSync: newMetricContainerBlockioIoServicedRecursiveSync(settings.ContainerBlockioIoServicedRecursiveSync),
+ metricContainerBlockioIoServicedRecursiveTotal: newMetricContainerBlockioIoServicedRecursiveTotal(settings.ContainerBlockioIoServicedRecursiveTotal),
+ metricContainerBlockioIoServicedRecursiveWrite: newMetricContainerBlockioIoServicedRecursiveWrite(settings.ContainerBlockioIoServicedRecursiveWrite),
+ metricContainerBlockioIoTimeRecursiveAsync: newMetricContainerBlockioIoTimeRecursiveAsync(settings.ContainerBlockioIoTimeRecursiveAsync),
+ metricContainerBlockioIoTimeRecursiveDiscard: newMetricContainerBlockioIoTimeRecursiveDiscard(settings.ContainerBlockioIoTimeRecursiveDiscard),
+ metricContainerBlockioIoTimeRecursiveRead: newMetricContainerBlockioIoTimeRecursiveRead(settings.ContainerBlockioIoTimeRecursiveRead),
+ metricContainerBlockioIoTimeRecursiveSync: newMetricContainerBlockioIoTimeRecursiveSync(settings.ContainerBlockioIoTimeRecursiveSync),
+ metricContainerBlockioIoTimeRecursiveTotal: newMetricContainerBlockioIoTimeRecursiveTotal(settings.ContainerBlockioIoTimeRecursiveTotal),
+ metricContainerBlockioIoTimeRecursiveWrite: newMetricContainerBlockioIoTimeRecursiveWrite(settings.ContainerBlockioIoTimeRecursiveWrite),
+ metricContainerBlockioIoWaitTimeRecursiveAsync: newMetricContainerBlockioIoWaitTimeRecursiveAsync(settings.ContainerBlockioIoWaitTimeRecursiveAsync),
+ metricContainerBlockioIoWaitTimeRecursiveDiscard: newMetricContainerBlockioIoWaitTimeRecursiveDiscard(settings.ContainerBlockioIoWaitTimeRecursiveDiscard),
+ metricContainerBlockioIoWaitTimeRecursiveRead: newMetricContainerBlockioIoWaitTimeRecursiveRead(settings.ContainerBlockioIoWaitTimeRecursiveRead),
+ metricContainerBlockioIoWaitTimeRecursiveSync: newMetricContainerBlockioIoWaitTimeRecursiveSync(settings.ContainerBlockioIoWaitTimeRecursiveSync),
+ metricContainerBlockioIoWaitTimeRecursiveTotal: newMetricContainerBlockioIoWaitTimeRecursiveTotal(settings.ContainerBlockioIoWaitTimeRecursiveTotal),
+ metricContainerBlockioIoWaitTimeRecursiveWrite: newMetricContainerBlockioIoWaitTimeRecursiveWrite(settings.ContainerBlockioIoWaitTimeRecursiveWrite),
+ metricContainerBlockioSectorsRecursiveAsync: newMetricContainerBlockioSectorsRecursiveAsync(settings.ContainerBlockioSectorsRecursiveAsync),
+ metricContainerBlockioSectorsRecursiveDiscard: newMetricContainerBlockioSectorsRecursiveDiscard(settings.ContainerBlockioSectorsRecursiveDiscard),
+ metricContainerBlockioSectorsRecursiveRead: newMetricContainerBlockioSectorsRecursiveRead(settings.ContainerBlockioSectorsRecursiveRead),
+ metricContainerBlockioSectorsRecursiveSync: newMetricContainerBlockioSectorsRecursiveSync(settings.ContainerBlockioSectorsRecursiveSync),
+ metricContainerBlockioSectorsRecursiveTotal: newMetricContainerBlockioSectorsRecursiveTotal(settings.ContainerBlockioSectorsRecursiveTotal),
+ metricContainerBlockioSectorsRecursiveWrite: newMetricContainerBlockioSectorsRecursiveWrite(settings.ContainerBlockioSectorsRecursiveWrite),
+ metricContainerCPUPercent: newMetricContainerCPUPercent(settings.ContainerCPUPercent),
+ metricContainerCPUThrottlingDataPeriods: newMetricContainerCPUThrottlingDataPeriods(settings.ContainerCPUThrottlingDataPeriods),
+ metricContainerCPUThrottlingDataThrottledPeriods: newMetricContainerCPUThrottlingDataThrottledPeriods(settings.ContainerCPUThrottlingDataThrottledPeriods),
+ metricContainerCPUThrottlingDataThrottledTime: newMetricContainerCPUThrottlingDataThrottledTime(settings.ContainerCPUThrottlingDataThrottledTime),
+ metricContainerCPUUsageKernelmode: newMetricContainerCPUUsageKernelmode(settings.ContainerCPUUsageKernelmode),
+ metricContainerCPUUsagePercpu: newMetricContainerCPUUsagePercpu(settings.ContainerCPUUsagePercpu),
+ metricContainerCPUUsageSystem: newMetricContainerCPUUsageSystem(settings.ContainerCPUUsageSystem),
+ metricContainerCPUUsageTotal: newMetricContainerCPUUsageTotal(settings.ContainerCPUUsageTotal),
+ metricContainerCPUUsageUsermode: newMetricContainerCPUUsageUsermode(settings.ContainerCPUUsageUsermode),
+ metricContainerMemoryActiveAnon: newMetricContainerMemoryActiveAnon(settings.ContainerMemoryActiveAnon),
+ metricContainerMemoryActiveFile: newMetricContainerMemoryActiveFile(settings.ContainerMemoryActiveFile),
+ metricContainerMemoryCache: newMetricContainerMemoryCache(settings.ContainerMemoryCache),
+ metricContainerMemoryDirty: newMetricContainerMemoryDirty(settings.ContainerMemoryDirty),
+ metricContainerMemoryHierarchicalMemoryLimit: newMetricContainerMemoryHierarchicalMemoryLimit(settings.ContainerMemoryHierarchicalMemoryLimit),
+ metricContainerMemoryHierarchicalMemswLimit: newMetricContainerMemoryHierarchicalMemswLimit(settings.ContainerMemoryHierarchicalMemswLimit),
+ metricContainerMemoryInactiveAnon: newMetricContainerMemoryInactiveAnon(settings.ContainerMemoryInactiveAnon),
+ metricContainerMemoryInactiveFile: newMetricContainerMemoryInactiveFile(settings.ContainerMemoryInactiveFile),
+ metricContainerMemoryMappedFile: newMetricContainerMemoryMappedFile(settings.ContainerMemoryMappedFile),
+ metricContainerMemoryPercent: newMetricContainerMemoryPercent(settings.ContainerMemoryPercent),
+ metricContainerMemoryPgfault: newMetricContainerMemoryPgfault(settings.ContainerMemoryPgfault),
+ metricContainerMemoryPgmajfault: newMetricContainerMemoryPgmajfault(settings.ContainerMemoryPgmajfault),
+ metricContainerMemoryPgpgin: newMetricContainerMemoryPgpgin(settings.ContainerMemoryPgpgin),
+ metricContainerMemoryPgpgout: newMetricContainerMemoryPgpgout(settings.ContainerMemoryPgpgout),
+ metricContainerMemoryRss: newMetricContainerMemoryRss(settings.ContainerMemoryRss),
+ metricContainerMemoryRssHuge: newMetricContainerMemoryRssHuge(settings.ContainerMemoryRssHuge),
+ metricContainerMemorySwap: newMetricContainerMemorySwap(settings.ContainerMemorySwap),
+ metricContainerMemoryTotalActiveAnon: newMetricContainerMemoryTotalActiveAnon(settings.ContainerMemoryTotalActiveAnon),
+ metricContainerMemoryTotalActiveFile: newMetricContainerMemoryTotalActiveFile(settings.ContainerMemoryTotalActiveFile),
+ metricContainerMemoryTotalCache: newMetricContainerMemoryTotalCache(settings.ContainerMemoryTotalCache),
+ metricContainerMemoryTotalDirty: newMetricContainerMemoryTotalDirty(settings.ContainerMemoryTotalDirty),
+ metricContainerMemoryTotalInactiveAnon: newMetricContainerMemoryTotalInactiveAnon(settings.ContainerMemoryTotalInactiveAnon),
+ metricContainerMemoryTotalInactiveFile: newMetricContainerMemoryTotalInactiveFile(settings.ContainerMemoryTotalInactiveFile),
+ metricContainerMemoryTotalMappedFile: newMetricContainerMemoryTotalMappedFile(settings.ContainerMemoryTotalMappedFile),
+ metricContainerMemoryTotalPgfault: newMetricContainerMemoryTotalPgfault(settings.ContainerMemoryTotalPgfault),
+ metricContainerMemoryTotalPgmajfault: newMetricContainerMemoryTotalPgmajfault(settings.ContainerMemoryTotalPgmajfault),
+ metricContainerMemoryTotalPgpgin: newMetricContainerMemoryTotalPgpgin(settings.ContainerMemoryTotalPgpgin),
+ metricContainerMemoryTotalPgpgout: newMetricContainerMemoryTotalPgpgout(settings.ContainerMemoryTotalPgpgout),
+ metricContainerMemoryTotalRss: newMetricContainerMemoryTotalRss(settings.ContainerMemoryTotalRss),
+ metricContainerMemoryTotalRssHuge: newMetricContainerMemoryTotalRssHuge(settings.ContainerMemoryTotalRssHuge),
+ metricContainerMemoryTotalSwap: newMetricContainerMemoryTotalSwap(settings.ContainerMemoryTotalSwap),
+ metricContainerMemoryTotalUnevictable: newMetricContainerMemoryTotalUnevictable(settings.ContainerMemoryTotalUnevictable),
+ metricContainerMemoryTotalWriteback: newMetricContainerMemoryTotalWriteback(settings.ContainerMemoryTotalWriteback),
+ metricContainerMemoryUnevictable: newMetricContainerMemoryUnevictable(settings.ContainerMemoryUnevictable),
+ metricContainerMemoryUsageLimit: newMetricContainerMemoryUsageLimit(settings.ContainerMemoryUsageLimit),
+ metricContainerMemoryUsageMax: newMetricContainerMemoryUsageMax(settings.ContainerMemoryUsageMax),
+ metricContainerMemoryUsageTotal: newMetricContainerMemoryUsageTotal(settings.ContainerMemoryUsageTotal),
+ metricContainerMemoryWriteback: newMetricContainerMemoryWriteback(settings.ContainerMemoryWriteback),
+ metricContainerNetworkIoUsageRxBytes: newMetricContainerNetworkIoUsageRxBytes(settings.ContainerNetworkIoUsageRxBytes),
+ metricContainerNetworkIoUsageRxDropped: newMetricContainerNetworkIoUsageRxDropped(settings.ContainerNetworkIoUsageRxDropped),
+ metricContainerNetworkIoUsageRxErrors: newMetricContainerNetworkIoUsageRxErrors(settings.ContainerNetworkIoUsageRxErrors),
+ metricContainerNetworkIoUsageRxPackets: newMetricContainerNetworkIoUsageRxPackets(settings.ContainerNetworkIoUsageRxPackets),
+ metricContainerNetworkIoUsageTxBytes: newMetricContainerNetworkIoUsageTxBytes(settings.ContainerNetworkIoUsageTxBytes),
+ metricContainerNetworkIoUsageTxDropped: newMetricContainerNetworkIoUsageTxDropped(settings.ContainerNetworkIoUsageTxDropped),
+ metricContainerNetworkIoUsageTxErrors: newMetricContainerNetworkIoUsageTxErrors(settings.ContainerNetworkIoUsageTxErrors),
+ metricContainerNetworkIoUsageTxPackets: newMetricContainerNetworkIoUsageTxPackets(settings.ContainerNetworkIoUsageTxPackets),
+ }
+ for _, op := range options {
+ op(mb)
+ }
+ return mb
+}
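+
+// Example (illustrative sketch, not generated code; assumes this constructor's
+// signature takes settings and component.BuildInfo, and that the generated
+// DefaultMetricsSettings helper and WithStartTime option exist in this package;
+// scrapeStart is a hypothetical time.Time):
+//
+//	mb := NewMetricsBuilder(DefaultMetricsSettings(), component.NewDefaultBuildInfo(),
+//		WithStartTime(pcommon.NewTimestampFromTime(scrapeStart)))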
+
+// updateCapacity tracks the maximum metric and resource attribute counts seen so far; these maxima are used as slice capacities for subsequently emitted resource metrics.
+func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
+ if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
+ mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
+ }
+ if mb.resourceCapacity < rm.Resource().Attributes().Len() {
+ mb.resourceCapacity = rm.Resource().Attributes().Len()
+ }
+}
+
+// ResourceMetricsOption applies changes to provided resource metrics.
+type ResourceMetricsOption func(pmetric.ResourceMetrics)
+
+// WithContainerHostname sets provided value as "container.hostname" attribute for current resource.
+func WithContainerHostname(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("container.hostname", val)
+ }
+}
+
+// WithContainerID sets provided value as "container.id" attribute for current resource.
+func WithContainerID(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("container.id", val)
+ }
+}
+
+// WithContainerImageName sets provided value as "container.image.name" attribute for current resource.
+func WithContainerImageName(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("container.image.name", val)
+ }
+}
+
+// WithContainerName sets provided value as "container.name" attribute for current resource.
+func WithContainerName(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("container.name", val)
+ }
+}
+
+// WithContainerRuntime sets provided value as "container.runtime" attribute for current resource.
+func WithContainerRuntime(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString("container.runtime", val)
+ }
+}
+
+// WithStartTimeOverride overrides the start time for all data points of the resource metrics.
+// This option should only be used if a different start time has to be set on metrics coming from different resources.
+func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ var dps pmetric.NumberDataPointSlice
+ metrics := rm.ScopeMetrics().At(0).Metrics()
+ for i := 0; i < metrics.Len(); i++ {
+ switch metrics.At(i).DataType() {
+ case pmetric.MetricDataTypeGauge:
+ dps = metrics.At(i).Gauge().DataPoints()
+ case pmetric.MetricDataTypeSum:
+ dps = metrics.At(i).Sum().DataPoints()
+ }
+ for j := 0; j < dps.Len(); j++ {
+ dps.At(j).SetStartTimestamp(start)
+ }
+ }
+ }
+}
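+
+// For example (illustrative sketch; id and ts are hypothetical values for a
+// container ID and its per-container start timestamp), the override can be
+// combined with other resource options:
+//
+//	mb.EmitForResource(WithContainerID(id), WithStartTimeOverride(ts))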
+
+// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
+// recording another set of data points as part of another resource. This function is helpful when one scraper
+// needs to emit metrics from several resources. Otherwise calling this function is not required;
+// calling `Emit` alone is sufficient.
+// Resource attributes should be provided as ResourceMetricsOption arguments.
+func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) {
+ rm := pmetric.NewResourceMetrics()
+ rm.SetSchemaUrl(conventions.SchemaURL)
+ rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity)
+ ils := rm.ScopeMetrics().AppendEmpty()
+ ils.Scope().SetName("otelcol/dockerstatsreceiver")
+ ils.Scope().SetVersion(mb.buildInfo.Version)
+ ils.Metrics().EnsureCapacity(mb.metricsCapacity)
+ mb.metricContainerBlockioIoMergedRecursiveAsync.emit(ils.Metrics())
+ mb.metricContainerBlockioIoMergedRecursiveDiscard.emit(ils.Metrics())
+ mb.metricContainerBlockioIoMergedRecursiveRead.emit(ils.Metrics())
+ mb.metricContainerBlockioIoMergedRecursiveSync.emit(ils.Metrics())
+ mb.metricContainerBlockioIoMergedRecursiveTotal.emit(ils.Metrics())
+ mb.metricContainerBlockioIoMergedRecursiveWrite.emit(ils.Metrics())
+ mb.metricContainerBlockioIoQueuedRecursiveAsync.emit(ils.Metrics())
+ mb.metricContainerBlockioIoQueuedRecursiveDiscard.emit(ils.Metrics())
+ mb.metricContainerBlockioIoQueuedRecursiveRead.emit(ils.Metrics())
+ mb.metricContainerBlockioIoQueuedRecursiveSync.emit(ils.Metrics())
+ mb.metricContainerBlockioIoQueuedRecursiveTotal.emit(ils.Metrics())
+ mb.metricContainerBlockioIoQueuedRecursiveWrite.emit(ils.Metrics())
+ mb.metricContainerBlockioIoServiceBytesRecursiveAsync.emit(ils.Metrics())
+ mb.metricContainerBlockioIoServiceBytesRecursiveDiscard.emit(ils.Metrics())
+ mb.metricContainerBlockioIoServiceBytesRecursiveRead.emit(ils.Metrics())
+ mb.metricContainerBlockioIoServiceBytesRecursiveSync.emit(ils.Metrics())
+ mb.metricContainerBlockioIoServiceBytesRecursiveTotal.emit(ils.Metrics())
+ mb.metricContainerBlockioIoServiceBytesRecursiveWrite.emit(ils.Metrics())
+ mb.metricContainerBlockioIoServiceTimeRecursiveAsync.emit(ils.Metrics())
+ mb.metricContainerBlockioIoServiceTimeRecursiveDiscard.emit(ils.Metrics())
+ mb.metricContainerBlockioIoServiceTimeRecursiveRead.emit(ils.Metrics())
+ mb.metricContainerBlockioIoServiceTimeRecursiveSync.emit(ils.Metrics())
+ mb.metricContainerBlockioIoServiceTimeRecursiveTotal.emit(ils.Metrics())
+ mb.metricContainerBlockioIoServiceTimeRecursiveWrite.emit(ils.Metrics())
+ mb.metricContainerBlockioIoServicedRecursiveAsync.emit(ils.Metrics())
+ mb.metricContainerBlockioIoServicedRecursiveDiscard.emit(ils.Metrics())
+ mb.metricContainerBlockioIoServicedRecursiveRead.emit(ils.Metrics())
+ mb.metricContainerBlockioIoServicedRecursiveSync.emit(ils.Metrics())
+ mb.metricContainerBlockioIoServicedRecursiveTotal.emit(ils.Metrics())
+ mb.metricContainerBlockioIoServicedRecursiveWrite.emit(ils.Metrics())
+ mb.metricContainerBlockioIoTimeRecursiveAsync.emit(ils.Metrics())
+ mb.metricContainerBlockioIoTimeRecursiveDiscard.emit(ils.Metrics())
+ mb.metricContainerBlockioIoTimeRecursiveRead.emit(ils.Metrics())
+ mb.metricContainerBlockioIoTimeRecursiveSync.emit(ils.Metrics())
+ mb.metricContainerBlockioIoTimeRecursiveTotal.emit(ils.Metrics())
+ mb.metricContainerBlockioIoTimeRecursiveWrite.emit(ils.Metrics())
+ mb.metricContainerBlockioIoWaitTimeRecursiveAsync.emit(ils.Metrics())
+ mb.metricContainerBlockioIoWaitTimeRecursiveDiscard.emit(ils.Metrics())
+ mb.metricContainerBlockioIoWaitTimeRecursiveRead.emit(ils.Metrics())
+ mb.metricContainerBlockioIoWaitTimeRecursiveSync.emit(ils.Metrics())
+ mb.metricContainerBlockioIoWaitTimeRecursiveTotal.emit(ils.Metrics())
+ mb.metricContainerBlockioIoWaitTimeRecursiveWrite.emit(ils.Metrics())
+ mb.metricContainerBlockioSectorsRecursiveAsync.emit(ils.Metrics())
+ mb.metricContainerBlockioSectorsRecursiveDiscard.emit(ils.Metrics())
+ mb.metricContainerBlockioSectorsRecursiveRead.emit(ils.Metrics())
+ mb.metricContainerBlockioSectorsRecursiveSync.emit(ils.Metrics())
+ mb.metricContainerBlockioSectorsRecursiveTotal.emit(ils.Metrics())
+ mb.metricContainerBlockioSectorsRecursiveWrite.emit(ils.Metrics())
+ mb.metricContainerCPUPercent.emit(ils.Metrics())
+ mb.metricContainerCPUThrottlingDataPeriods.emit(ils.Metrics())
+ mb.metricContainerCPUThrottlingDataThrottledPeriods.emit(ils.Metrics())
+ mb.metricContainerCPUThrottlingDataThrottledTime.emit(ils.Metrics())
+ mb.metricContainerCPUUsageKernelmode.emit(ils.Metrics())
+ mb.metricContainerCPUUsagePercpu.emit(ils.Metrics())
+ mb.metricContainerCPUUsageSystem.emit(ils.Metrics())
+ mb.metricContainerCPUUsageTotal.emit(ils.Metrics())
+ mb.metricContainerCPUUsageUsermode.emit(ils.Metrics())
+ mb.metricContainerMemoryActiveAnon.emit(ils.Metrics())
+ mb.metricContainerMemoryActiveFile.emit(ils.Metrics())
+ mb.metricContainerMemoryCache.emit(ils.Metrics())
+ mb.metricContainerMemoryDirty.emit(ils.Metrics())
+ mb.metricContainerMemoryHierarchicalMemoryLimit.emit(ils.Metrics())
+ mb.metricContainerMemoryHierarchicalMemswLimit.emit(ils.Metrics())
+ mb.metricContainerMemoryInactiveAnon.emit(ils.Metrics())
+ mb.metricContainerMemoryInactiveFile.emit(ils.Metrics())
+ mb.metricContainerMemoryMappedFile.emit(ils.Metrics())
+ mb.metricContainerMemoryPercent.emit(ils.Metrics())
+ mb.metricContainerMemoryPgfault.emit(ils.Metrics())
+ mb.metricContainerMemoryPgmajfault.emit(ils.Metrics())
+ mb.metricContainerMemoryPgpgin.emit(ils.Metrics())
+ mb.metricContainerMemoryPgpgout.emit(ils.Metrics())
+ mb.metricContainerMemoryRss.emit(ils.Metrics())
+ mb.metricContainerMemoryRssHuge.emit(ils.Metrics())
+ mb.metricContainerMemorySwap.emit(ils.Metrics())
+ mb.metricContainerMemoryTotalActiveAnon.emit(ils.Metrics())
+ mb.metricContainerMemoryTotalActiveFile.emit(ils.Metrics())
+ mb.metricContainerMemoryTotalCache.emit(ils.Metrics())
+ mb.metricContainerMemoryTotalDirty.emit(ils.Metrics())
+ mb.metricContainerMemoryTotalInactiveAnon.emit(ils.Metrics())
+ mb.metricContainerMemoryTotalInactiveFile.emit(ils.Metrics())
+ mb.metricContainerMemoryTotalMappedFile.emit(ils.Metrics())
+ mb.metricContainerMemoryTotalPgfault.emit(ils.Metrics())
+ mb.metricContainerMemoryTotalPgmajfault.emit(ils.Metrics())
+ mb.metricContainerMemoryTotalPgpgin.emit(ils.Metrics())
+ mb.metricContainerMemoryTotalPgpgout.emit(ils.Metrics())
+ mb.metricContainerMemoryTotalRss.emit(ils.Metrics())
+ mb.metricContainerMemoryTotalRssHuge.emit(ils.Metrics())
+ mb.metricContainerMemoryTotalSwap.emit(ils.Metrics())
+ mb.metricContainerMemoryTotalUnevictable.emit(ils.Metrics())
+ mb.metricContainerMemoryTotalWriteback.emit(ils.Metrics())
+ mb.metricContainerMemoryUnevictable.emit(ils.Metrics())
+ mb.metricContainerMemoryUsageLimit.emit(ils.Metrics())
+ mb.metricContainerMemoryUsageMax.emit(ils.Metrics())
+ mb.metricContainerMemoryUsageTotal.emit(ils.Metrics())
+ mb.metricContainerMemoryWriteback.emit(ils.Metrics())
+ mb.metricContainerNetworkIoUsageRxBytes.emit(ils.Metrics())
+ mb.metricContainerNetworkIoUsageRxDropped.emit(ils.Metrics())
+ mb.metricContainerNetworkIoUsageRxErrors.emit(ils.Metrics())
+ mb.metricContainerNetworkIoUsageRxPackets.emit(ils.Metrics())
+ mb.metricContainerNetworkIoUsageTxBytes.emit(ils.Metrics())
+ mb.metricContainerNetworkIoUsageTxDropped.emit(ils.Metrics())
+ mb.metricContainerNetworkIoUsageTxErrors.emit(ils.Metrics())
+ mb.metricContainerNetworkIoUsageTxPackets.emit(ils.Metrics())
+ for _, op := range rmo {
+ op(rm)
+ }
+ if ils.Metrics().Len() > 0 {
+ mb.updateCapacity(rm)
+ rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
+ }
+}
+
+// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
+// recording another set of metrics. This function is responsible for applying all the transformations required to
+// produce the metric representation defined in metadata and user settings, e.g. delta or cumulative.
+func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics {
+ mb.EmitForResource(rmo...)
+ metrics := pmetric.NewMetrics()
+ mb.metricsBuffer.MoveTo(metrics)
+ return metrics
+}
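+
+// Example lifecycle (illustrative sketch; now, cpuTotalNs, memUsageBytes, id,
+// and name are hypothetical values): record data points for one container,
+// then emit them under a resource describing that container:
+//
+//	now := pcommon.NewTimestampFromTime(time.Now())
+//	mb.RecordContainerCPUUsageTotalDataPoint(now, cpuTotalNs)
+//	mb.RecordContainerMemoryUsageTotalDataPoint(now, memUsageBytes)
+//	md := mb.Emit(WithContainerID(id), WithContainerName(name))
+//	// md is a pmetric.Metrics ready to be passed to the next consumer.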
+
+// RecordContainerBlockioIoMergedRecursiveAsyncDataPoint adds a data point to container.blockio.io_merged_recursive.async metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoMergedRecursiveAsyncDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoMergedRecursiveAsync.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoMergedRecursiveDiscardDataPoint adds a data point to container.blockio.io_merged_recursive.discard metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoMergedRecursiveDiscardDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoMergedRecursiveDiscard.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoMergedRecursiveReadDataPoint adds a data point to container.blockio.io_merged_recursive.read metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoMergedRecursiveReadDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoMergedRecursiveRead.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoMergedRecursiveSyncDataPoint adds a data point to container.blockio.io_merged_recursive.sync metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoMergedRecursiveSyncDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoMergedRecursiveSync.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoMergedRecursiveTotalDataPoint adds a data point to container.blockio.io_merged_recursive.total metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoMergedRecursiveTotalDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoMergedRecursiveTotal.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoMergedRecursiveWriteDataPoint adds a data point to container.blockio.io_merged_recursive.write metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoMergedRecursiveWriteDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoMergedRecursiveWrite.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoQueuedRecursiveAsyncDataPoint adds a data point to container.blockio.io_queued_recursive.async metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoQueuedRecursiveAsyncDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoQueuedRecursiveAsync.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoQueuedRecursiveDiscardDataPoint adds a data point to container.blockio.io_queued_recursive.discard metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoQueuedRecursiveDiscardDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoQueuedRecursiveDiscard.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoQueuedRecursiveReadDataPoint adds a data point to container.blockio.io_queued_recursive.read metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoQueuedRecursiveReadDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoQueuedRecursiveRead.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoQueuedRecursiveSyncDataPoint adds a data point to container.blockio.io_queued_recursive.sync metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoQueuedRecursiveSyncDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoQueuedRecursiveSync.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoQueuedRecursiveTotalDataPoint adds a data point to container.blockio.io_queued_recursive.total metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoQueuedRecursiveTotalDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoQueuedRecursiveTotal.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoQueuedRecursiveWriteDataPoint adds a data point to container.blockio.io_queued_recursive.write metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoQueuedRecursiveWriteDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoQueuedRecursiveWrite.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoServiceBytesRecursiveAsyncDataPoint adds a data point to container.blockio.io_service_bytes_recursive.async metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServiceBytesRecursiveAsyncDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoServiceBytesRecursiveAsync.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoServiceBytesRecursiveDiscardDataPoint adds a data point to container.blockio.io_service_bytes_recursive.discard metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServiceBytesRecursiveDiscardDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoServiceBytesRecursiveDiscard.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoServiceBytesRecursiveReadDataPoint adds a data point to container.blockio.io_service_bytes_recursive.read metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServiceBytesRecursiveReadDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoServiceBytesRecursiveRead.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoServiceBytesRecursiveSyncDataPoint adds a data point to container.blockio.io_service_bytes_recursive.sync metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServiceBytesRecursiveSyncDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoServiceBytesRecursiveSync.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoServiceBytesRecursiveTotalDataPoint adds a data point to container.blockio.io_service_bytes_recursive.total metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServiceBytesRecursiveTotalDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoServiceBytesRecursiveTotal.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoServiceBytesRecursiveWriteDataPoint adds a data point to container.blockio.io_service_bytes_recursive.write metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServiceBytesRecursiveWriteDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoServiceBytesRecursiveWrite.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoServiceTimeRecursiveAsyncDataPoint adds a data point to container.blockio.io_service_time_recursive.async metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServiceTimeRecursiveAsyncDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoServiceTimeRecursiveAsync.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoServiceTimeRecursiveDiscardDataPoint adds a data point to container.blockio.io_service_time_recursive.discard metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServiceTimeRecursiveDiscardDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoServiceTimeRecursiveDiscard.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoServiceTimeRecursiveReadDataPoint adds a data point to container.blockio.io_service_time_recursive.read metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServiceTimeRecursiveReadDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoServiceTimeRecursiveRead.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoServiceTimeRecursiveSyncDataPoint adds a data point to container.blockio.io_service_time_recursive.sync metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServiceTimeRecursiveSyncDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoServiceTimeRecursiveSync.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoServiceTimeRecursiveTotalDataPoint adds a data point to container.blockio.io_service_time_recursive.total metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServiceTimeRecursiveTotalDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoServiceTimeRecursiveTotal.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoServiceTimeRecursiveWriteDataPoint adds a data point to container.blockio.io_service_time_recursive.write metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServiceTimeRecursiveWriteDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoServiceTimeRecursiveWrite.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoServicedRecursiveAsyncDataPoint adds a data point to container.blockio.io_serviced_recursive.async metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServicedRecursiveAsyncDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoServicedRecursiveAsync.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoServicedRecursiveDiscardDataPoint adds a data point to container.blockio.io_serviced_recursive.discard metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServicedRecursiveDiscardDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoServicedRecursiveDiscard.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoServicedRecursiveReadDataPoint adds a data point to container.blockio.io_serviced_recursive.read metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServicedRecursiveReadDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoServicedRecursiveRead.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoServicedRecursiveSyncDataPoint adds a data point to container.blockio.io_serviced_recursive.sync metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServicedRecursiveSyncDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoServicedRecursiveSync.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoServicedRecursiveTotalDataPoint adds a data point to container.blockio.io_serviced_recursive.total metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServicedRecursiveTotalDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoServicedRecursiveTotal.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoServicedRecursiveWriteDataPoint adds a data point to container.blockio.io_serviced_recursive.write metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoServicedRecursiveWriteDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoServicedRecursiveWrite.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoTimeRecursiveAsyncDataPoint adds a data point to container.blockio.io_time_recursive.async metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoTimeRecursiveAsyncDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoTimeRecursiveAsync.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoTimeRecursiveDiscardDataPoint adds a data point to container.blockio.io_time_recursive.discard metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoTimeRecursiveDiscardDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoTimeRecursiveDiscard.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoTimeRecursiveReadDataPoint adds a data point to container.blockio.io_time_recursive.read metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoTimeRecursiveReadDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoTimeRecursiveRead.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoTimeRecursiveSyncDataPoint adds a data point to container.blockio.io_time_recursive.sync metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoTimeRecursiveSyncDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoTimeRecursiveSync.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoTimeRecursiveTotalDataPoint adds a data point to container.blockio.io_time_recursive.total metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoTimeRecursiveTotalDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoTimeRecursiveTotal.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoTimeRecursiveWriteDataPoint adds a data point to container.blockio.io_time_recursive.write metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoTimeRecursiveWriteDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoTimeRecursiveWrite.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoWaitTimeRecursiveAsyncDataPoint adds a data point to container.blockio.io_wait_time_recursive.async metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoWaitTimeRecursiveAsyncDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoWaitTimeRecursiveAsync.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoWaitTimeRecursiveDiscardDataPoint adds a data point to container.blockio.io_wait_time_recursive.discard metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoWaitTimeRecursiveDiscardDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoWaitTimeRecursiveDiscard.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoWaitTimeRecursiveReadDataPoint adds a data point to container.blockio.io_wait_time_recursive.read metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoWaitTimeRecursiveReadDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoWaitTimeRecursiveRead.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoWaitTimeRecursiveSyncDataPoint adds a data point to container.blockio.io_wait_time_recursive.sync metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoWaitTimeRecursiveSyncDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoWaitTimeRecursiveSync.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoWaitTimeRecursiveTotalDataPoint adds a data point to container.blockio.io_wait_time_recursive.total metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoWaitTimeRecursiveTotalDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoWaitTimeRecursiveTotal.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioIoWaitTimeRecursiveWriteDataPoint adds a data point to container.blockio.io_wait_time_recursive.write metric.
+func (mb *MetricsBuilder) RecordContainerBlockioIoWaitTimeRecursiveWriteDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioIoWaitTimeRecursiveWrite.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioSectorsRecursiveAsyncDataPoint adds a data point to container.blockio.sectors_recursive.async metric.
+func (mb *MetricsBuilder) RecordContainerBlockioSectorsRecursiveAsyncDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioSectorsRecursiveAsync.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioSectorsRecursiveDiscardDataPoint adds a data point to container.blockio.sectors_recursive.discard metric.
+func (mb *MetricsBuilder) RecordContainerBlockioSectorsRecursiveDiscardDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioSectorsRecursiveDiscard.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioSectorsRecursiveReadDataPoint adds a data point to container.blockio.sectors_recursive.read metric.
+func (mb *MetricsBuilder) RecordContainerBlockioSectorsRecursiveReadDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioSectorsRecursiveRead.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioSectorsRecursiveSyncDataPoint adds a data point to container.blockio.sectors_recursive.sync metric.
+func (mb *MetricsBuilder) RecordContainerBlockioSectorsRecursiveSyncDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioSectorsRecursiveSync.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioSectorsRecursiveTotalDataPoint adds a data point to container.blockio.sectors_recursive.total metric.
+func (mb *MetricsBuilder) RecordContainerBlockioSectorsRecursiveTotalDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioSectorsRecursiveTotal.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerBlockioSectorsRecursiveWriteDataPoint adds a data point to container.blockio.sectors_recursive.write metric.
+func (mb *MetricsBuilder) RecordContainerBlockioSectorsRecursiveWriteDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string) {
+ mb.metricContainerBlockioSectorsRecursiveWrite.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue)
+}
+
+// RecordContainerCPUPercentDataPoint adds a data point to container.cpu.percent metric.
+func (mb *MetricsBuilder) RecordContainerCPUPercentDataPoint(ts pcommon.Timestamp, val float64) {
+ mb.metricContainerCPUPercent.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerCPUThrottlingDataPeriodsDataPoint adds a data point to container.cpu.throttling_data.periods metric.
+func (mb *MetricsBuilder) RecordContainerCPUThrottlingDataPeriodsDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerCPUThrottlingDataPeriods.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerCPUThrottlingDataThrottledPeriodsDataPoint adds a data point to container.cpu.throttling_data.throttled_periods metric.
+func (mb *MetricsBuilder) RecordContainerCPUThrottlingDataThrottledPeriodsDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerCPUThrottlingDataThrottledPeriods.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerCPUThrottlingDataThrottledTimeDataPoint adds a data point to container.cpu.throttling_data.throttled_time metric.
+func (mb *MetricsBuilder) RecordContainerCPUThrottlingDataThrottledTimeDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerCPUThrottlingDataThrottledTime.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerCPUUsageKernelmodeDataPoint adds a data point to container.cpu.usage.kernelmode metric.
+func (mb *MetricsBuilder) RecordContainerCPUUsageKernelmodeDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerCPUUsageKernelmode.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerCPUUsagePercpuDataPoint adds a data point to container.cpu.usage.percpu metric.
+func (mb *MetricsBuilder) RecordContainerCPUUsagePercpuDataPoint(ts pcommon.Timestamp, val int64, coreAttributeValue string) {
+ mb.metricContainerCPUUsagePercpu.recordDataPoint(mb.startTime, ts, val, coreAttributeValue)
+}
+
+// RecordContainerCPUUsageSystemDataPoint adds a data point to container.cpu.usage.system metric.
+func (mb *MetricsBuilder) RecordContainerCPUUsageSystemDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerCPUUsageSystem.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerCPUUsageTotalDataPoint adds a data point to container.cpu.usage.total metric.
+func (mb *MetricsBuilder) RecordContainerCPUUsageTotalDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerCPUUsageTotal.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerCPUUsageUsermodeDataPoint adds a data point to container.cpu.usage.usermode metric.
+func (mb *MetricsBuilder) RecordContainerCPUUsageUsermodeDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerCPUUsageUsermode.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryActiveAnonDataPoint adds a data point to container.memory.active_anon metric.
+func (mb *MetricsBuilder) RecordContainerMemoryActiveAnonDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryActiveAnon.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryActiveFileDataPoint adds a data point to container.memory.active_file metric.
+func (mb *MetricsBuilder) RecordContainerMemoryActiveFileDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryActiveFile.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryCacheDataPoint adds a data point to container.memory.cache metric.
+func (mb *MetricsBuilder) RecordContainerMemoryCacheDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryCache.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryDirtyDataPoint adds a data point to container.memory.dirty metric.
+func (mb *MetricsBuilder) RecordContainerMemoryDirtyDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryDirty.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryHierarchicalMemoryLimitDataPoint adds a data point to container.memory.hierarchical_memory_limit metric.
+func (mb *MetricsBuilder) RecordContainerMemoryHierarchicalMemoryLimitDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryHierarchicalMemoryLimit.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryHierarchicalMemswLimitDataPoint adds a data point to container.memory.hierarchical_memsw_limit metric.
+func (mb *MetricsBuilder) RecordContainerMemoryHierarchicalMemswLimitDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryHierarchicalMemswLimit.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryInactiveAnonDataPoint adds a data point to container.memory.inactive_anon metric.
+func (mb *MetricsBuilder) RecordContainerMemoryInactiveAnonDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryInactiveAnon.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryInactiveFileDataPoint adds a data point to container.memory.inactive_file metric.
+func (mb *MetricsBuilder) RecordContainerMemoryInactiveFileDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryInactiveFile.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryMappedFileDataPoint adds a data point to container.memory.mapped_file metric.
+func (mb *MetricsBuilder) RecordContainerMemoryMappedFileDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryMappedFile.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryPercentDataPoint adds a data point to container.memory.percent metric.
+func (mb *MetricsBuilder) RecordContainerMemoryPercentDataPoint(ts pcommon.Timestamp, val float64) {
+ mb.metricContainerMemoryPercent.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryPgfaultDataPoint adds a data point to container.memory.pgfault metric.
+func (mb *MetricsBuilder) RecordContainerMemoryPgfaultDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryPgfault.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryPgmajfaultDataPoint adds a data point to container.memory.pgmajfault metric.
+func (mb *MetricsBuilder) RecordContainerMemoryPgmajfaultDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryPgmajfault.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryPgpginDataPoint adds a data point to container.memory.pgpgin metric.
+func (mb *MetricsBuilder) RecordContainerMemoryPgpginDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryPgpgin.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryPgpgoutDataPoint adds a data point to container.memory.pgpgout metric.
+func (mb *MetricsBuilder) RecordContainerMemoryPgpgoutDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryPgpgout.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryRssDataPoint adds a data point to container.memory.rss metric.
+func (mb *MetricsBuilder) RecordContainerMemoryRssDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryRss.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryRssHugeDataPoint adds a data point to container.memory.rss_huge metric.
+func (mb *MetricsBuilder) RecordContainerMemoryRssHugeDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryRssHuge.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemorySwapDataPoint adds a data point to container.memory.swap metric.
+func (mb *MetricsBuilder) RecordContainerMemorySwapDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemorySwap.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryTotalActiveAnonDataPoint adds a data point to container.memory.total_active_anon metric.
+func (mb *MetricsBuilder) RecordContainerMemoryTotalActiveAnonDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryTotalActiveAnon.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryTotalActiveFileDataPoint adds a data point to container.memory.total_active_file metric.
+func (mb *MetricsBuilder) RecordContainerMemoryTotalActiveFileDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryTotalActiveFile.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryTotalCacheDataPoint adds a data point to container.memory.total_cache metric.
+func (mb *MetricsBuilder) RecordContainerMemoryTotalCacheDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryTotalCache.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryTotalDirtyDataPoint adds a data point to container.memory.total_dirty metric.
+func (mb *MetricsBuilder) RecordContainerMemoryTotalDirtyDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryTotalDirty.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryTotalInactiveAnonDataPoint adds a data point to container.memory.total_inactive_anon metric.
+func (mb *MetricsBuilder) RecordContainerMemoryTotalInactiveAnonDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryTotalInactiveAnon.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryTotalInactiveFileDataPoint adds a data point to container.memory.total_inactive_file metric.
+func (mb *MetricsBuilder) RecordContainerMemoryTotalInactiveFileDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryTotalInactiveFile.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryTotalMappedFileDataPoint adds a data point to container.memory.total_mapped_file metric.
+func (mb *MetricsBuilder) RecordContainerMemoryTotalMappedFileDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryTotalMappedFile.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryTotalPgfaultDataPoint adds a data point to container.memory.total_pgfault metric.
+func (mb *MetricsBuilder) RecordContainerMemoryTotalPgfaultDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryTotalPgfault.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryTotalPgmajfaultDataPoint adds a data point to container.memory.total_pgmajfault metric.
+func (mb *MetricsBuilder) RecordContainerMemoryTotalPgmajfaultDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryTotalPgmajfault.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryTotalPgpginDataPoint adds a data point to container.memory.total_pgpgin metric.
+func (mb *MetricsBuilder) RecordContainerMemoryTotalPgpginDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryTotalPgpgin.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryTotalPgpgoutDataPoint adds a data point to container.memory.total_pgpgout metric.
+func (mb *MetricsBuilder) RecordContainerMemoryTotalPgpgoutDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryTotalPgpgout.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryTotalRssDataPoint adds a data point to container.memory.total_rss metric.
+func (mb *MetricsBuilder) RecordContainerMemoryTotalRssDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryTotalRss.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryTotalRssHugeDataPoint adds a data point to container.memory.total_rss_huge metric.
+func (mb *MetricsBuilder) RecordContainerMemoryTotalRssHugeDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryTotalRssHuge.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryTotalSwapDataPoint adds a data point to container.memory.total_swap metric.
+func (mb *MetricsBuilder) RecordContainerMemoryTotalSwapDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryTotalSwap.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryTotalUnevictableDataPoint adds a data point to container.memory.total_unevictable metric.
+func (mb *MetricsBuilder) RecordContainerMemoryTotalUnevictableDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryTotalUnevictable.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryTotalWritebackDataPoint adds a data point to container.memory.total_writeback metric.
+func (mb *MetricsBuilder) RecordContainerMemoryTotalWritebackDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryTotalWriteback.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryUnevictableDataPoint adds a data point to container.memory.unevictable metric.
+func (mb *MetricsBuilder) RecordContainerMemoryUnevictableDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryUnevictable.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryUsageLimitDataPoint adds a data point to container.memory.usage.limit metric.
+func (mb *MetricsBuilder) RecordContainerMemoryUsageLimitDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryUsageLimit.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryUsageMaxDataPoint adds a data point to container.memory.usage.max metric.
+func (mb *MetricsBuilder) RecordContainerMemoryUsageMaxDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryUsageMax.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryUsageTotalDataPoint adds a data point to container.memory.usage.total metric.
+func (mb *MetricsBuilder) RecordContainerMemoryUsageTotalDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryUsageTotal.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerMemoryWritebackDataPoint adds a data point to container.memory.writeback metric.
+func (mb *MetricsBuilder) RecordContainerMemoryWritebackDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricContainerMemoryWriteback.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordContainerNetworkIoUsageRxBytesDataPoint adds a data point to container.network.io.usage.rx_bytes metric.
+func (mb *MetricsBuilder) RecordContainerNetworkIoUsageRxBytesDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
+ mb.metricContainerNetworkIoUsageRxBytes.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue)
+}
+
+// RecordContainerNetworkIoUsageRxDroppedDataPoint adds a data point to container.network.io.usage.rx_dropped metric.
+func (mb *MetricsBuilder) RecordContainerNetworkIoUsageRxDroppedDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
+ mb.metricContainerNetworkIoUsageRxDropped.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue)
+}
+
+// RecordContainerNetworkIoUsageRxErrorsDataPoint adds a data point to container.network.io.usage.rx_errors metric.
+func (mb *MetricsBuilder) RecordContainerNetworkIoUsageRxErrorsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
+ mb.metricContainerNetworkIoUsageRxErrors.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue)
+}
+
+// RecordContainerNetworkIoUsageRxPacketsDataPoint adds a data point to container.network.io.usage.rx_packets metric.
+func (mb *MetricsBuilder) RecordContainerNetworkIoUsageRxPacketsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
+ mb.metricContainerNetworkIoUsageRxPackets.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue)
+}
+
+// RecordContainerNetworkIoUsageTxBytesDataPoint adds a data point to container.network.io.usage.tx_bytes metric.
+func (mb *MetricsBuilder) RecordContainerNetworkIoUsageTxBytesDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
+ mb.metricContainerNetworkIoUsageTxBytes.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue)
+}
+
+// RecordContainerNetworkIoUsageTxDroppedDataPoint adds a data point to container.network.io.usage.tx_dropped metric.
+func (mb *MetricsBuilder) RecordContainerNetworkIoUsageTxDroppedDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
+ mb.metricContainerNetworkIoUsageTxDropped.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue)
+}
+
+// RecordContainerNetworkIoUsageTxErrorsDataPoint adds a data point to container.network.io.usage.tx_errors metric.
+func (mb *MetricsBuilder) RecordContainerNetworkIoUsageTxErrorsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
+ mb.metricContainerNetworkIoUsageTxErrors.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue)
+}
+
+// RecordContainerNetworkIoUsageTxPacketsDataPoint adds a data point to container.network.io.usage.tx_packets metric.
+func (mb *MetricsBuilder) RecordContainerNetworkIoUsageTxPacketsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
+ mb.metricContainerNetworkIoUsageTxPackets.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue)
+}
+
+// Reset resets the metrics builder to its initial state. It should be used when an external metrics source is
+// restarted, so that the metrics builder updates its startTime and resets its internal state accordingly.
+func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {
+ mb.startTime = pcommon.NewTimestampFromTime(time.Now())
+ for _, op := range options {
+ op(mb)
+ }
+}
diff --git a/receiver/dockerstatsreceiver/metadata.yaml b/receiver/dockerstatsreceiver/metadata.yaml
new file mode 100644
index 000000000000..ce5f4faec8c2
--- /dev/null
+++ b/receiver/dockerstatsreceiver/metadata.yaml
@@ -0,0 +1,595 @@
+name: dockerstatsreceiver
+
+sem_conv_version: 1.6.1
+
+# Note: the user can configure additional resource attributes through the yaml
+resource_attributes:
+ container.runtime:
+ description: "The runtime of the container. For this receiver, it will always be 'docker'."
+ type: string
+ container.id:
+ description: "The ID of the container."
+ type: string
+ container.image.name:
+ description: "The name of the docker image in use by the container."
+ type: string
+ container.name:
+ description: "The name of the container."
+ type: string
+ container.hostname:
+ description: "The hostname of the container."
+ type: string
+
+attributes:
+ core:
+ description: "The CPU core number when utilising per-CPU metrics."
+ type: string
+ device_major:
+ description: "Device major number for block IO operations."
+ type: string
+ device_minor:
+ description: "Device minor number for block IO operations."
+ type: string
+ interface:
+ description: "Network interface."
+ type: string
+
+metrics:
+ # CPU
+ container.cpu.usage.system:
+ enabled: true
+ description: "System CPU usage."
+ unit: ns
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ container.cpu.usage.total:
+ enabled: true
+ description: "Total CPU time consumed."
+ unit: ns
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ container.cpu.usage.kernelmode:
+ enabled: true
+ description: >-
+ Time spent by tasks of the cgroup in kernel mode (Linux).
+ Time spent by all container processes in kernel mode (Windows).
+ unit: ns
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ container.cpu.usage.usermode:
+ enabled: true
+ description: >-
+ Time spent by tasks of the cgroup in user mode (Linux).
+ Time spent by all container processes in user mode (Windows).
+ unit: ns
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ container.cpu.usage.percpu:
+ enabled: false
+ description: "Per-core CPU usage by the container."
+ unit: ns
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ attributes:
+ - core
+ container.cpu.throttling_data.periods:
+ enabled: true
+ description: "Number of periods with throttling active."
+ unit: "1"
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ container.cpu.throttling_data.throttled_periods:
+ enabled: true
+ description: "Number of periods when the container hits its throttling limit."
+ unit: "1"
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ container.cpu.throttling_data.throttled_time:
+ enabled: true
+ description: "Aggregate time the container was throttled."
+ unit: ns
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ container.cpu.percent:
+ enabled: true
+ description: "Percent of CPU used by the container."
+ unit: "1"
+ gauge:
+ value_type: double
+
+
+ # Memory
+ container.memory.usage.limit:
+ enabled: true
+ description: "Memory limit of the container."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.usage.total:
+ enabled: true
+ description: "Memory usage of the container. This excludes the total cache."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.usage.max:
+ enabled: true
+ description: "Maximum memory usage."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.percent:
+ enabled: true
+ description: "Percentage of memory used."
+    unit: "1"
+ gauge:
+ value_type: double
+ container.memory.cache:
+ enabled: true
+ description: "The amount of memory used by the processes of this control group that can be associated precisely with a block on a block device."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.rss:
+ enabled: true
+ description: "The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.rss_huge:
+ enabled: true
+ description: "Number of bytes of anonymous transparent hugepages in this cgroup."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.dirty:
+ enabled: true
+ description: "Bytes that are waiting to get written back to the disk, from this cgroup."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.writeback:
+ enabled: true
+ description: "Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.mapped_file:
+ enabled: true
+ description: "Indicates the amount of memory mapped by the processes in the control group."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.pgpgin:
+ enabled: true
+ description: "Number of pages read from disk by the cgroup."
+ extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt)."
+ unit: "1"
+ sum:
+ value_type: int
+ aggregation: cumulative
+ monotonic: true
+ container.memory.pgpgout:
+ enabled: true
+ description: "Number of pages written to disk by the cgroup."
+ extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt)."
+ unit: "1"
+ sum:
+ value_type: int
+ aggregation: cumulative
+ monotonic: true
+ container.memory.swap:
+ enabled: true
+ description: "The amount of swap currently used by the processes in this cgroup."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.pgfault:
+ enabled: true
+ description: "Indicate the number of times that a process of the cgroup triggered a page fault."
+ unit: "1"
+ sum:
+ value_type: int
+ aggregation: cumulative
+ monotonic: true
+ container.memory.pgmajfault:
+ enabled: true
+ description: "Indicate the number of times that a process of the cgroup triggered a major fault."
+ unit: "1"
+ sum:
+ value_type: int
+ aggregation: cumulative
+ monotonic: true
+ container.memory.inactive_anon:
+ enabled: true
+ description: "The amount of anonymous memory that has been identified as inactive by the kernel."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.active_anon:
+ enabled: true
+ description: "The amount of anonymous memory that has been identified as active by the kernel."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.inactive_file:
+ enabled: true
+ description: "Cache memory that has been identified as inactive by the kernel."
+ extended_documentation: "[More docs](https://docs.docker.com/config/containers/runmetrics/)"
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.active_file:
+ enabled: true
+ description: "Cache memory that has been identified as active by the kernel."
+ extended_documentation: "[More docs](https://docs.docker.com/config/containers/runmetrics/)"
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.unevictable:
+ enabled: true
+ description: "The amount of memory that cannot be reclaimed."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.hierarchical_memory_limit:
+ enabled: true
+ description: "The maximum amount of physical memory that can be used by the processes of this control group."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.hierarchical_memsw_limit:
+ enabled: true
+ description: "The maximum amount of RAM + swap that can be used by the processes of this control group."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.total_cache:
+ enabled: true
+ description: "Total amount of memory used by the processes of this cgroup (and descendants) that can be associated with a block on a block device. Also accounts for memory used by tmpfs."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.total_rss:
+ enabled: true
+ description: "The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps. Includes descendant cgroups."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.total_rss_huge:
+ enabled: true
+ description: "Number of bytes of anonymous transparent hugepages in this cgroup and descendant cgroups."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.total_dirty:
+ enabled: true
+ description: "Bytes that are waiting to get written back to the disk, from this cgroup and descendants."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.total_writeback:
+ enabled: true
+ description: "Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup and descendants."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.total_mapped_file:
+ enabled: true
+ description: "Indicates the amount of memory mapped by the processes in the control group and descendant groups."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.total_pgpgin:
+ enabled: true
+ description: "Number of pages read from disk by the cgroup and descendant groups."
+ unit: "1"
+ sum:
+ value_type: int
+ aggregation: cumulative
+ monotonic: true
+ container.memory.total_pgpgout:
+ enabled: true
+ description: "Number of pages written to disk by the cgroup and descendant groups."
+ unit: "1"
+ sum:
+ value_type: int
+ aggregation: cumulative
+ monotonic: true
+ container.memory.total_swap:
+ enabled: true
+ description: "The amount of swap currently used by the processes in this cgroup and descendant groups."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.total_pgfault:
+ enabled: true
+ description: "Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a page fault."
+ unit: "1"
+ sum:
+ value_type: int
+ aggregation: cumulative
+ monotonic: true
+ container.memory.total_pgmajfault:
+ enabled: true
+ description: "Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a major fault."
+ unit: "1"
+ sum:
+ value_type: int
+ aggregation: cumulative
+ monotonic: true
+ container.memory.total_inactive_anon:
+ enabled: true
+ description: "The amount of anonymous memory that has been identified as inactive by the kernel. Includes descendant cgroups."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.total_active_anon:
+ enabled: true
+ description: "The amount of anonymous memory that has been identified as active by the kernel. Includes descendant cgroups."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.total_inactive_file:
+ enabled: true
+ description: "Cache memory that has been identified as inactive by the kernel. Includes descendant cgroups."
+ extended_documentation: "[More docs](https://docs.docker.com/config/containers/runmetrics/)."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.total_active_file:
+ enabled: true
+ description: "Cache memory that has been identified as active by the kernel. Includes descendant cgroups."
+ extended_documentation: "[More docs](https://docs.docker.com/config/containers/runmetrics/)."
+ unit: By
+ gauge:
+ value_type: int
+ container.memory.total_unevictable:
+ enabled: true
+ description: "The amount of memory that cannot be reclaimed. Includes descendant cgroups."
+ unit: By
+ gauge:
+ value_type: int
+
+
+ # BlockIO
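+  # Each blockio metric family below is defined once with a YAML anchor (e.g. &merged);
+  # the remaining operations reuse that definition via aliases (e.g. *merged).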
+ container.blockio.io_merged_recursive.read: &merged
+ enabled: true
+ description: "Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups."
+ extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)."
+ unit: "1"
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ attributes:
+ - device_major
+ - device_minor
+ container.blockio.io_merged_recursive.write: *merged
+ container.blockio.io_merged_recursive.sync: *merged
+ container.blockio.io_merged_recursive.async: *merged
+ container.blockio.io_merged_recursive.discard: *merged
+ container.blockio.io_merged_recursive.total: *merged
+
+ container.blockio.io_queued_recursive.read: &queued
+ enabled: true
+ description: "Number of requests queued up for this cgroup and its descendant cgroups."
+ extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)."
+ unit: "1"
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ attributes:
+ - device_major
+ - device_minor
+ container.blockio.io_queued_recursive.write: *queued
+ container.blockio.io_queued_recursive.sync: *queued
+ container.blockio.io_queued_recursive.async: *queued
+ container.blockio.io_queued_recursive.discard: *queued
+ container.blockio.io_queued_recursive.total: *queued
+
+ container.blockio.io_service_bytes_recursive.read: &service_bytes
+ enabled: true
+ description: "Number of bytes transferred to/from the disk by the group and descendant groups."
+ extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)."
+ unit: By
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ attributes:
+ - device_major
+ - device_minor
+ container.blockio.io_service_bytes_recursive.write: *service_bytes
+ container.blockio.io_service_bytes_recursive.async: *service_bytes
+ container.blockio.io_service_bytes_recursive.sync: *service_bytes
+ container.blockio.io_service_bytes_recursive.discard: *service_bytes
+ container.blockio.io_service_bytes_recursive.total: *service_bytes
+
+ container.blockio.io_service_time_recursive.read: &service_time
+ enabled: true
+ description: "Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups."
+ extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)."
+ unit: "1" # Preserving legacy incorrect unit for now. Should be nanoseconds eventually.
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ attributes:
+ - device_major
+ - device_minor
+ container.blockio.io_service_time_recursive.write: *service_time
+ container.blockio.io_service_time_recursive.async: *service_time
+ container.blockio.io_service_time_recursive.sync: *service_time
+ container.blockio.io_service_time_recursive.discard: *service_time
+ container.blockio.io_service_time_recursive.total: *service_time
+
+ container.blockio.io_serviced_recursive.read: &serviced
+ enabled: true
+ description: "Number of IOs (bio) issued to the disk by the group and descendant groups."
+ extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)."
+ unit: "1"
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ attributes:
+ - device_major
+ - device_minor
+ container.blockio.io_serviced_recursive.write: *serviced
+ container.blockio.io_serviced_recursive.async: *serviced
+ container.blockio.io_serviced_recursive.sync: *serviced
+ container.blockio.io_serviced_recursive.discard: *serviced
+ container.blockio.io_serviced_recursive.total: *serviced
+
+ container.blockio.io_time_recursive.read: &time
+ enabled: true
+ description: "Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds."
+ extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)."
+ unit: ms
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ attributes:
+ - device_major
+ - device_minor
+ container.blockio.io_time_recursive.write: *time
+ container.blockio.io_time_recursive.async: *time
+ container.blockio.io_time_recursive.sync: *time
+ container.blockio.io_time_recursive.discard: *time
+ container.blockio.io_time_recursive.total: *time
+
+ container.blockio.io_wait_time_recursive.read: &wait_time
+ enabled: true
+ description: "Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service."
+ extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)."
+ unit: "1" # Should be in ns but preserving legacy mistake for now
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ attributes:
+ - device_major
+ - device_minor
+ container.blockio.io_wait_time_recursive.write: *wait_time
+ container.blockio.io_wait_time_recursive.async: *wait_time
+ container.blockio.io_wait_time_recursive.sync: *wait_time
+ container.blockio.io_wait_time_recursive.discard: *wait_time
+ container.blockio.io_wait_time_recursive.total: *wait_time
+
+  container.blockio.sectors_recursive.read: &sectors
+ enabled: true
+ description: "Number of sectors transferred to/from disk by the group and descendant groups."
+ extended_documentation: "[More docs](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt)."
+ unit: "1"
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ attributes:
+ - device_major
+ - device_minor
+ container.blockio.sectors_recursive.write: *sectors
+ container.blockio.sectors_recursive.sync: *sectors
+ container.blockio.sectors_recursive.async: *sectors
+ container.blockio.sectors_recursive.discard: *sectors
+ container.blockio.sectors_recursive.total: *sectors
+
+ # Network
+ container.network.io.usage.rx_bytes:
+ enabled: true
+ description: "Bytes received by the container."
+ unit: By
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ attributes:
+ - interface
+ container.network.io.usage.tx_bytes:
+ enabled: true
+ description: "Bytes sent."
+ unit: By
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ attributes:
+ - interface
+ container.network.io.usage.rx_dropped:
+ enabled: true
+ description: "Incoming packets dropped."
+ unit: "1"
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ attributes:
+ - interface
+ container.network.io.usage.tx_dropped:
+ enabled: true
+ description: "Outgoing packets dropped."
+ unit: "1"
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ attributes:
+ - interface
+ container.network.io.usage.rx_errors:
+ enabled: true
+ description: "Received errors."
+ unit: "1"
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ attributes:
+ - interface
+ container.network.io.usage.tx_errors:
+ enabled: true
+ description: "Sent errors."
+ unit: "1"
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ attributes:
+ - interface
+ container.network.io.usage.rx_packets:
+ enabled: true
+ description: "Packets received."
+ unit: "1"
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ attributes:
+ - interface
+ container.network.io.usage.tx_packets:
+ enabled: true
+ description: "Packets sent."
+ unit: "1"
+ sum:
+ value_type: int
+ monotonic: true
+ aggregation: cumulative
+ attributes:
+ - interface
diff --git a/receiver/dockerstatsreceiver/metrics.go b/receiver/dockerstatsreceiver/metrics.go
index 434284161165..ac4bdf69124f 100644
--- a/receiver/dockerstatsreceiver/metrics.go
+++ b/receiver/dockerstatsreceiver/metrics.go
@@ -177,12 +177,7 @@ func appendMemoryMetrics(dest pmetric.MetricSlice, memoryStats *dtypes.MemorySta
populateGauge(dest.AppendEmpty(), "memory.usage.limit", int64(memoryStats.Limit), ts)
populateGauge(dest.AppendEmpty(), "memory.usage.total", totalUsage, ts)
- var pctUsed float64
- if float64(memoryStats.Limit) == 0 {
- pctUsed = 0
- } else {
- pctUsed = 100.0 * (float64(memoryStats.Usage) - float64(memoryStats.Stats["cache"])) / float64(memoryStats.Limit)
- }
+ pctUsed := calculateMemoryPercent(memoryStats)
populateGaugeF(dest.AppendEmpty(), "memory.percent", "1", pctUsed, ts, nil, nil)
populateGauge(dest.AppendEmpty(), "memory.usage.max", int64(memoryStats.MaxUsage), ts)
@@ -282,3 +277,10 @@ func populateAttributes(dest pcommon.Map, labelKeys []string, labelValues []stri
dest.UpsertString(labelKeys[i], labelValues[i])
}
}
+
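+// calculateMemoryPercent returns memory usage (excluding page cache) as a percentage of the
+// memory limit, or 0 when no limit is set.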
+func calculateMemoryPercent(memoryStats *dtypes.MemoryStats) float64 {
+	if memoryStats.Limit == 0 {
+ return 0
+ }
+ return 100.0 * (float64(memoryStats.Usage) - float64(memoryStats.Stats["cache"])) / float64(memoryStats.Limit)
+}
diff --git a/receiver/dockerstatsreceiver/receiver.go b/receiver/dockerstatsreceiver/receiver.go
index 954238c15e9e..d254d95865a0 100644
--- a/receiver/dockerstatsreceiver/receiver.go
+++ b/receiver/dockerstatsreceiver/receiver.go
@@ -20,14 +20,13 @@ import (
"time"
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver/scrapererror"
- "go.opentelemetry.io/collector/receiver/scraperhelper"
"go.uber.org/multierr"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver/internal/metadata"
)
const (
@@ -39,24 +38,18 @@ type receiver struct {
config *Config
settings component.ReceiverCreateSettings
client *docker.Client
+ mb *metadata.MetricsBuilder
}
-func NewReceiver(
- _ context.Context,
- set component.ReceiverCreateSettings,
- config *Config,
- nextConsumer consumer.Metrics,
-) (component.MetricsReceiver, error) {
- recv := receiver{
+func newReceiver(set component.ReceiverCreateSettings, config *Config) *receiver {
+ if config.ProvidePerCoreCPUMetrics {
+		config.MetricsConfig.ContainerCPUUsagePercpu.Enabled = true
+ }
+ return &receiver{
config: config,
settings: set,
+ mb: metadata.NewMetricsBuilder(config.MetricsConfig, set.BuildInfo),
}
-
- scrp, err := scraperhelper.NewScraper(typeStr, recv.scrape, scraperhelper.WithStart(recv.start))
- if err != nil {
- return nil, err
- }
- return scraperhelper.NewScraperControllerReceiver(&recv.config.ScraperControllerSettings, set, nextConsumer, scraperhelper.AddScraper(scrp))
}
func (r *receiver) start(ctx context.Context, _ component.Host) error {
@@ -115,7 +108,7 @@ func (r *receiver) scrape(ctx context.Context) (pmetric.Metrics, error) {
errs = multierr.Append(errs, scrapererror.NewPartialScrapeError(res.err, 0))
continue
}
- res.md.ResourceMetrics().CopyTo(md.ResourceMetrics())
+ res.md.ResourceMetrics().MoveAndAppendTo(md.ResourceMetrics())
}
return md, errs
diff --git a/receiver/dockerstatsreceiver/receiver_test.go b/receiver/dockerstatsreceiver/receiver_test.go
index 27cb37117b89..c560e0834d54 100644
--- a/receiver/dockerstatsreceiver/receiver_test.go
+++ b/receiver/dockerstatsreceiver/receiver_test.go
@@ -21,16 +21,27 @@ package dockerstatsreceiver
import (
"context"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "path/filepath"
+ "strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/component/componenttest"
- "go.opentelemetry.io/collector/consumer/consumertest"
+ "go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver/scraperhelper"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest/golden"
)
+var mockFolder = filepath.Join("testdata", "mock")
+
func TestNewReceiver(t *testing.T) {
cfg := &Config{
ScraperControllerSettings: scraperhelper.ScraperControllerSettings{
@@ -39,10 +50,8 @@ func TestNewReceiver(t *testing.T) {
Endpoint: "unix:///run/some.sock",
DockerAPIVersion: defaultDockerAPIVersion,
}
- nextConsumer := consumertest.NewNop()
- mr, err := NewReceiver(context.Background(), componenttest.NewNopReceiverCreateSettings(), cfg, nextConsumer)
+ mr := newReceiver(componenttest.NewNopReceiverCreateSettings(), cfg)
assert.NotNil(t, mr)
- assert.Nil(t, err)
}
func TestErrorsInStart(t *testing.T) {
@@ -54,19 +63,149 @@ func TestErrorsInStart(t *testing.T) {
Endpoint: unreachable,
DockerAPIVersion: defaultDockerAPIVersion,
}
- recv, err := NewReceiver(context.Background(), componenttest.NewNopReceiverCreateSettings(), cfg, consumertest.NewNop())
+ recv := newReceiver(componenttest.NewNopReceiverCreateSettings(), cfg)
assert.NotNil(t, recv)
- assert.Nil(t, err)
cfg.Endpoint = "..not/a/valid/endpoint"
- err = recv.Start(context.Background(), componenttest.NewNopHost())
+ err := recv.start(context.Background(), componenttest.NewNopHost())
assert.Error(t, err)
assert.Contains(t, err.Error(), "unable to parse docker host")
cfg.Endpoint = unreachable
- err = recv.Start(context.Background(), componenttest.NewNopHost())
+ err = recv.start(context.Background(), componenttest.NewNopHost())
assert.Error(t, err)
assert.Contains(t, err.Error(), "context deadline exceeded")
+}
+
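+// TestScrapes checks that the legacy scrape and the mdatagen-based scrapeV2 produce
+// equivalent metrics against mocked Docker Engine API responses.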
+func TestScrapes(t *testing.T) {
+ containerIDs := []string{
+ "10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326",
+ "89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7",
+ "a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce"}
+
+	singleContainerEngineMock, err := dockerMockServer(map[string]string{
+ "/v1.22/containers/json": filepath.Join(mockFolder, "single_container", "containers.json"),
+ "/v1.22/containers/" + containerIDs[0] + "/json": filepath.Join(mockFolder, "single_container", "container.json"),
+ "/v1.22/containers/" + containerIDs[0] + "/stats": filepath.Join(mockFolder, "single_container", "stats.json"),
+ })
+ assert.NoError(t, err)
+ defer singleContainerEngineMock.Close()
+
+	twoContainerEngineMock, err := dockerMockServer(map[string]string{
+ "/v1.22/containers/json": filepath.Join(mockFolder, "two_containers", "containers.json"),
+ "/v1.22/containers/" + containerIDs[1] + "/json": filepath.Join(mockFolder, "two_containers", "container1.json"),
+ "/v1.22/containers/" + containerIDs[2] + "/json": filepath.Join(mockFolder, "two_containers", "container2.json"),
+ "/v1.22/containers/" + containerIDs[1] + "/stats": filepath.Join(mockFolder, "two_containers", "stats1.json"),
+ "/v1.22/containers/" + containerIDs[2] + "/stats": filepath.Join(mockFolder, "two_containers", "stats2.json"),
+ })
+ assert.NoError(t, err)
+ defer twoContainerEngineMock.Close()
+
+ testCases := []struct {
+ desc string
+ scrape func(*receiver) (pmetric.Metrics, error)
+ expectedMetricsFile string
+ mockDockerEngine *httptest.Server
+ }{
+ {
+ desc: "scrapeV1_single_container",
+ scrape: func(rcv *receiver) (pmetric.Metrics, error) {
+ return rcv.scrape(context.Background())
+ },
+ expectedMetricsFile: filepath.Join(mockFolder, "single_container", "expected_metrics.json"),
+ mockDockerEngine: singleContainerEngineMock,
+ },
+ {
+ desc: "scrapeV2_single_container",
+ scrape: func(rcv *receiver) (pmetric.Metrics, error) {
+ return rcv.scrapeV2(context.Background())
+ },
+ expectedMetricsFile: filepath.Join(mockFolder, "single_container", "expected_metrics.json"),
+ mockDockerEngine: singleContainerEngineMock,
+ },
+ {
+ desc: "scrapeV1_two_containers",
+ scrape: func(rcv *receiver) (pmetric.Metrics, error) {
+ return rcv.scrape(context.Background())
+ },
+ expectedMetricsFile: filepath.Join(mockFolder, "two_containers", "expected_metrics.json"),
+ mockDockerEngine: twoContainerEngineMock,
+ },
+ {
+ desc: "scrapeV2_two_containers",
+ scrape: func(rcv *receiver) (pmetric.Metrics, error) {
+ return rcv.scrapeV2(context.Background())
+ },
+ expectedMetricsFile: filepath.Join(mockFolder, "two_containers", "expected_metrics.json"),
+ mockDockerEngine: twoContainerEngineMock,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.desc, func(t *testing.T) {
+ cfg := createDefaultConfig().(*Config)
+ cfg.Endpoint = tc.mockDockerEngine.URL
+ cfg.EnvVarsToMetricLabels = map[string]string{"ENV_VAR": "env-var-metric-label"}
+ cfg.ContainerLabelsToMetricLabels = map[string]string{"container.label": "container-metric-label"}
+ cfg.ProvidePerCoreCPUMetrics = true
+
+			recv := newReceiver(componenttest.NewNopReceiverCreateSettings(), cfg)
+			err := recv.start(context.Background(), componenttest.NewNopHost())
+			require.NoError(t, err)
+
+			actualMetrics, err := tc.scrape(recv)
+ require.NoError(t, err)
+
+ expectedMetrics, err := golden.ReadMetrics(tc.expectedMetricsFile)
+
+ if !strings.HasPrefix(tc.desc, "scrapeV1") {
+ // Unset various fields for comparison purposes (non-mdatagen implementation doesn't have these set)
+ for i := 0; i < actualMetrics.ResourceMetrics().Len(); i++ {
+ for j := 0; j < actualMetrics.ResourceMetrics().At(i).ScopeMetrics().Len(); j++ {
+ sm := actualMetrics.ResourceMetrics().At(i).ScopeMetrics().At(j)
+ sm.Scope().SetName("")
+ sm.Scope().SetVersion("")
+ for k := 0; k < sm.Metrics().Len(); k++ {
+ sm.Metrics().At(k).SetDescription("")
+ }
+ }
+ }
+ }
+ assert.NoError(t, err)
+ assert.NoError(t, scrapertest.CompareMetrics(expectedMetrics, actualMetrics))
+ })
+ }
+}
+
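+// dockerMockServer returns an HTTP server that mimics the Docker Engine API by serving
+// canned JSON files keyed by request path; unmapped paths return 404.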
+func dockerMockServer(urlToFile map[string]string) (*httptest.Server, error) {
+	urlToFileContents := make(map[string][]byte, len(urlToFile))
+	for urlPath, filePath := range urlToFile {
+		fileContents, err := os.ReadFile(filePath)
+		if err != nil {
+			return nil, err
+		}
+		urlToFileContents[urlPath] = fileContents
+ }
- require.NoError(t, recv.Shutdown(context.Background()))
+ return httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+ data, ok := urlToFileContents[req.URL.Path]
+ if !ok {
+ rw.WriteHeader(http.StatusNotFound)
+ return
+ }
+ rw.WriteHeader(http.StatusOK)
+ _, _ = rw.Write(data)
+ })), nil
}
diff --git a/receiver/dockerstatsreceiver/receiver_v2.go b/receiver/dockerstatsreceiver/receiver_v2.go
new file mode 100644
index 000000000000..4de2618f3c16
--- /dev/null
+++ b/receiver/dockerstatsreceiver/receiver_v2.go
@@ -0,0 +1,288 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dockerstatsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver"
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ dtypes "github.com/docker/docker/api/types"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/pmetric"
+ "go.opentelemetry.io/collector/receiver/scrapererror"
+ "go.uber.org/multierr"
+ "go.uber.org/zap"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver/internal/metadata"
+)
+
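+// defaultResourcesLen is the number of resource attributes always set for a container:
+// runtime, hostname, ID, image name, and name.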
+const defaultResourcesLen = 5
+
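+// resultV2 carries the outcome of a single container stats fetch back to the scrape loop.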
+type resultV2 struct {
+ stats *dtypes.StatsJSON
+ container *docker.Container
+ err error
+}
+
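+// scrapeV2 fetches stats for all tracked containers concurrently and records them through
+// the generated MetricsBuilder.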
+func (r *receiver) scrapeV2(ctx context.Context) (pmetric.Metrics, error) {
+ containers := r.client.Containers()
+ results := make(chan resultV2, len(containers))
+
+ wg := &sync.WaitGroup{}
+ wg.Add(len(containers))
+ for _, container := range containers {
+ go func(c docker.Container) {
+ defer wg.Done()
+ statsJSON, err := r.client.FetchContainerStatsAsJSON(ctx, c)
+ if err != nil {
+ results <- resultV2{nil, &c, err}
+ return
+ }
+
+ results <- resultV2{
+ stats: statsJSON,
+ container: &c,
+ err: nil}
+ }(container)
+ }
+
+ wg.Wait()
+ close(results)
+
+ var errs error
+
+ now := pcommon.NewTimestampFromTime(time.Now())
+ md := pmetric.NewMetrics()
+ for res := range results {
+ if res.err != nil {
+			// The number of failed data points is unknown, so report the failed container fetch as a partial scrape error.
+ errs = multierr.Append(errs, scrapererror.NewPartialScrapeError(res.err, 0))
+ continue
+ }
+ r.recordContainerStats(now, res.stats, res.container).ResourceMetrics().MoveAndAppendTo(md.ResourceMetrics())
+ }
+
+ return md, errs
+}
+
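+// recordContainerStats records all data points for one container and emits them under the
+// container's resource attributes, including any configured env-var and container-label mappings.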
+func (r *receiver) recordContainerStats(now pcommon.Timestamp, containerStats *dtypes.StatsJSON, container *docker.Container) pmetric.Metrics {
+ r.recordCPUMetrics(now, &containerStats.CPUStats, &containerStats.PreCPUStats)
+ r.recordMemoryMetrics(now, &containerStats.MemoryStats)
+ r.recordBlkioMetrics(now, &containerStats.BlkioStats)
+	r.recordNetworkMetrics(now, containerStats.Networks)
+
+ // Always-present resource attrs + the user-configured resource attrs
+ resourceCapacity := defaultResourcesLen + len(r.config.EnvVarsToMetricLabels) + len(r.config.ContainerLabelsToMetricLabels)
+ resourceMetricsOptions := make([]metadata.ResourceMetricsOption, 0, resourceCapacity)
+ resourceMetricsOptions = append(resourceMetricsOptions,
+ metadata.WithContainerRuntime("docker"),
+ metadata.WithContainerHostname(container.Config.Hostname),
+ metadata.WithContainerID(container.ID),
+ metadata.WithContainerImageName(container.Config.Image),
+ metadata.WithContainerName(strings.TrimPrefix(container.Name, "/")))
+
+ for k, label := range r.config.EnvVarsToMetricLabels {
+ if v := container.EnvMap[k]; v != "" {
+ resourceMetricsOptions = append(resourceMetricsOptions, func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString(label, v)
+ })
+ }
+ }
+ for k, label := range r.config.ContainerLabelsToMetricLabels {
+ if v := container.Config.Labels[k]; v != "" {
+ resourceMetricsOptions = append(resourceMetricsOptions, func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().UpsertString(label, v)
+ })
+ }
+ }
+
+ return r.mb.Emit(resourceMetricsOptions...)
+}
+
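+// recordMemoryMetrics records the fixed memory gauges, then dispatches each reported cgroup
+// memory stat to its recorder; stat names without a recorder are ignored.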
+func (r *receiver) recordMemoryMetrics(now pcommon.Timestamp, memoryStats *dtypes.MemoryStats) {
+ totalCache := memoryStats.Stats["total_cache"]
+ totalUsage := memoryStats.Usage - totalCache
+ r.mb.RecordContainerMemoryUsageMaxDataPoint(now, int64(memoryStats.MaxUsage))
+ r.mb.RecordContainerMemoryPercentDataPoint(now, calculateMemoryPercent(memoryStats))
+ r.mb.RecordContainerMemoryUsageTotalDataPoint(now, int64(totalUsage))
+ r.mb.RecordContainerMemoryUsageLimitDataPoint(now, int64(memoryStats.Limit))
+
+ recorders := map[string]func(pcommon.Timestamp, int64){
+ "cache": r.mb.RecordContainerMemoryCacheDataPoint,
+ "total_cache": r.mb.RecordContainerMemoryTotalCacheDataPoint,
+ "rss": r.mb.RecordContainerMemoryRssDataPoint,
+ "total_rss": r.mb.RecordContainerMemoryTotalRssDataPoint,
+ "rss_huge": r.mb.RecordContainerMemoryRssHugeDataPoint,
+ "total_rss_huge": r.mb.RecordContainerMemoryTotalRssHugeDataPoint,
+ "dirty": r.mb.RecordContainerMemoryDirtyDataPoint,
+ "total_dirty": r.mb.RecordContainerMemoryTotalDirtyDataPoint,
+ "writeback": r.mb.RecordContainerMemoryWritebackDataPoint,
+ "total_writeback": r.mb.RecordContainerMemoryTotalWritebackDataPoint,
+ "mapped_file": r.mb.RecordContainerMemoryMappedFileDataPoint,
+ "total_mapped_file": r.mb.RecordContainerMemoryTotalMappedFileDataPoint,
+ "pgpgin": r.mb.RecordContainerMemoryPgpginDataPoint,
+ "total_pgpgin": r.mb.RecordContainerMemoryTotalPgpginDataPoint,
+ "pgpgout": r.mb.RecordContainerMemoryPgpgoutDataPoint,
+ "total_pgpgout": r.mb.RecordContainerMemoryTotalPgpgoutDataPoint,
+ "swap": r.mb.RecordContainerMemorySwapDataPoint,
+ "total_swap": r.mb.RecordContainerMemoryTotalSwapDataPoint,
+ "pgfault": r.mb.RecordContainerMemoryPgfaultDataPoint,
+ "total_pgfault": r.mb.RecordContainerMemoryTotalPgfaultDataPoint,
+ "pgmajfault": r.mb.RecordContainerMemoryPgmajfaultDataPoint,
+ "total_pgmajfault": r.mb.RecordContainerMemoryTotalPgmajfaultDataPoint,
+ "inactive_anon": r.mb.RecordContainerMemoryInactiveAnonDataPoint,
+ "total_inactive_anon": r.mb.RecordContainerMemoryTotalInactiveAnonDataPoint,
+ "active_anon": r.mb.RecordContainerMemoryActiveAnonDataPoint,
+ "total_active_anon": r.mb.RecordContainerMemoryTotalActiveAnonDataPoint,
+ "inactive_file": r.mb.RecordContainerMemoryInactiveFileDataPoint,
+ "total_inactive_file": r.mb.RecordContainerMemoryTotalInactiveFileDataPoint,
+ "active_file": r.mb.RecordContainerMemoryActiveFileDataPoint,
+ "total_active_file": r.mb.RecordContainerMemoryTotalActiveFileDataPoint,
+ "unevictable": r.mb.RecordContainerMemoryUnevictableDataPoint,
+ "total_unevictable": r.mb.RecordContainerMemoryTotalUnevictableDataPoint,
+ "hierarchical_memory_limit": r.mb.RecordContainerMemoryHierarchicalMemoryLimitDataPoint,
+ "hierarchical_memsw_limit": r.mb.RecordContainerMemoryHierarchicalMemswLimitDataPoint,
+ }
+
+ for name, val := range memoryStats.Stats {
+ if recorder, ok := recorders[name]; ok {
+ recorder(now, int64(val))
+ }
+ }
+}
+
+type blkioRecorder func(now pcommon.Timestamp, val int64, devMaj string, devMin string)
+
+type blkioMapper struct {
+ opToRecorderMap map[string]blkioRecorder
+ entries []dtypes.BlkioStatEntry
+}
+
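+// recordBlkioMetrics dispatches every blkio stat entry to the recorder matching its operation,
+// attaching the device major and minor numbers as attributes.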
+func (r *receiver) recordBlkioMetrics(now pcommon.Timestamp, blkioStats *dtypes.BlkioStats) {
+	// These maps can be avoided once the operation is changed to an attribute instead of being in the metric name.
+ for _, blkioRecorder := range []blkioMapper{
+ {entries: blkioStats.IoMergedRecursive, opToRecorderMap: map[string]blkioRecorder{
+ "read": r.mb.RecordContainerBlockioIoMergedRecursiveReadDataPoint,
+ "write": r.mb.RecordContainerBlockioIoMergedRecursiveWriteDataPoint,
+ "sync": r.mb.RecordContainerBlockioIoMergedRecursiveSyncDataPoint,
+ "async": r.mb.RecordContainerBlockioIoMergedRecursiveAsyncDataPoint,
+ "discard": r.mb.RecordContainerBlockioIoMergedRecursiveDiscardDataPoint,
+ "total": r.mb.RecordContainerBlockioIoMergedRecursiveTotalDataPoint,
+ }},
+ {entries: blkioStats.IoQueuedRecursive, opToRecorderMap: map[string]blkioRecorder{
+ "read": r.mb.RecordContainerBlockioIoQueuedRecursiveReadDataPoint,
+ "write": r.mb.RecordContainerBlockioIoQueuedRecursiveWriteDataPoint,
+ "sync": r.mb.RecordContainerBlockioIoQueuedRecursiveSyncDataPoint,
+ "async": r.mb.RecordContainerBlockioIoQueuedRecursiveAsyncDataPoint,
+ "discard": r.mb.RecordContainerBlockioIoQueuedRecursiveDiscardDataPoint,
+ "total": r.mb.RecordContainerBlockioIoQueuedRecursiveTotalDataPoint,
+ }},
+ {entries: blkioStats.IoServiceBytesRecursive, opToRecorderMap: map[string]blkioRecorder{
+ "read": r.mb.RecordContainerBlockioIoServiceBytesRecursiveReadDataPoint,
+ "write": r.mb.RecordContainerBlockioIoServiceBytesRecursiveWriteDataPoint,
+ "sync": r.mb.RecordContainerBlockioIoServiceBytesRecursiveSyncDataPoint,
+ "async": r.mb.RecordContainerBlockioIoServiceBytesRecursiveAsyncDataPoint,
+ "discard": r.mb.RecordContainerBlockioIoServiceBytesRecursiveDiscardDataPoint,
+ "total": r.mb.RecordContainerBlockioIoServiceBytesRecursiveTotalDataPoint,
+ }},
+ {entries: blkioStats.IoServiceTimeRecursive, opToRecorderMap: map[string]blkioRecorder{
+ "read": r.mb.RecordContainerBlockioIoServiceTimeRecursiveReadDataPoint,
+ "write": r.mb.RecordContainerBlockioIoServiceTimeRecursiveWriteDataPoint,
+ "sync": r.mb.RecordContainerBlockioIoServiceTimeRecursiveSyncDataPoint,
+ "async": r.mb.RecordContainerBlockioIoServiceTimeRecursiveAsyncDataPoint,
+ "discard": r.mb.RecordContainerBlockioIoServiceTimeRecursiveDiscardDataPoint,
+ "total": r.mb.RecordContainerBlockioIoServiceTimeRecursiveTotalDataPoint,
+ }},
+ {entries: blkioStats.IoServicedRecursive, opToRecorderMap: map[string]blkioRecorder{
+ "read": r.mb.RecordContainerBlockioIoServicedRecursiveReadDataPoint,
+ "write": r.mb.RecordContainerBlockioIoServicedRecursiveWriteDataPoint,
+ "sync": r.mb.RecordContainerBlockioIoServicedRecursiveSyncDataPoint,
+ "async": r.mb.RecordContainerBlockioIoServicedRecursiveAsyncDataPoint,
+ "discard": r.mb.RecordContainerBlockioIoServicedRecursiveDiscardDataPoint,
+ "total": r.mb.RecordContainerBlockioIoServicedRecursiveTotalDataPoint,
+ }},
+ {entries: blkioStats.IoTimeRecursive, opToRecorderMap: map[string]blkioRecorder{
+ "read": r.mb.RecordContainerBlockioIoTimeRecursiveReadDataPoint,
+ "write": r.mb.RecordContainerBlockioIoTimeRecursiveWriteDataPoint,
+ "sync": r.mb.RecordContainerBlockioIoTimeRecursiveSyncDataPoint,
+ "async": r.mb.RecordContainerBlockioIoTimeRecursiveAsyncDataPoint,
+ "discard": r.mb.RecordContainerBlockioIoTimeRecursiveDiscardDataPoint,
+ "total": r.mb.RecordContainerBlockioIoTimeRecursiveTotalDataPoint,
+ }},
+ {entries: blkioStats.IoWaitTimeRecursive, opToRecorderMap: map[string]blkioRecorder{
+ "read": r.mb.RecordContainerBlockioIoWaitTimeRecursiveReadDataPoint,
+ "write": r.mb.RecordContainerBlockioIoWaitTimeRecursiveWriteDataPoint,
+ "sync": r.mb.RecordContainerBlockioIoWaitTimeRecursiveSyncDataPoint,
+ "async": r.mb.RecordContainerBlockioIoWaitTimeRecursiveAsyncDataPoint,
+ "discard": r.mb.RecordContainerBlockioIoWaitTimeRecursiveDiscardDataPoint,
+ "total": r.mb.RecordContainerBlockioIoWaitTimeRecursiveTotalDataPoint,
+ }},
+ {entries: blkioStats.SectorsRecursive, opToRecorderMap: map[string]blkioRecorder{
+ "read": r.mb.RecordContainerBlockioSectorsRecursiveReadDataPoint,
+ "write": r.mb.RecordContainerBlockioSectorsRecursiveWriteDataPoint,
+ "sync": r.mb.RecordContainerBlockioSectorsRecursiveSyncDataPoint,
+ "async": r.mb.RecordContainerBlockioSectorsRecursiveAsyncDataPoint,
+ "discard": r.mb.RecordContainerBlockioSectorsRecursiveDiscardDataPoint,
+ "total": r.mb.RecordContainerBlockioSectorsRecursiveTotalDataPoint,
+ }},
+ } {
+ for _, entry := range blkioRecorder.entries {
+ recorder, ok := blkioRecorder.opToRecorderMap[strings.ToLower(entry.Op)]
+ if !ok {
+ r.settings.Logger.Debug("Unknown operation in blockIO stats.", zap.String("operation", entry.Op))
+ continue
+ }
+ recorder(now, int64(entry.Value), strconv.FormatUint(entry.Major, 10), strconv.FormatUint(entry.Minor, 10))
+ }
+ }
+}
+
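+// recordNetworkMetrics records the per-interface network counters; it is a no-op if the
+// container reports no network stats.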
+func (r *receiver) recordNetworkMetrics(now pcommon.Timestamp, networks map[string]dtypes.NetworkStats) {
+	if networks == nil {
+		return
+	}
+
+	for netInterface, stats := range networks {
+ r.mb.RecordContainerNetworkIoUsageRxBytesDataPoint(now, int64(stats.RxBytes), netInterface)
+ r.mb.RecordContainerNetworkIoUsageTxBytesDataPoint(now, int64(stats.TxBytes), netInterface)
+ r.mb.RecordContainerNetworkIoUsageRxDroppedDataPoint(now, int64(stats.RxDropped), netInterface)
+ r.mb.RecordContainerNetworkIoUsageTxDroppedDataPoint(now, int64(stats.TxDropped), netInterface)
+ r.mb.RecordContainerNetworkIoUsageRxPacketsDataPoint(now, int64(stats.RxPackets), netInterface)
+ r.mb.RecordContainerNetworkIoUsageTxPacketsDataPoint(now, int64(stats.TxPackets), netInterface)
+ r.mb.RecordContainerNetworkIoUsageRxErrorsDataPoint(now, int64(stats.RxErrors), netInterface)
+ r.mb.RecordContainerNetworkIoUsageTxErrorsDataPoint(now, int64(stats.TxErrors), netInterface)
+ }
+}
+
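+// recordCPUMetrics records cumulative CPU usage and throttling metrics, the derived CPU
+// percentage, and, when enabled, per-core usage labeled cpu0, cpu1, and so on.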
+func (r *receiver) recordCPUMetrics(now pcommon.Timestamp, cpuStats *dtypes.CPUStats, prevStats *dtypes.CPUStats) {
+ r.mb.RecordContainerCPUUsageSystemDataPoint(now, int64(cpuStats.SystemUsage))
+ r.mb.RecordContainerCPUUsageTotalDataPoint(now, int64(cpuStats.CPUUsage.TotalUsage))
+ r.mb.RecordContainerCPUUsageKernelmodeDataPoint(now, int64(cpuStats.CPUUsage.UsageInKernelmode))
+ r.mb.RecordContainerCPUUsageUsermodeDataPoint(now, int64(cpuStats.CPUUsage.UsageInUsermode))
+ r.mb.RecordContainerCPUThrottlingDataThrottledPeriodsDataPoint(now, int64(cpuStats.ThrottlingData.ThrottledPeriods))
+ r.mb.RecordContainerCPUThrottlingDataPeriodsDataPoint(now, int64(cpuStats.ThrottlingData.Periods))
+ r.mb.RecordContainerCPUThrottlingDataThrottledTimeDataPoint(now, int64(cpuStats.ThrottlingData.ThrottledTime))
+ r.mb.RecordContainerCPUPercentDataPoint(now, calculateCPUPercent(prevStats, cpuStats))
+
+ for coreNum, v := range cpuStats.CPUUsage.PercpuUsage {
+		r.mb.RecordContainerCPUUsagePercpuDataPoint(now, int64(v), fmt.Sprintf("cpu%d", coreNum))
+ }
+}
diff --git a/receiver/dockerstatsreceiver/testdata/mock/single_container/container.json b/receiver/dockerstatsreceiver/testdata/mock/single_container/container.json
new file mode 100644
index 000000000000..5a3df7b67d4b
--- /dev/null
+++ b/receiver/dockerstatsreceiver/testdata/mock/single_container/container.json
@@ -0,0 +1,218 @@
+{
+ "AppArmorProfile": "",
+ "Args": [],
+ "Config": {
+ "AttachStderr": true,
+ "AttachStdin": true,
+ "AttachStdout": true,
+ "Cmd": [
+ "/bin/sh"
+ ],
+ "Domainname": "",
+ "Entrypoint": null,
+ "Env": [
+ "ENV_VAR=env-var",
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "8000/tcp": {}
+ },
+ "Hostname": "10b703fb312b",
+ "Image": "ubuntu",
+ "Labels": {
+ "container.label": "container-label"
+ },
+ "OnBuild": null,
+ "OpenStdin": true,
+ "StdinOnce": true,
+ "Tty": true,
+ "User": "",
+ "Volumes": null,
+ "WorkingDir": ""
+ },
+ "Created": "2022-07-06T04:17:29.79437Z",
+ "Driver": "overlay2",
+ "ExecIDs": null,
+ "GraphDriver": {
+ "Data": {
+ "LowerDir": "/var/lib/docker/overlay2/669689c31e0a0038beda956dc8ee195c30093890251f497fbee84131e6abe859-init/diff:/var/lib/docker/overlay2/f11adae41a6c3a10b6e8fd2440b5170d8ff4f9979eecb1b43c19e2a996c9937a/diff",
+ "MergedDir": "/var/lib/docker/overlay2/669689c31e0a0038beda956dc8ee195c30093890251f497fbee84131e6abe859/merged",
+ "UpperDir": "/var/lib/docker/overlay2/669689c31e0a0038beda956dc8ee195c30093890251f497fbee84131e6abe859/diff",
+ "WorkDir": "/var/lib/docker/overlay2/669689c31e0a0038beda956dc8ee195c30093890251f497fbee84131e6abe859/work"
+ },
+ "Name": "overlay2"
+ },
+ "HostConfig": {
+ "AutoRemove": false,
+ "Binds": null,
+ "BlkioDeviceReadBps": null,
+ "BlkioDeviceReadIOps": null,
+ "BlkioDeviceWriteBps": null,
+ "BlkioDeviceWriteIOps": null,
+ "BlkioWeight": 0,
+ "BlkioWeightDevice": [],
+ "CapAdd": null,
+ "CapDrop": null,
+ "Cgroup": "",
+ "CgroupParent": "",
+ "CgroupnsMode": "host",
+ "ConsoleSize": [
+ 0,
+ 0
+ ],
+ "ContainerIDFile": "",
+ "CpuCount": 0,
+ "CpuPercent": 0,
+ "CpuPeriod": 0,
+ "CpuQuota": 0,
+ "CpuRealtimePeriod": 0,
+ "CpuRealtimeRuntime": 0,
+ "CpuShares": 0,
+ "CpusetCpus": "",
+ "CpusetMems": "",
+ "DeviceCgroupRules": null,
+ "DeviceRequests": null,
+ "Devices": [],
+ "Dns": [],
+ "DnsOptions": [],
+ "DnsSearch": [],
+ "ExtraHosts": null,
+ "GroupAdd": null,
+ "IOMaximumBandwidth": 0,
+ "IOMaximumIOps": 0,
+ "IpcMode": "private",
+ "Isolation": "",
+ "KernelMemory": 0,
+ "KernelMemoryTCP": 0,
+ "Links": null,
+ "LogConfig": {
+ "Config": {},
+ "Type": "json-file"
+ },
+ "MaskedPaths": [
+ "/proc/asound",
+ "/proc/acpi",
+ "/proc/kcore",
+ "/proc/keys",
+ "/proc/latency_stats",
+ "/proc/timer_list",
+ "/proc/timer_stats",
+ "/proc/sched_debug",
+ "/proc/scsi",
+ "/sys/firmware"
+ ],
+ "Memory": 0,
+ "MemoryReservation": 0,
+ "MemorySwap": 0,
+ "MemorySwappiness": null,
+ "NanoCpus": 0,
+ "NetworkMode": "default",
+ "OomKillDisable": false,
+ "OomScoreAdj": 0,
+ "PidMode": "",
+ "PidsLimit": null,
+ "PortBindings": {
+ "8000/tcp": [
+ {
+ "HostIp": "",
+ "HostPort": "8000"
+ }
+ ]
+ },
+ "Privileged": false,
+ "PublishAllPorts": false,
+ "ReadonlyPaths": [
+ "/proc/bus",
+ "/proc/fs",
+ "/proc/irq",
+ "/proc/sys",
+ "/proc/sysrq-trigger"
+ ],
+ "ReadonlyRootfs": false,
+ "RestartPolicy": {
+ "MaximumRetryCount": 0,
+ "Name": "no"
+ },
+ "Runtime": "runc",
+ "SecurityOpt": null,
+ "ShmSize": 67108864,
+ "UTSMode": "",
+ "Ulimits": null,
+ "UsernsMode": "",
+ "VolumeDriver": "",
+ "VolumesFrom": null
+ },
+ "HostnamePath": "/var/lib/docker/containers/10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326/hostname",
+ "HostsPath": "/var/lib/docker/containers/10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326/hosts",
+ "Id": "10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326",
+ "Image": "sha256:825d55fb6340083b06e69e02e823a02918f3ffb575ed2a87026d4645a7fd9e1b",
+ "LogPath": "/var/lib/docker/containers/10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326/10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326-json.log",
+ "MountLabel": "",
+ "Mounts": [],
+ "Name": "/bold_sinoussi",
+ "NetworkSettings": {
+ "Bridge": "",
+ "EndpointID": "e844b423ff61ed07aac37c6d9997903ee4771ccffc31ba7dbb3f58f364724170",
+ "Gateway": "10.255.0.1",
+ "GlobalIPv6Address": "",
+ "GlobalIPv6PrefixLen": 0,
+ "HairpinMode": false,
+ "IPAddress": "10.255.0.2",
+ "IPPrefixLen": 24,
+ "IPv6Gateway": "",
+ "LinkLocalIPv6Address": "",
+ "LinkLocalIPv6PrefixLen": 0,
+ "MacAddress": "02:42:0a:ff:00:01",
+ "Networks": {
+ "bridge": {
+ "Aliases": null,
+ "DriverOpts": null,
+ "EndpointID": "e844b423ff61ed07aac37c6d9997903ee4771ccffc31ba7dbb3f58f364724170",
+ "Gateway": "10.255.0.1",
+ "GlobalIPv6Address": "",
+ "GlobalIPv6PrefixLen": 0,
+ "IPAMConfig": null,
+ "IPAddress": "10.255.0.2",
+ "IPPrefixLen": 24,
+ "IPv6Gateway": "",
+ "Links": null,
+ "MacAddress": "02:42:0a:ff:00:01",
+ "NetworkID": "c44102203908a4202f675742fcc2384849f4d0b5534d7fb74fd0f3ea7dbee928"
+ }
+ },
+ "Ports": {
+ "8000/tcp": [
+ {
+ "HostIp": "0.0.0.0",
+ "HostPort": "8000"
+ },
+ {
+ "HostIp": "::",
+ "HostPort": "8000"
+ }
+ ]
+ },
+ "SandboxID": "b83b7db7e06d3ba7c4c05208d41d327b0be0e17bfb50a9a57f4d9a31f0fdd662",
+ "SandboxKey": "/var/run/docker/netns/b83b7db7e06d",
+ "SecondaryIPAddresses": null,
+ "SecondaryIPv6Addresses": null
+ },
+ "Path": "/bin/sh",
+ "Platform": "linux",
+ "ProcessLabel": "",
+ "ResolvConfPath": "/var/lib/docker/containers/10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326/resolv.conf",
+ "RestartCount": 0,
+ "State": {
+ "Dead": false,
+ "Error": "",
+ "ExitCode": 0,
+ "FinishedAt": "0001-01-01T00:00:00Z",
+ "OOMKilled": false,
+ "Paused": false,
+ "Pid": 2968,
+ "Restarting": false,
+ "Running": true,
+ "StartedAt": "2022-07-06T04:17:30.2570682Z",
+ "Status": "running"
+ }
+}
diff --git a/receiver/dockerstatsreceiver/testdata/mock/single_container/containers.json b/receiver/dockerstatsreceiver/testdata/mock/single_container/containers.json
new file mode 100644
index 000000000000..10be2e6878d5
--- /dev/null
+++ b/receiver/dockerstatsreceiver/testdata/mock/single_container/containers.json
@@ -0,0 +1,54 @@
+[
+ {
+ "Command": "/bin/sh",
+ "Created": 1657081049,
+ "HostConfig": {
+ "NetworkMode": "default"
+ },
+ "Id": "10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326",
+ "Image": "ubuntu",
+ "ImageID": "sha256:825d55fb6340083b06e69e02e823a02918f3ffb575ed2a87026d4645a7fd9e1b",
+ "Labels": {
+ "container.label": "container-label"
+ },
+ "Mounts": [],
+ "Names": [
+ "/bold_sinoussi"
+ ],
+ "NetworkSettings": {
+ "Networks": {
+ "bridge": {
+ "Aliases": null,
+ "DriverOpts": null,
+ "EndpointID": "e844b423ff61ed07aac37c6d9997903ee4771ccffc31ba7dbb3f58f364724170",
+ "Gateway": "10.255.0.1",
+ "GlobalIPv6Address": "",
+ "GlobalIPv6PrefixLen": 0,
+ "IPAMConfig": null,
+ "IPAddress": "10.255.0.2",
+ "IPPrefixLen": 24,
+ "IPv6Gateway": "",
+ "Links": null,
+ "MacAddress": "02:42:0a:ff:00:02",
+ "NetworkID": "c44102203908a4202f675742fcc2384849f4d0b5534d7fb74fd0f3ea7dbee928"
+ }
+ }
+ },
+ "Ports": [
+ {
+ "IP": "0.0.0.0",
+ "PrivatePort": 8000,
+ "PublicPort": 8000,
+ "Type": "tcp"
+ },
+ {
+ "IP": "::",
+ "PrivatePort": 8000,
+ "PublicPort": 8000,
+ "Type": "tcp"
+ }
+ ],
+ "State": "running",
+ "Status": "Up 3 minutes"
+ }
+]
diff --git a/receiver/dockerstatsreceiver/testdata/mock/single_container/expected_metrics.json b/receiver/dockerstatsreceiver/testdata/mock/single_container/expected_metrics.json
new file mode 100644
index 000000000000..809a119caeac
--- /dev/null
+++ b/receiver/dockerstatsreceiver/testdata/mock/single_container/expected_metrics.json
@@ -0,0 +1,1236 @@
+{
+ "resourceMetrics": [
+ {
+ "resource": {
+ "attributes": [
+ {
+ "key": "container.runtime",
+ "value": {
+ "stringValue": "docker"
+ }
+ },
+ {
+ "key": "container.id",
+ "value": {
+ "stringValue": "10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326"
+ }
+ },
+ {
+ "key": "container.image.name",
+ "value": {
+ "stringValue": "ubuntu"
+ }
+ },
+ {
+ "key": "container.name",
+ "value": {
+ "stringValue": "bold_sinoussi"
+ }
+ },
+ {
+ "key": "container.hostname",
+ "value": {
+ "stringValue": "10b703fb312b"
+ }
+ },
+ {
+ "key": "env-var-metric-label",
+ "value": {
+ "stringValue": "env-var"
+ }
+ },
+ {
+ "key": "container-metric-label",
+ "value": {
+ "stringValue": "container-label"
+ }
+ }
+ ]
+ },
+ "schemaUrl": "https://opentelemetry.io/schemas/1.6.1",
+ "scopeMetrics": [
+ {
+ "metrics": [
+ {
+ "name": "container.blockio.io_service_bytes_recursive.read",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "2502656",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "254"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.blockio.io_service_bytes_recursive.write",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "254"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.blockio.io_service_bytes_recursive.sync",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "2502656",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "254"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.blockio.io_service_bytes_recursive.async",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "254"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.blockio.io_service_bytes_recursive.discard",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "254"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.blockio.io_service_bytes_recursive.total",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "2502656",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "254"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.blockio.io_serviced_recursive.read",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "99",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "254"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.blockio.io_serviced_recursive.write",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "254"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.blockio.io_serviced_recursive.sync",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "99",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "254"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.blockio.io_serviced_recursive.async",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "254"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.blockio.io_serviced_recursive.discard",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "254"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.blockio.io_serviced_recursive.total",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "99",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "254"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.cpu.usage.system",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "120830550000000",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "ns"
+ },
+ {
+ "name": "container.cpu.usage.total",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "43620018",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "ns"
+ },
+ {
+ "name": "container.cpu.usage.kernelmode",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "10000000",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "ns"
+ },
+ {
+ "name": "container.cpu.usage.usermode",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "10000000",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "ns"
+ },
+ {
+ "name": "container.cpu.throttling_data.periods",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.cpu.throttling_data.throttled_periods",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.cpu.throttling_data.throttled_time",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "ns"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asDouble": 0.0002888012543185477,
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.cpu.percent",
+ "unit": "1"
+ },
+ {
+ "name": "container.cpu.usage.percpu",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "1415045",
+ "attributes": [
+ {
+ "key": "core",
+ "value": {
+ "stringValue": "cpu0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ },
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "core",
+ "value": {
+ "stringValue": "cpu1"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ },
+ {
+ "asInt": "262690",
+ "attributes": [
+ {
+ "key": "core",
+ "value": {
+ "stringValue": "cpu2"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ },
+ {
+ "asInt": "762532",
+ "attributes": [
+ {
+ "key": "core",
+ "value": {
+ "stringValue": "cpu3"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ },
+ {
+ "asInt": "78532",
+ "attributes": [
+ {
+ "key": "core",
+ "value": {
+ "stringValue": "cpu4"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ },
+ {
+ "asInt": "28108575",
+ "attributes": [
+ {
+ "key": "core",
+ "value": {
+ "stringValue": "cpu5"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ },
+ {
+ "asInt": "8800811",
+ "attributes": [
+ {
+ "key": "core",
+ "value": {
+ "stringValue": "cpu6"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ },
+ {
+ "asInt": "4191833",
+ "attributes": [
+ {
+ "key": "core",
+ "value": {
+ "stringValue": "cpu7"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "ns"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "10449559552",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.usage.limit",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "454656",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.usage.total",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asDouble": 0.004350958504399172,
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.percent",
+ "unit": "1"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "3932160",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.usage.max",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.active_anon",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "270336",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.active_file",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "2433024",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.cache",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.dirty",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "9223372036854772000",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.hierarchical_memory_limit",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "9223372036854772000",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.hierarchical_memsw_limit",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.inactive_anon",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "2162688",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.inactive_file",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "1486848",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.mapped_file",
+ "unit": "By"
+ },
+ {
+ "name": "container.memory.pgfault",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "990",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.memory.pgmajfault",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.memory.pgpgin",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "1287",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.memory.pgpgout",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "667",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.rss",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.rss_huge",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.total_active_anon",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "270336",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.total_active_file",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "2433024",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.total_cache",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.total_dirty",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.total_inactive_anon",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "2162688",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.total_inactive_file",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "1486848",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.total_mapped_file",
+ "unit": "By"
+ },
+ {
+ "name": "container.memory.total_pgfault",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "990",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.memory.total_pgmajfault",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.memory.total_pgpgin",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "1287",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.memory.total_pgpgout",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "667",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.total_rss",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.total_rss_huge",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.total_unevictable",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.total_writeback",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.unevictable",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771705535206000"
+ }
+ ]
+ },
+ "name": "container.memory.writeback",
+ "unit": "By"
+ },
+ {
+ "name": "container.network.io.usage.rx_bytes",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "1532",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.network.io.usage.tx_bytes",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.network.io.usage.rx_dropped",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.network.io.usage.rx_errors",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.network.io.usage.rx_packets",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "18",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.network.io.usage.tx_dropped",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.network.io.usage.tx_errors",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.network.io.usage.tx_packets",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771705535206000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ }
+ ],
+ "scope": {}
+ }
+ ]
+ }
+ ]
+}
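
Note: the golden file above is OTLP/JSON, so it can be decoded directly into pdata for comparison in tests. A minimal sketch of loading it, assuming the JSON unmarshaler in go.opentelemetry.io/collector/pdata/pmetric; the tests in this PR may use shared golden-file helpers instead.

package dockerstatsreceiver

import (
	"os"
	"testing"

	"github.com/stretchr/testify/require"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// loadExpectedMetrics reads a golden OTLP/JSON fixture such as
// testdata/mock/single_container/expected_metrics.json into pmetric.Metrics.
func loadExpectedMetrics(t *testing.T, path string) pmetric.Metrics {
	t.Helper()
	raw, err := os.ReadFile(path)
	require.NoError(t, err)
	metrics, err := pmetric.NewJSONUnmarshaler().UnmarshalMetrics(raw)
	require.NoError(t, err)
	return metrics
}
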
diff --git a/receiver/dockerstatsreceiver/testdata/mock/single_container/stats.json b/receiver/dockerstatsreceiver/testdata/mock/single_container/stats.json
new file mode 100644
index 000000000000..f0b5d6110ab1
--- /dev/null
+++ b/receiver/dockerstatsreceiver/testdata/mock/single_container/stats.json
@@ -0,0 +1,182 @@
+{
+ "blkio_stats": {
+ "io_merged_recursive": [],
+ "io_queue_recursive": [],
+ "io_service_bytes_recursive": [
+ {
+ "major": 254,
+ "minor": 0,
+ "op": "Read",
+ "value": 2502656
+ },
+ {
+ "major": 254,
+ "minor": 0,
+ "op": "Write",
+ "value": 0
+ },
+ {
+ "major": 254,
+ "minor": 0,
+ "op": "Sync",
+ "value": 2502656
+ },
+ {
+ "major": 254,
+ "minor": 0,
+ "op": "Async",
+ "value": 0
+ },
+ {
+ "major": 254,
+ "minor": 0,
+ "op": "Discard",
+ "value": 0
+ },
+ {
+ "major": 254,
+ "minor": 0,
+ "op": "Total",
+ "value": 2502656
+ }
+ ],
+ "io_service_time_recursive": [],
+ "io_serviced_recursive": [
+ {
+ "major": 254,
+ "minor": 0,
+ "op": "Read",
+ "value": 99
+ },
+ {
+ "major": 254,
+ "minor": 0,
+ "op": "Write",
+ "value": 0
+ },
+ {
+ "major": 254,
+ "minor": 0,
+ "op": "Sync",
+ "value": 99
+ },
+ {
+ "major": 254,
+ "minor": 0,
+ "op": "Async",
+ "value": 0
+ },
+ {
+ "major": 254,
+ "minor": 0,
+ "op": "Discard",
+ "value": 0
+ },
+ {
+ "major": 254,
+ "minor": 0,
+ "op": "Total",
+ "value": 99
+ }
+ ],
+ "io_time_recursive": [],
+ "io_wait_time_recursive": [],
+ "sectors_recursive": []
+ },
+ "cpu_stats": {
+ "cpu_usage": {
+ "percpu_usage": [
+ 1415045,
+ 0,
+ 262690,
+ 762532,
+ 78532,
+ 28108575,
+ 8800811,
+ 4191833
+ ],
+ "total_usage": 43620018,
+ "usage_in_kernelmode": 10000000,
+ "usage_in_usermode": 10000000
+ },
+ "online_cpus": 8,
+ "system_cpu_usage": 120830550000000,
+ "throttling_data": {
+ "periods": 0,
+ "throttled_periods": 0,
+ "throttled_time": 0
+ }
+ },
+ "id": "10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326",
+ "memory_stats": {
+ "limit": 10449559552,
+ "max_usage": 3932160,
+ "stats": {
+ "active_anon": 0,
+ "active_file": 270336,
+ "cache": 2433024,
+ "dirty": 0,
+ "hierarchical_memory_limit": 9223372036854772000,
+ "hierarchical_memsw_limit": 9223372036854772000,
+ "inactive_anon": 0,
+ "inactive_file": 2162688,
+ "mapped_file": 1486848,
+ "pgfault": 990,
+ "pgmajfault": 0,
+ "pgpgin": 1287,
+ "pgpgout": 667,
+ "rss": 0,
+ "rss_huge": 0,
+ "total_active_anon": 0,
+ "total_active_file": 270336,
+ "total_cache": 2433024,
+ "total_dirty": 0,
+ "total_inactive_anon": 0,
+ "total_inactive_file": 2162688,
+ "total_mapped_file": 1486848,
+ "total_pgfault": 990,
+ "total_pgmajfault": 0,
+ "total_pgpgin": 1287,
+ "total_pgpgout": 667,
+ "total_rss": 0,
+ "total_rss_huge": 0,
+ "total_unevictable": 0,
+ "total_writeback": 0,
+ "unevictable": 0,
+ "writeback": 0
+ },
+ "usage": 2887680
+ },
+ "name": "/bold_sinoussi",
+ "networks": {
+ "eth0": {
+ "rx_bytes": 1532,
+ "rx_dropped": 0,
+ "rx_errors": 0,
+ "rx_packets": 18,
+ "tx_bytes": 0,
+ "tx_dropped": 0,
+ "tx_errors": 0,
+ "tx_packets": 0
+ }
+ },
+ "num_procs": 0,
+ "pids_stats": {
+ "current": 1
+ },
+ "precpu_stats": {
+ "cpu_usage": {
+ "total_usage": 0,
+ "usage_in_kernelmode": 0,
+ "usage_in_usermode": 0
+ },
+ "throttling_data": {
+ "periods": 0,
+ "throttled_periods": 0,
+ "throttled_time": 0
+ }
+ },
+ "preread": "0001-01-01T00:00:00Z",
+ "read": "2022-07-06T04:27:03.0439251Z",
+ "storage_stats": {}
+}
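
Note: stats.json mirrors the response body of the Docker Engine API's GET /containers/{id}/stats endpoint, which lets an HTTP test server stand in for a real daemon socket. A hedged sketch of serving these fixtures via net/http/httptest; the routing below is illustrative, not necessarily how this PR's mock server is implemented.

package dockerstatsreceiver

import (
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"strings"
)

// newMockDockerEngine serves the JSON fixtures in fixtureDir in place of a
// real Docker daemon: /containers/json returns the container list,
// /containers/{id}/stats returns stats.json, and anything else is treated
// as an inspect request.
func newMockDockerEngine(fixtureDir string) *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var name string
		switch {
		case strings.HasSuffix(r.URL.Path, "/stats"):
			name = "stats.json"
		case strings.HasSuffix(r.URL.Path, "/containers/json"):
			name = "containers.json"
		default:
			name = "container.json"
		}
		raw, err := os.ReadFile(filepath.Join(fixtureDir, name))
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write(raw)
	}))
}

The scraper can then be pointed at the fixture server by setting the receiver's Endpoint to the httptest server's URL, so the Docker client under test talks to the fixtures instead of /var/run/docker.sock.
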
diff --git a/receiver/dockerstatsreceiver/testdata/mock/two_containers/container1.json b/receiver/dockerstatsreceiver/testdata/mock/two_containers/container1.json
new file mode 100644
index 000000000000..05f77ae4b159
--- /dev/null
+++ b/receiver/dockerstatsreceiver/testdata/mock/two_containers/container1.json
@@ -0,0 +1,218 @@
+{
+ "AppArmorProfile": "docker-default",
+ "Args": [],
+ "Config": {
+ "AttachStderr": true,
+ "AttachStdin": true,
+ "AttachStdout": true,
+ "Cmd": [
+ "/bin/sh"
+ ],
+ "Domainname": "",
+ "Entrypoint": null,
+ "Env": [
+ "ENV_VAR=env-var2",
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "8001/tcp": {}
+ },
+ "Hostname": "89d28931fd8b",
+ "Image": "alpine",
+ "Labels": {
+ "container.label": "container-label2"
+ },
+ "OnBuild": null,
+ "OpenStdin": true,
+ "StdinOnce": true,
+ "Tty": true,
+ "User": "",
+ "Volumes": null,
+ "WorkingDir": ""
+ },
+ "Created": "2022-07-12T00:43:40.734856595Z",
+ "Driver": "overlay2",
+ "ExecIDs": null,
+ "GraphDriver": {
+ "Data": {
+ "LowerDir": "/var/lib/docker/overlay2/081b7392f02fe4752c4a9f0fbe03e2c4be635367abfdf746c734d00f2b2502a0-init/diff:/var/lib/docker/overlay2/371c57e1d897cef04ad750fd2d5d31a89ce46c876ba7a0ff2ce5f0bb3a273428/diff",
+ "MergedDir": "/var/lib/docker/overlay2/081b7392f02fe4752c4a9f0fbe03e2c4be635367abfdf746c734d00f2b2502a0/merged",
+ "UpperDir": "/var/lib/docker/overlay2/081b7392f02fe4752c4a9f0fbe03e2c4be635367abfdf746c734d00f2b2502a0/diff",
+ "WorkDir": "/var/lib/docker/overlay2/081b7392f02fe4752c4a9f0fbe03e2c4be635367abfdf746c734d00f2b2502a0/work"
+ },
+ "Name": "overlay2"
+ },
+ "HostConfig": {
+ "AutoRemove": false,
+ "Binds": null,
+ "BlkioDeviceReadBps": null,
+ "BlkioDeviceReadIOps": null,
+ "BlkioDeviceWriteBps": null,
+ "BlkioDeviceWriteIOps": null,
+ "BlkioWeight": 0,
+ "BlkioWeightDevice": [],
+ "CapAdd": null,
+ "CapDrop": null,
+ "Cgroup": "",
+ "CgroupParent": "",
+ "CgroupnsMode": "host",
+ "ConsoleSize": [
+ 0,
+ 0
+ ],
+ "ContainerIDFile": "",
+ "CpuCount": 0,
+ "CpuPercent": 0,
+ "CpuPeriod": 0,
+ "CpuQuota": 0,
+ "CpuRealtimePeriod": 0,
+ "CpuRealtimeRuntime": 0,
+ "CpuShares": 0,
+ "CpusetCpus": "",
+ "CpusetMems": "",
+ "DeviceCgroupRules": null,
+ "DeviceRequests": null,
+ "Devices": [],
+ "Dns": [],
+ "DnsOptions": [],
+ "DnsSearch": [],
+ "ExtraHosts": null,
+ "GroupAdd": null,
+ "IOMaximumBandwidth": 0,
+ "IOMaximumIOps": 0,
+ "IpcMode": "private",
+ "Isolation": "",
+ "KernelMemory": 0,
+ "KernelMemoryTCP": 0,
+ "Links": null,
+ "LogConfig": {
+ "Config": {},
+ "Type": "json-file"
+ },
+ "MaskedPaths": [
+ "/proc/asound",
+ "/proc/acpi",
+ "/proc/kcore",
+ "/proc/keys",
+ "/proc/latency_stats",
+ "/proc/timer_list",
+ "/proc/timer_stats",
+ "/proc/sched_debug",
+ "/proc/scsi",
+ "/sys/firmware"
+ ],
+ "Memory": 0,
+ "MemoryReservation": 0,
+ "MemorySwap": 0,
+ "MemorySwappiness": null,
+ "NanoCpus": 0,
+ "NetworkMode": "default",
+ "OomKillDisable": false,
+ "OomScoreAdj": 0,
+ "PidMode": "",
+ "PidsLimit": null,
+ "PortBindings": {
+ "8001/tcp": [
+ {
+ "HostIp": "",
+ "HostPort": "8001"
+ }
+ ]
+ },
+ "Privileged": false,
+ "PublishAllPorts": false,
+ "ReadonlyPaths": [
+ "/proc/bus",
+ "/proc/fs",
+ "/proc/irq",
+ "/proc/sys",
+ "/proc/sysrq-trigger"
+ ],
+ "ReadonlyRootfs": false,
+ "RestartPolicy": {
+ "MaximumRetryCount": 0,
+ "Name": "no"
+ },
+ "Runtime": "runc",
+ "SecurityOpt": null,
+ "ShmSize": 67108864,
+ "UTSMode": "",
+ "Ulimits": null,
+ "UsernsMode": "",
+ "VolumeDriver": "",
+ "VolumesFrom": null
+ },
+ "HostnamePath": "/var/lib/docker/containers/89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7/hostname",
+ "HostsPath": "/var/lib/docker/containers/89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7/hosts",
+ "Id": "89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7",
+ "Image": "sha256:e66264b98777e12192600bf9b4d663655c98a090072e1bab49e233d7531d1294",
+ "LogPath": "/var/lib/docker/containers/89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7/89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7-json.log",
+ "MountLabel": "",
+ "Mounts": [],
+ "Name": "/loving_torvalds",
+ "NetworkSettings": {
+ "Bridge": "",
+ "EndpointID": "9990c2e6968d0c4529bc0eef50fcefebe60fc22d698ad16dd786723f8d098913",
+ "Gateway": "172.17.0.1",
+ "GlobalIPv6Address": "",
+ "GlobalIPv6PrefixLen": 0,
+ "HairpinMode": false,
+ "IPAddress": "172.17.0.3",
+ "IPPrefixLen": 16,
+ "IPv6Gateway": "",
+ "LinkLocalIPv6Address": "",
+ "LinkLocalIPv6PrefixLen": 0,
+ "MacAddress": "02:42:ac:11:00:03",
+ "Networks": {
+ "bridge": {
+ "Aliases": null,
+ "DriverOpts": null,
+ "EndpointID": "9990c2e6968d0c4529bc0eef50fcefebe60fc22d698ad16dd786723f8d098913",
+ "Gateway": "172.17.0.1",
+ "GlobalIPv6Address": "",
+ "GlobalIPv6PrefixLen": 0,
+ "IPAMConfig": null,
+ "IPAddress": "172.17.0.3",
+ "IPPrefixLen": 16,
+ "IPv6Gateway": "",
+ "Links": null,
+ "MacAddress": "02:42:ac:11:00:03",
+ "NetworkID": "5426c33d912cdac32013f6bf1135ec0dc9319fed4e7a3a9cc6d86e7807030a60"
+ }
+ },
+ "Ports": {
+ "8001/tcp": [
+ {
+ "HostIp": "0.0.0.0",
+ "HostPort": "8001"
+ },
+ {
+ "HostIp": "::",
+ "HostPort": "8001"
+ }
+ ]
+ },
+ "SandboxID": "1b89b33b21d133dba8e55013eb5da5462e33d9f3faf528ff6d1dd999a79b9b7e",
+ "SandboxKey": "/var/run/docker/netns/1b89b33b21d1",
+ "SecondaryIPAddresses": null,
+ "SecondaryIPv6Addresses": null
+ },
+ "Path": "/bin/sh",
+ "Platform": "linux",
+ "ProcessLabel": "",
+ "ResolvConfPath": "/var/lib/docker/containers/89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7/resolv.conf",
+ "RestartCount": 0,
+ "State": {
+ "Dead": false,
+ "Error": "",
+ "ExitCode": 0,
+ "FinishedAt": "0001-01-01T00:00:00Z",
+ "OOMKilled": false,
+ "Paused": false,
+ "Pid": 2429,
+ "Restarting": false,
+ "Running": true,
+ "StartedAt": "2022-07-12T00:43:41.187539768Z",
+ "Status": "running"
+ }
+}
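
Note: this inspect payload, paired with the expected_metrics.json below, also exercises label extraction: ENV_VAR=env-var2 from Config.Env and the container.label Docker label resurface as the env-var-metric-label and container-metric-label resource attributes. A sketch of the settings implied by that mapping; the Go field names are assumptions based on the receiver's documented env_vars_to_metric_labels and container_labels_to_metric_labels options, not confirmed against this PR.

package dockerstatsreceiver

// labelMappingConfig builds the label mapping the fixtures appear to rely
// on. Field names are assumed, not confirmed against this PR.
func labelMappingConfig() *Config {
	cfg := &Config{}
	cfg.EnvVarsToMetricLabels = map[string]string{"ENV_VAR": "env-var-metric-label"}
	cfg.ContainerLabelsToMetricLabels = map[string]string{"container.label": "container-metric-label"}
	return cfg
}
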
diff --git a/receiver/dockerstatsreceiver/testdata/mock/two_containers/container2.json b/receiver/dockerstatsreceiver/testdata/mock/two_containers/container2.json
new file mode 100644
index 000000000000..519406f3a5c7
--- /dev/null
+++ b/receiver/dockerstatsreceiver/testdata/mock/two_containers/container2.json
@@ -0,0 +1,218 @@
+{
+ "AppArmorProfile": "docker-default",
+ "Args": [],
+ "Config": {
+ "AttachStderr": true,
+ "AttachStdin": true,
+ "AttachStdout": true,
+ "Cmd": [
+ "/bin/sh"
+ ],
+ "Domainname": "",
+ "Entrypoint": null,
+ "Env": [
+ "ENV_VAR=env-var",
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "8000/tcp": {}
+ },
+ "Hostname": "a359c0fc87c5",
+ "Image": "ubuntu",
+ "Labels": {
+ "container.label": "container-label"
+ },
+ "OnBuild": null,
+ "OpenStdin": true,
+ "StdinOnce": true,
+ "Tty": true,
+ "User": "",
+ "Volumes": null,
+ "WorkingDir": ""
+ },
+ "Created": "2022-07-12T00:42:44.766615793Z",
+ "Driver": "overlay2",
+ "ExecIDs": null,
+ "GraphDriver": {
+ "Data": {
+ "LowerDir": "/var/lib/docker/overlay2/b8a47a4fec0b50b641c2f89f975ac94c10ef071bfa2b5271e6f292c98769fbe3-init/diff:/var/lib/docker/overlay2/60f77c4f71ec7f5a6e39c5ac1111f22bb03e9b02b9ae723f575c905fc932cb19/diff",
+ "MergedDir": "/var/lib/docker/overlay2/b8a47a4fec0b50b641c2f89f975ac94c10ef071bfa2b5271e6f292c98769fbe3/merged",
+ "UpperDir": "/var/lib/docker/overlay2/b8a47a4fec0b50b641c2f89f975ac94c10ef071bfa2b5271e6f292c98769fbe3/diff",
+ "WorkDir": "/var/lib/docker/overlay2/b8a47a4fec0b50b641c2f89f975ac94c10ef071bfa2b5271e6f292c98769fbe3/work"
+ },
+ "Name": "overlay2"
+ },
+ "HostConfig": {
+ "AutoRemove": false,
+ "Binds": null,
+ "BlkioDeviceReadBps": null,
+ "BlkioDeviceReadIOps": null,
+ "BlkioDeviceWriteBps": null,
+ "BlkioDeviceWriteIOps": null,
+ "BlkioWeight": 0,
+ "BlkioWeightDevice": [],
+ "CapAdd": null,
+ "CapDrop": null,
+ "Cgroup": "",
+ "CgroupParent": "",
+ "CgroupnsMode": "host",
+ "ConsoleSize": [
+ 0,
+ 0
+ ],
+ "ContainerIDFile": "",
+ "CpuCount": 0,
+ "CpuPercent": 0,
+ "CpuPeriod": 0,
+ "CpuQuota": 0,
+ "CpuRealtimePeriod": 0,
+ "CpuRealtimeRuntime": 0,
+ "CpuShares": 0,
+ "CpusetCpus": "",
+ "CpusetMems": "",
+ "DeviceCgroupRules": null,
+ "DeviceRequests": null,
+ "Devices": [],
+ "Dns": [],
+ "DnsOptions": [],
+ "DnsSearch": [],
+ "ExtraHosts": null,
+ "GroupAdd": null,
+ "IOMaximumBandwidth": 0,
+ "IOMaximumIOps": 0,
+ "IpcMode": "private",
+ "Isolation": "",
+ "KernelMemory": 0,
+ "KernelMemoryTCP": 0,
+ "Links": null,
+ "LogConfig": {
+ "Config": {},
+ "Type": "json-file"
+ },
+ "MaskedPaths": [
+ "/proc/asound",
+ "/proc/acpi",
+ "/proc/kcore",
+ "/proc/keys",
+ "/proc/latency_stats",
+ "/proc/timer_list",
+ "/proc/timer_stats",
+ "/proc/sched_debug",
+ "/proc/scsi",
+ "/sys/firmware"
+ ],
+ "Memory": 0,
+ "MemoryReservation": 0,
+ "MemorySwap": 0,
+ "MemorySwappiness": null,
+ "NanoCpus": 0,
+ "NetworkMode": "default",
+ "OomKillDisable": false,
+ "OomScoreAdj": 0,
+ "PidMode": "",
+ "PidsLimit": null,
+ "PortBindings": {
+ "8000/tcp": [
+ {
+ "HostIp": "",
+ "HostPort": "8000"
+ }
+ ]
+ },
+ "Privileged": false,
+ "PublishAllPorts": false,
+ "ReadonlyPaths": [
+ "/proc/bus",
+ "/proc/fs",
+ "/proc/irq",
+ "/proc/sys",
+ "/proc/sysrq-trigger"
+ ],
+ "ReadonlyRootfs": false,
+ "RestartPolicy": {
+ "MaximumRetryCount": 0,
+ "Name": "no"
+ },
+ "Runtime": "runc",
+ "SecurityOpt": null,
+ "ShmSize": 67108864,
+ "UTSMode": "",
+ "Ulimits": null,
+ "UsernsMode": "",
+ "VolumeDriver": "",
+ "VolumesFrom": null
+ },
+ "HostnamePath": "/var/lib/docker/containers/a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce/hostname",
+ "HostsPath": "/var/lib/docker/containers/a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce/hosts",
+ "Id": "a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce",
+ "Image": "sha256:27941809078cc9b2802deb2b0bb6feed6c236cde01e487f200e24653533701ee",
+ "LogPath": "/var/lib/docker/containers/a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce/a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce-json.log",
+ "MountLabel": "",
+ "Mounts": [],
+ "Name": "/pensive_aryabhata",
+ "NetworkSettings": {
+ "Bridge": "",
+ "EndpointID": "704279259cac47ef58ca868d30a48414ac8fb2757618cec32570ea715b672ade",
+ "Gateway": "172.17.0.1",
+ "GlobalIPv6Address": "",
+ "GlobalIPv6PrefixLen": 0,
+ "HairpinMode": false,
+ "IPAddress": "172.17.0.2",
+ "IPPrefixLen": 16,
+ "IPv6Gateway": "",
+ "LinkLocalIPv6Address": "",
+ "LinkLocalIPv6PrefixLen": 0,
+ "MacAddress": "02:42:ac:11:00:02",
+ "Networks": {
+ "bridge": {
+ "Aliases": null,
+ "DriverOpts": null,
+ "EndpointID": "704279259cac47ef58ca868d30a48414ac8fb2757618cec32570ea715b672ade",
+ "Gateway": "172.17.0.1",
+ "GlobalIPv6Address": "",
+ "GlobalIPv6PrefixLen": 0,
+ "IPAMConfig": null,
+ "IPAddress": "172.17.0.2",
+ "IPPrefixLen": 16,
+ "IPv6Gateway": "",
+ "Links": null,
+ "MacAddress": "02:42:ac:11:00:02",
+ "NetworkID": "5426c33d912cdac32013f6bf1135ec0dc9319fed4e7a3a9cc6d86e7807030a60"
+ }
+ },
+ "Ports": {
+ "8000/tcp": [
+ {
+ "HostIp": "0.0.0.0",
+ "HostPort": "8000"
+ },
+ {
+ "HostIp": "::",
+ "HostPort": "8000"
+ }
+ ]
+ },
+ "SandboxID": "6eb3878d8e7903513277d3a9fc93c4dd03d17d08c099e185e81e086e80e6c3ac",
+ "SandboxKey": "/var/run/docker/netns/6eb3878d8e79",
+ "SecondaryIPAddresses": null,
+ "SecondaryIPv6Addresses": null
+ },
+ "Path": "/bin/sh",
+ "Platform": "linux",
+ "ProcessLabel": "",
+ "ResolvConfPath": "/var/lib/docker/containers/a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce/resolv.conf",
+ "RestartCount": 0,
+ "State": {
+ "Dead": false,
+ "Error": "",
+ "ExitCode": 0,
+ "FinishedAt": "0001-01-01T00:00:00Z",
+ "OOMKilled": false,
+ "Paused": false,
+ "Pid": 2327,
+ "Restarting": false,
+ "Running": true,
+ "StartedAt": "2022-07-12T00:42:45.21292516Z",
+ "Status": "running"
+ }
+}
diff --git a/receiver/dockerstatsreceiver/testdata/mock/two_containers/containers.json b/receiver/dockerstatsreceiver/testdata/mock/two_containers/containers.json
new file mode 100644
index 000000000000..9370f8eab82c
--- /dev/null
+++ b/receiver/dockerstatsreceiver/testdata/mock/two_containers/containers.json
@@ -0,0 +1,106 @@
+[
+ {
+ "Command": "/bin/sh",
+ "Created": 1657586620,
+ "HostConfig": {
+ "NetworkMode": "default"
+ },
+ "Id": "89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7",
+ "Image": "alpine",
+ "ImageID": "sha256:e66264b98777e12192600bf9b4d663655c98a090072e1bab49e233d7531d1294",
+ "Labels": {
+ "container.label": "container-label2"
+ },
+ "Mounts": [],
+ "Names": [
+ "/loving_torvalds"
+ ],
+ "NetworkSettings": {
+ "Networks": {
+ "bridge": {
+ "Aliases": null,
+ "DriverOpts": null,
+ "EndpointID": "9990c2e6968d0c4529bc0eef50fcefebe60fc22d698ad16dd786723f8d098913",
+ "Gateway": "172.17.0.1",
+ "GlobalIPv6Address": "",
+ "GlobalIPv6PrefixLen": 0,
+ "IPAMConfig": null,
+ "IPAddress": "172.17.0.3",
+ "IPPrefixLen": 16,
+ "IPv6Gateway": "",
+ "Links": null,
+ "MacAddress": "02:42:ac:11:00:03",
+ "NetworkID": "5426c33d912cdac32013f6bf1135ec0dc9319fed4e7a3a9cc6d86e7807030a60"
+ }
+ }
+ },
+ "Ports": [
+ {
+ "IP": "0.0.0.0",
+ "PrivatePort": 8001,
+ "PublicPort": 8001,
+ "Type": "tcp"
+ },
+ {
+ "IP": "::",
+ "PrivatePort": 8001,
+ "PublicPort": 8001,
+ "Type": "tcp"
+ }
+ ],
+ "State": "running",
+ "Status": "Up 4 hours"
+ },
+ {
+ "Command": "/bin/sh",
+ "Created": 1657586564,
+ "HostConfig": {
+ "NetworkMode": "default"
+ },
+ "Id": "a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce",
+ "Image": "ubuntu",
+ "ImageID": "sha256:27941809078cc9b2802deb2b0bb6feed6c236cde01e487f200e24653533701ee",
+ "Labels": {
+ "container.label": "container-label"
+ },
+ "Mounts": [],
+ "Names": [
+ "/pensive_aryabhata"
+ ],
+ "NetworkSettings": {
+ "Networks": {
+ "bridge": {
+ "Aliases": null,
+ "DriverOpts": null,
+ "EndpointID": "704279259cac47ef58ca868d30a48414ac8fb2757618cec32570ea715b672ade",
+ "Gateway": "172.17.0.1",
+ "GlobalIPv6Address": "",
+ "GlobalIPv6PrefixLen": 0,
+ "IPAMConfig": null,
+ "IPAddress": "172.17.0.2",
+ "IPPrefixLen": 16,
+ "IPv6Gateway": "",
+ "Links": null,
+ "MacAddress": "02:42:ac:11:00:02",
+ "NetworkID": "5426c33d912cdac32013f6bf1135ec0dc9319fed4e7a3a9cc6d86e7807030a60"
+ }
+ }
+ },
+ "Ports": [
+ {
+ "IP": "0.0.0.0",
+ "PrivatePort": 8000,
+ "PublicPort": 8000,
+ "Type": "tcp"
+ },
+ {
+ "IP": "::",
+ "PrivatePort": 8000,
+ "PublicPort": 8000,
+ "Type": "tcp"
+ }
+ ],
+ "State": "running",
+ "Status": "Up 4 hours"
+ }
+]
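
Note: containers.json is the list-endpoint (/containers/json) view of the same two containers; the scraper first takes the IDs from this list, then inspects and fetches stats for each, which is why the expected file that follows contains two resourceMetrics entries keyed by container.id. For this case a mock server also has to pick the matching inspect fixture per container; a small sketch keyed on the ID prefix in the request path, with IDs copied from the fixtures above:

package dockerstatsreceiver

import "strings"

// inspectFixture selects the per-container inspect fixture for the
// two_containers mock based on the container ID embedded in the URL path.
func inspectFixture(urlPath string) string {
	if strings.Contains(urlPath, "89d28931fd8b") {
		return "container1.json" // /loving_torvalds, image alpine
	}
	return "container2.json" // /pensive_aryabhata, image ubuntu
}
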
diff --git a/receiver/dockerstatsreceiver/testdata/mock/two_containers/expected_metrics.json b/receiver/dockerstatsreceiver/testdata/mock/two_containers/expected_metrics.json
new file mode 100644
index 000000000000..f9efee74a73f
--- /dev/null
+++ b/receiver/dockerstatsreceiver/testdata/mock/two_containers/expected_metrics.json
@@ -0,0 +1,2300 @@
+{
+ "resourceMetrics": [
+ {
+ "resource": {
+ "attributes": [
+ {
+ "key": "container.runtime",
+ "value": {
+ "stringValue": "docker"
+ }
+ },
+ {
+ "key": "container.id",
+ "value": {
+ "stringValue": "a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce"
+ }
+ },
+ {
+ "key": "container.image.name",
+ "value": {
+ "stringValue": "ubuntu"
+ }
+ },
+ {
+ "key": "container.name",
+ "value": {
+ "stringValue": "pensive_aryabhata"
+ }
+ },
+ {
+ "key": "container.hostname",
+ "value": {
+ "stringValue": "a359c0fc87c5"
+ }
+ },
+ {
+ "key": "env-var-metric-label",
+ "value": {
+ "stringValue": "env-var"
+ }
+ },
+ {
+ "key": "container-metric-label",
+ "value": {
+ "stringValue": "container-label"
+ }
+ }
+ ]
+ },
+ "schemaUrl": "https://opentelemetry.io/schemas/1.6.1",
+ "scopeMetrics": [
+ {
+ "metrics": [
+ {
+ "name": "container.blockio.io_service_bytes_recursive.read",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "73728",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.blockio.io_service_bytes_recursive.write",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.blockio.io_service_bytes_recursive.sync",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "73728",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.blockio.io_service_bytes_recursive.async",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.blockio.io_service_bytes_recursive.discard",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.blockio.io_service_bytes_recursive.total",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "73728",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.blockio.io_serviced_recursive.read",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "1",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.blockio.io_serviced_recursive.write",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.blockio.io_serviced_recursive.sync",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "1",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.blockio.io_serviced_recursive.async",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.blockio.io_serviced_recursive.discard",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.blockio.io_serviced_recursive.total",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "1",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.cpu.usage.system",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "14930240000000",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "ns"
+ },
+ {
+ "name": "container.cpu.usage.total",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "31093384",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "ns"
+ },
+ {
+ "name": "container.cpu.usage.kernelmode",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "10000000",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "ns"
+ },
+ {
+ "name": "container.cpu.usage.usermode",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "10000000",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "ns"
+ },
+ {
+ "name": "container.cpu.throttling_data.periods",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.cpu.throttling_data.throttled_periods",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.cpu.throttling_data.throttled_time",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "ns"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asDouble": 0,
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.cpu.percent",
+ "unit": "1"
+ },
+ {
+ "name": "container.cpu.usage.percpu",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "31093384",
+ "attributes": [
+ {
+ "key": "core",
+ "value": {
+ "stringValue": "cpu0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "ns"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "2074079232",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.usage.limit",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "352256",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.usage.total",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asDouble": 0.016983729192463173,
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.percent",
+ "unit": "1"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "6172672",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.usage.max",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "4096",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.active_anon",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "73728",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.active_file",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "73728",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.cache",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.dirty",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "9223372036854772000",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.hierarchical_memory_limit",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "9223372036854772000",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.hierarchical_memsw_limit",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "106496",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.inactive_anon",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.inactive_file",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.mapped_file",
+ "unit": "By"
+ },
+ {
+ "name": "container.memory.pgfault",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "2417",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.memory.pgmajfault",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "1",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.memory.pgpgin",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "1980",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.memory.pgpgout",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "1935",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "110592",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.rss",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.rss_huge",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "4096",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.total_active_anon",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "73728",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.total_active_file",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "73728",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.total_cache",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.total_dirty",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "106496",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.total_inactive_anon",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.total_inactive_file",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.total_mapped_file",
+ "unit": "By"
+ },
+ {
+ "name": "container.memory.total_pgfault",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "2417",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.memory.total_pgmajfault",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "1",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.memory.total_pgpgin",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "1980",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.memory.total_pgpgout",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "1935",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "110592",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.total_rss",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.total_rss_huge",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.total_unevictable",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.total_writeback",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.unevictable",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637112000"
+ }
+ ]
+ },
+ "name": "container.memory.writeback",
+ "unit": "By"
+ },
+ {
+ "name": "container.network.io.usage.rx_bytes",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "12394",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.network.io.usage.tx_bytes",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.network.io.usage.rx_dropped",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.network.io.usage.rx_errors",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.network.io.usage.rx_packets",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "96",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.network.io.usage.tx_dropped",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.network.io.usage.tx_errors",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.network.io.usage.tx_packets",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637112000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ }
+ ],
+ "scope": {}
+ }
+ ]
+ },
+ {
+ "resource": {
+ "attributes": [
+ {
+ "key": "container.runtime",
+ "value": {
+ "stringValue": "docker"
+ }
+ },
+ {
+ "key": "container.id",
+ "value": {
+ "stringValue": "89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7"
+ }
+ },
+ {
+ "key": "container.image.name",
+ "value": {
+ "stringValue": "alpine"
+ }
+ },
+ {
+ "key": "container.name",
+ "value": {
+ "stringValue": "loving_torvalds"
+ }
+ },
+ {
+ "key": "container.hostname",
+ "value": {
+ "stringValue": "89d28931fd8b"
+ }
+ },
+ {
+ "key": "env-var-metric-label",
+ "value": {
+ "stringValue": "env-var2"
+ }
+ },
+ {
+ "key": "container-metric-label",
+ "value": {
+ "stringValue": "container-label2"
+ }
+ }
+ ]
+ },
+ "schemaUrl": "https://opentelemetry.io/schemas/1.6.1",
+ "scopeMetrics": [
+ {
+ "metrics": [
+ {
+ "name": "container.blockio.io_service_bytes_recursive.read",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "1187840",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.blockio.io_service_bytes_recursive.write",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.blockio.io_service_bytes_recursive.sync",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "1187840",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.blockio.io_service_bytes_recursive.async",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.blockio.io_service_bytes_recursive.discard",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.blockio.io_service_bytes_recursive.total",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "1187840",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.blockio.io_serviced_recursive.read",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "19",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.blockio.io_serviced_recursive.write",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.blockio.io_serviced_recursive.sync",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "19",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.blockio.io_serviced_recursive.async",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.blockio.io_serviced_recursive.discard",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.blockio.io_serviced_recursive.total",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "19",
+ "attributes": [
+ {
+ "key": "device_major",
+ "value": {
+ "stringValue": "8"
+ }
+ },
+ {
+ "key": "device_minor",
+ "value": {
+ "stringValue": "0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.cpu.usage.system",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "14834790000000",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "ns"
+ },
+ {
+ "name": "container.cpu.usage.total",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "34117917",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "ns"
+ },
+ {
+ "name": "container.cpu.usage.kernelmode",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "20000000",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "ns"
+ },
+ {
+ "name": "container.cpu.usage.usermode",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "10000000",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "ns"
+ },
+ {
+ "name": "container.cpu.throttling_data.periods",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.cpu.throttling_data.throttled_periods",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.cpu.throttling_data.throttled_time",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "ns"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asDouble": 0,
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.cpu.percent",
+ "unit": "1"
+ },
+ {
+ "name": "container.cpu.usage.percpu",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "34117917",
+ "attributes": [
+ {
+ "key": "core",
+ "value": {
+ "stringValue": "cpu0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "ns"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "2074079232",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.usage.limit",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "380928",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.usage.total",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asDouble": 0.01836612575464041,
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.percent",
+ "unit": "1"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "6201344",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.usage.max",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "4096",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.active_anon",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "393216",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.active_file",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "921600",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.cache",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.dirty",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "9223372036854772000",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.hierarchical_memory_limit",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "9223372036854772000",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.hierarchical_memsw_limit",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "147456",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.inactive_anon",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "528384",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.inactive_file",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "843776",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.mapped_file",
+ "unit": "By"
+ },
+ {
+ "name": "container.memory.pgfault",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "2469",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.memory.pgmajfault",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "8",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.memory.pgpgin",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "2288",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.memory.pgpgout",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "2026",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "151552",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.rss",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.rss_huge",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "4096",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.total_active_anon",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "393216",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.total_active_file",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "921600",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.total_cache",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.total_dirty",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "147456",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.total_inactive_anon",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "528384",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.total_inactive_file",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "843776",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.total_mapped_file",
+ "unit": "By"
+ },
+ {
+ "name": "container.memory.total_pgfault",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "2469",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.memory.total_pgmajfault",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "8",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.memory.total_pgpgin",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "2288",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.memory.total_pgpgout",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "2026",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "151552",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.total_rss",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.total_rss_huge",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.total_unevictable",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.total_writeback",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.unevictable",
+ "unit": "By"
+ },
+ {
+ "gauge": {
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "timeUnixNano": "1657771832637093000"
+ }
+ ]
+ },
+ "name": "container.memory.writeback",
+ "unit": "By"
+ },
+ {
+ "name": "container.network.io.usage.rx_bytes",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "11313",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.network.io.usage.tx_bytes",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "By"
+ },
+ {
+ "name": "container.network.io.usage.rx_dropped",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.network.io.usage.rx_errors",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.network.io.usage.rx_packets",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "88",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.network.io.usage.tx_dropped",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.network.io.usage.tx_errors",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ },
+ {
+ "name": "container.network.io.usage.tx_packets",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "dataPoints": [
+ {
+ "asInt": "0",
+ "attributes": [
+ {
+ "key": "interface",
+ "value": {
+ "stringValue": "eth0"
+ }
+ }
+ ],
+ "timeUnixNano": "1657771832637093000"
+ }
+ ],
+ "isMonotonic": true
+ },
+ "unit": "1"
+ }
+ ],
+ "scope": {}
+ }
+ ]
+ }
+ ]
+}
diff --git a/receiver/dockerstatsreceiver/testdata/mock/two_containers/stats1.json b/receiver/dockerstatsreceiver/testdata/mock/two_containers/stats1.json
new file mode 100644
index 000000000000..9ca148e05517
--- /dev/null
+++ b/receiver/dockerstatsreceiver/testdata/mock/two_containers/stats1.json
@@ -0,0 +1,180 @@
+{
+ "blkio_stats": {
+ "io_merged_recursive": [],
+ "io_queue_recursive": [],
+ "io_service_bytes_recursive": [
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Read",
+ "value": 1187840
+ },
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Write",
+ "value": 0
+ },
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Sync",
+ "value": 1187840
+ },
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Async",
+ "value": 0
+ },
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Discard",
+ "value": 0
+ },
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Total",
+ "value": 1187840
+ }
+ ],
+ "io_service_time_recursive": [],
+ "io_serviced_recursive": [
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Read",
+ "value": 19
+ },
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Write",
+ "value": 0
+ },
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Sync",
+ "value": 19
+ },
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Async",
+ "value": 0
+ },
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Discard",
+ "value": 0
+ },
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Total",
+ "value": 19
+ }
+ ],
+ "io_time_recursive": [],
+ "io_wait_time_recursive": [],
+ "sectors_recursive": []
+ },
+ "cpu_stats": {
+ "cpu_usage": {
+ "percpu_usage": [
+ 34117917
+ ],
+ "total_usage": 34117917,
+ "usage_in_kernelmode": 20000000,
+ "usage_in_usermode": 10000000
+ },
+ "online_cpus": 1,
+ "system_cpu_usage": 14834790000000,
+ "throttling_data": {
+ "periods": 0,
+ "throttled_periods": 0,
+ "throttled_time": 0
+ }
+ },
+ "id": "89d28931fd8b95c8806343a532e9e76bf0a0b76ee8f19452b8f75dee1ebcebb7",
+ "memory_stats": {
+ "limit": 2074079232,
+ "max_usage": 6201344,
+ "stats": {
+ "active_anon": 4096,
+ "active_file": 393216,
+ "cache": 921600,
+ "dirty": 0,
+ "hierarchical_memory_limit": 9223372036854772000,
+ "hierarchical_memsw_limit": 9223372036854772000,
+ "inactive_anon": 147456,
+ "inactive_file": 528384,
+ "mapped_file": 843776,
+ "pgfault": 2469,
+ "pgmajfault": 8,
+ "pgpgin": 2288,
+ "pgpgout": 2026,
+ "rss": 151552,
+ "rss_huge": 0,
+ "total_active_anon": 4096,
+ "total_active_file": 393216,
+ "total_cache": 921600,
+ "total_dirty": 0,
+ "total_inactive_anon": 147456,
+ "total_inactive_file": 528384,
+ "total_mapped_file": 843776,
+ "total_pgfault": 2469,
+ "total_pgmajfault": 8,
+ "total_pgpgin": 2288,
+ "total_pgpgout": 2026,
+ "total_rss": 151552,
+ "total_rss_huge": 0,
+ "total_unevictable": 0,
+ "total_writeback": 0,
+ "unevictable": 0,
+ "writeback": 0
+ },
+ "usage": 1302528
+ },
+ "name": "/loving_torvalds",
+ "networks": {
+ "eth0": {
+ "rx_bytes": 11313,
+ "rx_dropped": 0,
+ "rx_errors": 0,
+ "rx_packets": 88,
+ "tx_bytes": 0,
+ "tx_dropped": 0,
+ "tx_errors": 0,
+ "tx_packets": 0
+ }
+ },
+ "num_procs": 0,
+ "pids_stats": {
+ "current": 1
+ },
+ "precpu_stats": {
+ "cpu_usage": {
+ "percpu_usage": [
+ 34117917
+ ],
+ "total_usage": 34117917,
+ "usage_in_kernelmode": 20000000,
+ "usage_in_usermode": 10000000
+ },
+ "online_cpus": 1,
+ "system_cpu_usage": 14833820000000,
+ "throttling_data": {
+ "periods": 0,
+ "throttled_periods": 0,
+ "throttled_time": 0
+ }
+ },
+ "preread": "2022-07-12T05:32:37.708457509Z",
+ "read": "2022-07-12T05:32:38.711168232Z",
+ "storage_stats": {}
+}
diff --git a/receiver/dockerstatsreceiver/testdata/mock/two_containers/stats2.json b/receiver/dockerstatsreceiver/testdata/mock/two_containers/stats2.json
new file mode 100644
index 000000000000..2df1f9dc7a60
--- /dev/null
+++ b/receiver/dockerstatsreceiver/testdata/mock/two_containers/stats2.json
@@ -0,0 +1,180 @@
+{
+ "blkio_stats": {
+ "io_merged_recursive": [],
+ "io_queue_recursive": [],
+ "io_service_bytes_recursive": [
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Read",
+ "value": 73728
+ },
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Write",
+ "value": 0
+ },
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Sync",
+ "value": 73728
+ },
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Async",
+ "value": 0
+ },
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Discard",
+ "value": 0
+ },
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Total",
+ "value": 73728
+ }
+ ],
+ "io_service_time_recursive": [],
+ "io_serviced_recursive": [
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Read",
+ "value": 1
+ },
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Write",
+ "value": 0
+ },
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Sync",
+ "value": 1
+ },
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Async",
+ "value": 0
+ },
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Discard",
+ "value": 0
+ },
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Total",
+ "value": 1
+ }
+ ],
+ "io_time_recursive": [],
+ "io_wait_time_recursive": [],
+ "sectors_recursive": []
+ },
+ "cpu_stats": {
+ "cpu_usage": {
+ "percpu_usage": [
+ 31093384
+ ],
+ "total_usage": 31093384,
+ "usage_in_kernelmode": 10000000,
+ "usage_in_usermode": 10000000
+ },
+ "online_cpus": 1,
+ "system_cpu_usage": 14930240000000,
+ "throttling_data": {
+ "periods": 0,
+ "throttled_periods": 0,
+ "throttled_time": 0
+ }
+ },
+ "id": "a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce",
+ "memory_stats": {
+ "limit": 2074079232,
+ "max_usage": 6172672,
+ "stats": {
+ "active_anon": 4096,
+ "active_file": 73728,
+ "cache": 73728,
+ "dirty": 0,
+ "hierarchical_memory_limit": 9223372036854772000,
+ "hierarchical_memsw_limit": 9223372036854772000,
+ "inactive_anon": 106496,
+ "inactive_file": 0,
+ "mapped_file": 0,
+ "pgfault": 2417,
+ "pgmajfault": 1,
+ "pgpgin": 1980,
+ "pgpgout": 1935,
+ "rss": 110592,
+ "rss_huge": 0,
+ "total_active_anon": 4096,
+ "total_active_file": 73728,
+ "total_cache": 73728,
+ "total_dirty": 0,
+ "total_inactive_anon": 106496,
+ "total_inactive_file": 0,
+ "total_mapped_file": 0,
+ "total_pgfault": 2417,
+ "total_pgmajfault": 1,
+ "total_pgpgin": 1980,
+ "total_pgpgout": 1935,
+ "total_rss": 110592,
+ "total_rss_huge": 0,
+ "total_unevictable": 0,
+ "total_writeback": 0,
+ "unevictable": 0,
+ "writeback": 0
+ },
+ "usage": 425984
+ },
+ "name": "/pensive_aryabhata",
+ "networks": {
+ "eth0": {
+ "rx_bytes": 12394,
+ "rx_dropped": 0,
+ "rx_errors": 0,
+ "rx_packets": 96,
+ "tx_bytes": 0,
+ "tx_dropped": 0,
+ "tx_errors": 0,
+ "tx_packets": 0
+ }
+ },
+ "num_procs": 0,
+ "pids_stats": {
+ "current": 1
+ },
+ "precpu_stats": {
+ "cpu_usage": {
+ "percpu_usage": [
+ 31093384
+ ],
+ "total_usage": 31093384,
+ "usage_in_kernelmode": 10000000,
+ "usage_in_usermode": 10000000
+ },
+ "online_cpus": 1,
+ "system_cpu_usage": 14929250000000,
+ "throttling_data": {
+ "periods": 0,
+ "throttled_periods": 0,
+ "throttled_time": 0
+ }
+ },
+ "preread": "2022-07-12T05:34:15.325458676Z",
+ "read": "2022-07-12T05:34:16.328861358Z",
+ "storage_stats": {}
+}
diff --git a/unreleased/mdatagen-dockerstatsreceiver.yaml b/unreleased/mdatagen-dockerstatsreceiver.yaml
new file mode 100755
index 000000000000..c83ae59ce4c4
--- /dev/null
+++ b/unreleased/mdatagen-dockerstatsreceiver.yaml
@@ -0,0 +1,5 @@
+change_type: enhancement
+component: dockerstatsreceiver
+note: "Initial PR for onboarding dockerstats onto mdatagen scraping."
+issues: [9794]
+subtext: "Additionally appends the internal docker client to allow options (bug fix)."