diff --git a/CHANGELOG.md b/CHANGELOG.md
index 09d68b704cdd..032e5426102c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -17,6 +17,7 @@
### 💡 Enhancements 💡
+- `cmd/mdatagen`: Replace enum attribute values with typed constants (#9683)
- `k8sclusterreceiver`: Validate that k8s API supports a resource before setting up a watcher for it (#9523)
- `internal/stanza`: Add support for `remove` operator (#9524)
- `k8sattributesprocessor`: Support regex capture groups in tag_name (#9525)
diff --git a/cmd/mdatagen/main.go b/cmd/mdatagen/main.go
index 2f7406ac250e..696b140ddf0c 100644
--- a/cmd/mdatagen/main.go
+++ b/cmd/mdatagen/main.go
@@ -86,6 +86,9 @@ func generateMetrics(ymlDir string, thisDir string, md metadata, useExpGen bool)
"publicVar": func(s string) (string, error) {
return formatIdentifier(s, true)
},
+ "attributeInfo": func(an attributeName) attribute {
+ return md.Attributes[an]
+ },
"parseImportsRequired": func(metrics map[metricName]metric) bool {
for _, m := range metrics {
if m.Data().HasMetricInputType() {
diff --git a/cmd/mdatagen/metrics_v2.tmpl b/cmd/mdatagen/metrics_v2.tmpl
index 24fbce6891cd..38cba8745f9d 100644
--- a/cmd/mdatagen/metrics_v2.tmpl
+++ b/cmd/mdatagen/metrics_v2.tmpl
@@ -38,6 +38,39 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+{{ range $name, $info := .Attributes }}
+{{- if $info.Enum -}}
+// Attribute{{ $name.Render }} specifies a value of the {{ $name }} attribute.
+type Attribute{{ $name.Render }} int
+
+const (
+ _ Attribute{{ $name.Render }} = iota
+ {{- range $info.Enum }}
+ Attribute{{ $name.Render }}{{ . | publicVar }}
+ {{- end }}
+)
+
+// String returns the string representation of the Attribute{{ $name.Render }}.
+func (av Attribute{{ $name.Render }}) String() string {
+ switch av {
+ {{- range $info.Enum }}
+ case Attribute{{ $name.Render }}{{ . | publicVar }}:
+ return "{{ . }}"
+ {{- end }}
+ }
+ return ""
+}
+
+// MapAttribute{{ $name.Render }} is a helper map of string to Attribute{{ $name.Render }} attribute value.
+var MapAttribute{{ $name.Render }} = map[string]Attribute{{ $name.Render }}{
+ {{- range $info.Enum }}
+ "{{ . }}": Attribute{{ $name.Render }}{{ . | publicVar }},
+ {{- end }}
+}
+
+{{ end }}
+{{- end }}
+
{{ range $name, $metric := .Metrics -}}
type metric{{ $name.Render }} struct {
data pmetric.Metric // data buffer for generated metric.
@@ -206,7 +239,9 @@ func (mb *MetricsBuilder) Record{{ $name.Render }}DataPoint(ts pcommon.Timestamp
{{- else }}
{{- if $metric.Data.HasMetricValueType }}, val {{ $metric.Data.MetricValueType.BasicType }}{{- end }}
{{- end -}}
- {{- range $metric.Attributes -}} , {{ .RenderUnexported }}AttributeValue string{{ end }})
+ {{- range $metric.Attributes -}}
+ , {{ .RenderUnexported }}AttributeValue {{ if (attributeInfo .).Enum }}Attribute{{ .Render }}{{ else }}string{{ end }}
+ {{- end }})
{{- if $metric.Data.HasMetricInputType }} error{{ end }} {
{{- if $metric.Data.HasMetricInputType }}
{{- if $metric.Data.HasMetricValueType }}
@@ -225,7 +260,9 @@ func (mb *MetricsBuilder) Record{{ $name.Render }}DataPoint(ts pcommon.Timestamp
} else {
mb.metric{{ $name.Render }}.recordDataPoint(mb.startTime, ts
{{- if $metric.Data.HasMetricValueType }}, i {{ end }}
- {{- range $metric.Attributes -}} , {{ .RenderUnexported }}AttributeValue{{ end }})
+ {{- range $metric.Attributes -}}
+ , {{ .RenderUnexported }}AttributeValue{{ if (attributeInfo .).Enum }}.String(){{ end }}
+ {{- end }})
}
{{- end }}
return nil
@@ -233,7 +270,9 @@ func (mb *MetricsBuilder) Record{{ $name.Render }}DataPoint(ts pcommon.Timestamp
{{- else }}
mb.metric{{ $name.Render }}.recordDataPoint(mb.startTime, ts
{{- if $metric.Data.HasMetricValueType }}, val {{ end }}
- {{- range $metric.Attributes -}} , {{ .RenderUnexported }}AttributeValue{{ end }})
+ {{- range $metric.Attributes -}}
+ , {{ .RenderUnexported }}AttributeValue{{ if (attributeInfo .).Enum }}.String(){{ end }}
+ {{- end }})
{{- end }}
}
{{ end }}
@@ -265,18 +304,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-{{ range $name, $info := .Attributes -}}
-{{ if $info.Enum -}}
-// Attribute{{ $name.Render }} are the possible values that the attribute "{{ $name }}" can have.
-var Attribute{{ $name.Render }} = struct {
-{{- range $info.Enum }}
- {{ . | publicVar }} string
-{{- end }}
-}{
-{{- range $info.Enum }}
- "{{ . }}",
-{{- end }}
-}
-{{ end }}
-{{ end }}
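
For context, a minimal sketch of how receiver code is expected to consume the regenerated API (hypothetical caller, not part of this patch; it reuses the apachereceiver names generated further below and the pcommon timestamp helper):

package apachereceiver

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachereceiver/internal/metadata"
)

// recordScoreboardExample is a hypothetical helper illustrating the typed-constant API.
func recordScoreboardExample(mb *metadata.MetricsBuilder, raw string) {
	now := pcommon.NewTimestampFromTime(time.Now())

	// Pass a generated typed constant directly; the builder converts it back to
	// its canonical string via String() when the data point is recorded.
	mb.RecordApacheScoreboardDataPoint(now, 12, "localhost", metadata.AttributeScoreboardStateSending)

	// Translate an externally supplied string with the generated Map helper,
	// skipping values that the metadata does not declare.
	if state, ok := metadata.MapAttributeScoreboardState[raw]; ok {
		mb.RecordApacheScoreboardDataPoint(now, 1, "localhost", state)
	}
}
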
diff --git a/receiver/activedirectorydsreceiver/internal/metadata/generated_metrics_v2.go b/receiver/activedirectorydsreceiver/internal/metadata/generated_metrics_v2.go
index 0e6192a9678a..51cefee645b7 100644
--- a/receiver/activedirectorydsreceiver/internal/metadata/generated_metrics_v2.go
+++ b/receiver/activedirectorydsreceiver/internal/metadata/generated_metrics_v2.go
@@ -95,6 +95,196 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeBindType specifies a value of the bind_type attribute.
+type AttributeBindType int
+
+const (
+ _ AttributeBindType = iota
+ AttributeBindTypeServer
+ AttributeBindTypeClient
+)
+
+// String returns the string representation of the AttributeBindType.
+func (av AttributeBindType) String() string {
+ switch av {
+ case AttributeBindTypeServer:
+ return "server"
+ case AttributeBindTypeClient:
+ return "client"
+ }
+ return ""
+}
+
+// MapAttributeBindType is a helper map of string to AttributeBindType attribute value.
+var MapAttributeBindType = map[string]AttributeBindType{
+ "server": AttributeBindTypeServer,
+ "client": AttributeBindTypeClient,
+}
+
+// AttributeDirection specifies a value of the direction attribute.
+type AttributeDirection int
+
+const (
+ _ AttributeDirection = iota
+ AttributeDirectionSent
+ AttributeDirectionReceived
+)
+
+// String returns the string representation of the AttributeDirection.
+func (av AttributeDirection) String() string {
+ switch av {
+ case AttributeDirectionSent:
+ return "sent"
+ case AttributeDirectionReceived:
+ return "received"
+ }
+ return ""
+}
+
+// MapAttributeDirection is a helper map of string to AttributeDirection attribute value.
+var MapAttributeDirection = map[string]AttributeDirection{
+ "sent": AttributeDirectionSent,
+ "received": AttributeDirectionReceived,
+}
+
+// AttributeNetworkDataType specifies a value of the network_data_type attribute.
+type AttributeNetworkDataType int
+
+const (
+ _ AttributeNetworkDataType = iota
+ AttributeNetworkDataTypeCompressed
+ AttributeNetworkDataTypeUncompressed
+)
+
+// String returns the string representation of the AttributeNetworkDataType.
+func (av AttributeNetworkDataType) String() string {
+ switch av {
+ case AttributeNetworkDataTypeCompressed:
+ return "compressed"
+ case AttributeNetworkDataTypeUncompressed:
+ return "uncompressed"
+ }
+ return ""
+}
+
+// MapAttributeNetworkDataType is a helper map of string to AttributeNetworkDataType attribute value.
+var MapAttributeNetworkDataType = map[string]AttributeNetworkDataType{
+ "compressed": AttributeNetworkDataTypeCompressed,
+ "uncompressed": AttributeNetworkDataTypeUncompressed,
+}
+
+// AttributeOperationType specifies a value of the operation_type attribute.
+type AttributeOperationType int
+
+const (
+ _ AttributeOperationType = iota
+ AttributeOperationTypeRead
+ AttributeOperationTypeWrite
+ AttributeOperationTypeSearch
+)
+
+// String returns the string representation of the AttributeOperationType.
+func (av AttributeOperationType) String() string {
+ switch av {
+ case AttributeOperationTypeRead:
+ return "read"
+ case AttributeOperationTypeWrite:
+ return "write"
+ case AttributeOperationTypeSearch:
+ return "search"
+ }
+ return ""
+}
+
+// MapAttributeOperationType is a helper map of string to AttributeOperationType attribute value.
+var MapAttributeOperationType = map[string]AttributeOperationType{
+ "read": AttributeOperationTypeRead,
+ "write": AttributeOperationTypeWrite,
+ "search": AttributeOperationTypeSearch,
+}
+
+// AttributeSuboperationType specifies a value of the suboperation_type attribute.
+type AttributeSuboperationType int
+
+const (
+ _ AttributeSuboperationType = iota
+ AttributeSuboperationTypeSecurityDescriptorPropagationsEvent
+ AttributeSuboperationTypeSearch
+)
+
+// String returns the string representation of the AttributeSuboperationType.
+func (av AttributeSuboperationType) String() string {
+ switch av {
+ case AttributeSuboperationTypeSecurityDescriptorPropagationsEvent:
+ return "security_descriptor_propagations_event"
+ case AttributeSuboperationTypeSearch:
+ return "search"
+ }
+ return ""
+}
+
+// MapAttributeSuboperationType is a helper map of string to AttributeSuboperationType attribute value.
+var MapAttributeSuboperationType = map[string]AttributeSuboperationType{
+ "security_descriptor_propagations_event": AttributeSuboperationTypeSecurityDescriptorPropagationsEvent,
+ "search": AttributeSuboperationTypeSearch,
+}
+
+// AttributeSyncResult specifies a value of the sync_result attribute.
+type AttributeSyncResult int
+
+const (
+ _ AttributeSyncResult = iota
+ AttributeSyncResultSuccess
+ AttributeSyncResultSchemaMismatch
+ AttributeSyncResultOther
+)
+
+// String returns the string representation of the AttributeSyncResult.
+func (av AttributeSyncResult) String() string {
+ switch av {
+ case AttributeSyncResultSuccess:
+ return "success"
+ case AttributeSyncResultSchemaMismatch:
+ return "schema_mismatch"
+ case AttributeSyncResultOther:
+ return "other"
+ }
+ return ""
+}
+
+// MapAttributeSyncResult is a helper map of string to AttributeSyncResult attribute value.
+var MapAttributeSyncResult = map[string]AttributeSyncResult{
+ "success": AttributeSyncResultSuccess,
+ "schema_mismatch": AttributeSyncResultSchemaMismatch,
+ "other": AttributeSyncResultOther,
+}
+
+// AttributeValueType specifies a value of the value_type attribute.
+type AttributeValueType int
+
+const (
+ _ AttributeValueType = iota
+ AttributeValueTypeDistingushedNames
+ AttributeValueTypeOther
+)
+
+// String returns the string representation of the AttributeValueType.
+func (av AttributeValueType) String() string {
+ switch av {
+ case AttributeValueTypeDistingushedNames:
+ return "distingushed_names"
+ case AttributeValueTypeOther:
+ return "other"
+ }
+ return ""
+}
+
+// MapAttributeValueType is a helper map of string to AttributeValueType attribute value.
+var MapAttributeValueType = map[string]AttributeValueType{
+ "distingushed_names": AttributeValueTypeDistingushedNames,
+ "other": AttributeValueTypeOther,
+}
+
type metricActiveDirectoryDsBindRate struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -1154,8 +1344,8 @@ func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics {
}
// RecordActiveDirectoryDsBindRateDataPoint adds a data point to active_directory.ds.bind.rate metric.
-func (mb *MetricsBuilder) RecordActiveDirectoryDsBindRateDataPoint(ts pcommon.Timestamp, val float64, bindTypeAttributeValue string) {
- mb.metricActiveDirectoryDsBindRate.recordDataPoint(mb.startTime, ts, val, bindTypeAttributeValue)
+func (mb *MetricsBuilder) RecordActiveDirectoryDsBindRateDataPoint(ts pcommon.Timestamp, val float64, bindTypeAttributeValue AttributeBindType) {
+ mb.metricActiveDirectoryDsBindRate.recordDataPoint(mb.startTime, ts, val, bindTypeAttributeValue.String())
}
// RecordActiveDirectoryDsLdapBindLastSuccessfulTimeDataPoint adds a data point to active_directory.ds.ldap.bind.last_successful.time metric.
@@ -1189,18 +1379,18 @@ func (mb *MetricsBuilder) RecordActiveDirectoryDsNotificationQueuedDataPoint(ts
}
// RecordActiveDirectoryDsOperationRateDataPoint adds a data point to active_directory.ds.operation.rate metric.
-func (mb *MetricsBuilder) RecordActiveDirectoryDsOperationRateDataPoint(ts pcommon.Timestamp, val float64, operationTypeAttributeValue string) {
- mb.metricActiveDirectoryDsOperationRate.recordDataPoint(mb.startTime, ts, val, operationTypeAttributeValue)
+func (mb *MetricsBuilder) RecordActiveDirectoryDsOperationRateDataPoint(ts pcommon.Timestamp, val float64, operationTypeAttributeValue AttributeOperationType) {
+ mb.metricActiveDirectoryDsOperationRate.recordDataPoint(mb.startTime, ts, val, operationTypeAttributeValue.String())
}
// RecordActiveDirectoryDsReplicationNetworkIoDataPoint adds a data point to active_directory.ds.replication.network.io metric.
-func (mb *MetricsBuilder) RecordActiveDirectoryDsReplicationNetworkIoDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue string, networkDataTypeAttributeValue string) {
- mb.metricActiveDirectoryDsReplicationNetworkIo.recordDataPoint(mb.startTime, ts, val, directionAttributeValue, networkDataTypeAttributeValue)
+func (mb *MetricsBuilder) RecordActiveDirectoryDsReplicationNetworkIoDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue AttributeDirection, networkDataTypeAttributeValue AttributeNetworkDataType) {
+ mb.metricActiveDirectoryDsReplicationNetworkIo.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String(), networkDataTypeAttributeValue.String())
}
// RecordActiveDirectoryDsReplicationObjectRateDataPoint adds a data point to active_directory.ds.replication.object.rate metric.
-func (mb *MetricsBuilder) RecordActiveDirectoryDsReplicationObjectRateDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue string) {
- mb.metricActiveDirectoryDsReplicationObjectRate.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+func (mb *MetricsBuilder) RecordActiveDirectoryDsReplicationObjectRateDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue AttributeDirection) {
+ mb.metricActiveDirectoryDsReplicationObjectRate.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordActiveDirectoryDsReplicationOperationPendingDataPoint adds a data point to active_directory.ds.replication.operation.pending metric.
@@ -1209,8 +1399,8 @@ func (mb *MetricsBuilder) RecordActiveDirectoryDsReplicationOperationPendingData
}
// RecordActiveDirectoryDsReplicationPropertyRateDataPoint adds a data point to active_directory.ds.replication.property.rate metric.
-func (mb *MetricsBuilder) RecordActiveDirectoryDsReplicationPropertyRateDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue string) {
- mb.metricActiveDirectoryDsReplicationPropertyRate.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+func (mb *MetricsBuilder) RecordActiveDirectoryDsReplicationPropertyRateDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue AttributeDirection) {
+ mb.metricActiveDirectoryDsReplicationPropertyRate.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordActiveDirectoryDsReplicationSyncObjectPendingDataPoint adds a data point to active_directory.ds.replication.sync.object.pending metric.
@@ -1219,13 +1409,13 @@ func (mb *MetricsBuilder) RecordActiveDirectoryDsReplicationSyncObjectPendingDat
}
// RecordActiveDirectoryDsReplicationSyncRequestCountDataPoint adds a data point to active_directory.ds.replication.sync.request.count metric.
-func (mb *MetricsBuilder) RecordActiveDirectoryDsReplicationSyncRequestCountDataPoint(ts pcommon.Timestamp, val int64, syncResultAttributeValue string) {
- mb.metricActiveDirectoryDsReplicationSyncRequestCount.recordDataPoint(mb.startTime, ts, val, syncResultAttributeValue)
+func (mb *MetricsBuilder) RecordActiveDirectoryDsReplicationSyncRequestCountDataPoint(ts pcommon.Timestamp, val int64, syncResultAttributeValue AttributeSyncResult) {
+ mb.metricActiveDirectoryDsReplicationSyncRequestCount.recordDataPoint(mb.startTime, ts, val, syncResultAttributeValue.String())
}
// RecordActiveDirectoryDsReplicationValueRateDataPoint adds a data point to active_directory.ds.replication.value.rate metric.
-func (mb *MetricsBuilder) RecordActiveDirectoryDsReplicationValueRateDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue string, valueTypeAttributeValue string) {
- mb.metricActiveDirectoryDsReplicationValueRate.recordDataPoint(mb.startTime, ts, val, directionAttributeValue, valueTypeAttributeValue)
+func (mb *MetricsBuilder) RecordActiveDirectoryDsReplicationValueRateDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue AttributeDirection, valueTypeAttributeValue AttributeValueType) {
+ mb.metricActiveDirectoryDsReplicationValueRate.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String(), valueTypeAttributeValue.String())
}
// RecordActiveDirectoryDsSecurityDescriptorPropagationsEventQueuedDataPoint adds a data point to active_directory.ds.security_descriptor_propagations_event.queued metric.
@@ -1234,8 +1424,8 @@ func (mb *MetricsBuilder) RecordActiveDirectoryDsSecurityDescriptorPropagationsE
}
// RecordActiveDirectoryDsSuboperationRateDataPoint adds a data point to active_directory.ds.suboperation.rate metric.
-func (mb *MetricsBuilder) RecordActiveDirectoryDsSuboperationRateDataPoint(ts pcommon.Timestamp, val float64, suboperationTypeAttributeValue string) {
- mb.metricActiveDirectoryDsSuboperationRate.recordDataPoint(mb.startTime, ts, val, suboperationTypeAttributeValue)
+func (mb *MetricsBuilder) RecordActiveDirectoryDsSuboperationRateDataPoint(ts pcommon.Timestamp, val float64, suboperationTypeAttributeValue AttributeSuboperationType) {
+ mb.metricActiveDirectoryDsSuboperationRate.recordDataPoint(mb.startTime, ts, val, suboperationTypeAttributeValue.String())
}
// RecordActiveDirectoryDsThreadCountDataPoint adds a data point to active_directory.ds.thread.count metric.
@@ -1280,70 +1470,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeBindType are the possible values that the attribute "bind_type" can have.
-var AttributeBindType = struct {
- Server string
- Client string
-}{
- "server",
- "client",
-}
-
-// AttributeDirection are the possible values that the attribute "direction" can have.
-var AttributeDirection = struct {
- Sent string
- Received string
-}{
- "sent",
- "received",
-}
-
-// AttributeNetworkDataType are the possible values that the attribute "network_data_type" can have.
-var AttributeNetworkDataType = struct {
- Compressed string
- Uncompressed string
-}{
- "compressed",
- "uncompressed",
-}
-
-// AttributeOperationType are the possible values that the attribute "operation_type" can have.
-var AttributeOperationType = struct {
- Read string
- Write string
- Search string
-}{
- "read",
- "write",
- "search",
-}
-
-// AttributeSuboperationType are the possible values that the attribute "suboperation_type" can have.
-var AttributeSuboperationType = struct {
- SecurityDescriptorPropagationsEvent string
- Search string
-}{
- "security_descriptor_propagations_event",
- "search",
-}
-
-// AttributeSyncResult are the possible values that the attribute "sync_result" can have.
-var AttributeSyncResult = struct {
- Success string
- SchemaMismatch string
- Other string
-}{
- "success",
- "schema_mismatch",
- "other",
-}
-
-// AttributeValueType are the possible values that the attribute "value_type" can have.
-var AttributeValueType = struct {
- DistingushedNames string
- Other string
-}{
- "distingushed_names",
- "other",
-}
diff --git a/receiver/activedirectorydsreceiver/scraper.go b/receiver/activedirectorydsreceiver/scraper.go
index cb8f0f1bfac6..03a6166c08d7 100644
--- a/receiver/activedirectorydsreceiver/scraper.go
+++ b/receiver/activedirectorydsreceiver/scraper.go
@@ -62,25 +62,25 @@ func (a *activeDirectoryDSScraper) scrape(ctx context.Context) (pmetric.Metrics,
draInboundBytesCompressed, err := a.w.Scrape(draInboundBytesCompressed)
multiErr = multierr.Append(multiErr, err)
if err == nil {
- a.mb.RecordActiveDirectoryDsReplicationNetworkIoDataPoint(now, int64(draInboundBytesCompressed), metadata.AttributeDirection.Received, metadata.AttributeNetworkDataType.Compressed)
+ a.mb.RecordActiveDirectoryDsReplicationNetworkIoDataPoint(now, int64(draInboundBytesCompressed), metadata.AttributeDirectionReceived, metadata.AttributeNetworkDataTypeCompressed)
}
draInboundBytesNotCompressed, err := a.w.Scrape(draInboundBytesNotCompressed)
multiErr = multierr.Append(multiErr, err)
if err == nil {
- a.mb.RecordActiveDirectoryDsReplicationNetworkIoDataPoint(now, int64(draInboundBytesNotCompressed), metadata.AttributeDirection.Received, metadata.AttributeNetworkDataType.Uncompressed)
+ a.mb.RecordActiveDirectoryDsReplicationNetworkIoDataPoint(now, int64(draInboundBytesNotCompressed), metadata.AttributeDirectionReceived, metadata.AttributeNetworkDataTypeUncompressed)
}
draOutboundBytesCompressed, err := a.w.Scrape(draOutboundBytesCompressed)
multiErr = multierr.Append(multiErr, err)
if err == nil {
- a.mb.RecordActiveDirectoryDsReplicationNetworkIoDataPoint(now, int64(draOutboundBytesCompressed), metadata.AttributeDirection.Sent, metadata.AttributeNetworkDataType.Compressed)
+ a.mb.RecordActiveDirectoryDsReplicationNetworkIoDataPoint(now, int64(draOutboundBytesCompressed), metadata.AttributeDirectionSent, metadata.AttributeNetworkDataTypeCompressed)
}
draOutboundBytesNotCompressed, err := a.w.Scrape(draOutboundBytesNotCompressed)
multiErr = multierr.Append(multiErr, err)
if err == nil {
- a.mb.RecordActiveDirectoryDsReplicationNetworkIoDataPoint(now, int64(draOutboundBytesNotCompressed), metadata.AttributeDirection.Sent, metadata.AttributeNetworkDataType.Uncompressed)
+ a.mb.RecordActiveDirectoryDsReplicationNetworkIoDataPoint(now, int64(draOutboundBytesNotCompressed), metadata.AttributeDirectionSent, metadata.AttributeNetworkDataTypeUncompressed)
}
draInboundFullSyncObjectsRemaining, err := a.w.Scrape(draInboundFullSyncObjectsRemaining)
@@ -92,51 +92,51 @@ func (a *activeDirectoryDSScraper) scrape(ctx context.Context) (pmetric.Metrics,
draInboundObjects, err := a.w.Scrape(draInboundObjects)
multiErr = multierr.Append(multiErr, err)
if err == nil {
- a.mb.RecordActiveDirectoryDsReplicationObjectRateDataPoint(now, draInboundObjects, metadata.AttributeDirection.Received)
+ a.mb.RecordActiveDirectoryDsReplicationObjectRateDataPoint(now, draInboundObjects, metadata.AttributeDirectionReceived)
}
draOutboundObjects, err := a.w.Scrape(draOutboundObjects)
multiErr = multierr.Append(multiErr, err)
if err == nil {
- a.mb.RecordActiveDirectoryDsReplicationObjectRateDataPoint(now, draOutboundObjects, metadata.AttributeDirection.Sent)
+ a.mb.RecordActiveDirectoryDsReplicationObjectRateDataPoint(now, draOutboundObjects, metadata.AttributeDirectionSent)
}
draInboundProperties, err := a.w.Scrape(draInboundProperties)
multiErr = multierr.Append(multiErr, err)
if err == nil {
- a.mb.RecordActiveDirectoryDsReplicationPropertyRateDataPoint(now, draInboundProperties, metadata.AttributeDirection.Received)
+ a.mb.RecordActiveDirectoryDsReplicationPropertyRateDataPoint(now, draInboundProperties, metadata.AttributeDirectionReceived)
}
draOutboundProperties, err := a.w.Scrape(draOutboundProperties)
multiErr = multierr.Append(multiErr, err)
if err == nil {
- a.mb.RecordActiveDirectoryDsReplicationPropertyRateDataPoint(now, draOutboundProperties, metadata.AttributeDirection.Sent)
+ a.mb.RecordActiveDirectoryDsReplicationPropertyRateDataPoint(now, draOutboundProperties, metadata.AttributeDirectionSent)
}
draInboundValuesDNs, dnsErr := a.w.Scrape(draInboundValuesDNs)
multiErr = multierr.Append(multiErr, dnsErr)
if dnsErr == nil {
- a.mb.RecordActiveDirectoryDsReplicationValueRateDataPoint(now, draInboundValuesDNs, metadata.AttributeDirection.Received, metadata.AttributeValueType.DistingushedNames)
+ a.mb.RecordActiveDirectoryDsReplicationValueRateDataPoint(now, draInboundValuesDNs, metadata.AttributeDirectionReceived, metadata.AttributeValueTypeDistingushedNames)
}
draInboundValuesTotal, totalErr := a.w.Scrape(draInboundValuesTotal)
multiErr = multierr.Append(multiErr, totalErr)
if dnsErr == nil && totalErr == nil {
otherValuesInbound := draInboundValuesTotal - draInboundValuesDNs
- a.mb.RecordActiveDirectoryDsReplicationValueRateDataPoint(now, otherValuesInbound, metadata.AttributeDirection.Received, metadata.AttributeValueType.Other)
+ a.mb.RecordActiveDirectoryDsReplicationValueRateDataPoint(now, otherValuesInbound, metadata.AttributeDirectionReceived, metadata.AttributeValueTypeOther)
}
draOutboundValuesDNs, dnsErr := a.w.Scrape(draOutboundValuesDNs)
multiErr = multierr.Append(multiErr, dnsErr)
if dnsErr == nil {
- a.mb.RecordActiveDirectoryDsReplicationValueRateDataPoint(now, draOutboundValuesDNs, metadata.AttributeDirection.Sent, metadata.AttributeValueType.DistingushedNames)
+ a.mb.RecordActiveDirectoryDsReplicationValueRateDataPoint(now, draOutboundValuesDNs, metadata.AttributeDirectionSent, metadata.AttributeValueTypeDistingushedNames)
}
draOutboundValuesTotal, totalErr := a.w.Scrape(draOutboundValuesTotal)
multiErr = multierr.Append(multiErr, totalErr)
if dnsErr == nil && totalErr == nil {
otherValuesOutbound := draOutboundValuesTotal - draOutboundValuesDNs
- a.mb.RecordActiveDirectoryDsReplicationValueRateDataPoint(now, otherValuesOutbound, metadata.AttributeDirection.Sent, metadata.AttributeValueType.Other)
+ a.mb.RecordActiveDirectoryDsReplicationValueRateDataPoint(now, otherValuesOutbound, metadata.AttributeDirectionSent, metadata.AttributeValueTypeOther)
}
draPendingReplicationOperations, err := a.w.Scrape(draPendingReplicationOperations)
@@ -148,50 +148,50 @@ func (a *activeDirectoryDSScraper) scrape(ctx context.Context) (pmetric.Metrics,
draSyncFailuresSchemaMistmatch, schemaMismatchErr := a.w.Scrape(draSyncFailuresSchemaMismatch)
multiErr = multierr.Append(multiErr, schemaMismatchErr)
if schemaMismatchErr == nil {
- a.mb.RecordActiveDirectoryDsReplicationSyncRequestCountDataPoint(now, int64(draSyncFailuresSchemaMistmatch), metadata.AttributeSyncResult.SchemaMismatch)
+ a.mb.RecordActiveDirectoryDsReplicationSyncRequestCountDataPoint(now, int64(draSyncFailuresSchemaMistmatch), metadata.AttributeSyncResultSchemaMismatch)
}
draSyncRequestsSuccessful, requestsSuccessfulErr := a.w.Scrape(draSyncRequestsSuccessful)
multiErr = multierr.Append(multiErr, requestsSuccessfulErr)
if requestsSuccessfulErr == nil {
- a.mb.RecordActiveDirectoryDsReplicationSyncRequestCountDataPoint(now, int64(draSyncRequestsSuccessful), metadata.AttributeSyncResult.Success)
+ a.mb.RecordActiveDirectoryDsReplicationSyncRequestCountDataPoint(now, int64(draSyncRequestsSuccessful), metadata.AttributeSyncResultSuccess)
}
draSyncRequestsTotal, totalErr := a.w.Scrape(draSyncRequestsMade)
multiErr = multierr.Append(multiErr, totalErr)
if totalErr == nil && requestsSuccessfulErr == nil && schemaMismatchErr == nil {
otherReplicationSyncRequests := draSyncRequestsTotal - draSyncRequestsSuccessful - draSyncFailuresSchemaMistmatch
- a.mb.RecordActiveDirectoryDsReplicationSyncRequestCountDataPoint(now, int64(otherReplicationSyncRequests), metadata.AttributeSyncResult.Other)
+ a.mb.RecordActiveDirectoryDsReplicationSyncRequestCountDataPoint(now, int64(otherReplicationSyncRequests), metadata.AttributeSyncResultOther)
}
dsDirectoryReads, err := a.w.Scrape(dsDirectoryReads)
multiErr = multierr.Append(multiErr, err)
if err == nil {
- a.mb.RecordActiveDirectoryDsOperationRateDataPoint(now, dsDirectoryReads, metadata.AttributeOperationType.Read)
+ a.mb.RecordActiveDirectoryDsOperationRateDataPoint(now, dsDirectoryReads, metadata.AttributeOperationTypeRead)
}
dsDirectoryWrites, err := a.w.Scrape(dsDirectoryWrites)
multiErr = multierr.Append(multiErr, err)
if err == nil {
- a.mb.RecordActiveDirectoryDsOperationRateDataPoint(now, dsDirectoryWrites, metadata.AttributeOperationType.Write)
+ a.mb.RecordActiveDirectoryDsOperationRateDataPoint(now, dsDirectoryWrites, metadata.AttributeOperationTypeWrite)
}
dsDirectorySearches, err := a.w.Scrape(dsDirectorySearches)
multiErr = multierr.Append(multiErr, err)
if err == nil {
- a.mb.RecordActiveDirectoryDsOperationRateDataPoint(now, dsDirectorySearches, metadata.AttributeOperationType.Search)
+ a.mb.RecordActiveDirectoryDsOperationRateDataPoint(now, dsDirectorySearches, metadata.AttributeOperationTypeSearch)
}
dsClientBinds, err := a.w.Scrape(dsClientBinds)
multiErr = multierr.Append(multiErr, err)
if err == nil {
- a.mb.RecordActiveDirectoryDsBindRateDataPoint(now, dsClientBinds, metadata.AttributeBindType.Client)
+ a.mb.RecordActiveDirectoryDsBindRateDataPoint(now, dsClientBinds, metadata.AttributeBindTypeClient)
}
dsServerBinds, err := a.w.Scrape(dsServerBinds)
multiErr = multierr.Append(multiErr, err)
if err == nil {
- a.mb.RecordActiveDirectoryDsBindRateDataPoint(now, dsServerBinds, metadata.AttributeBindType.Server)
+ a.mb.RecordActiveDirectoryDsBindRateDataPoint(now, dsServerBinds, metadata.AttributeBindTypeServer)
}
dsCacheHitRate, err := a.w.Scrape(dsNameCacheHitRate)
@@ -215,13 +215,13 @@ func (a *activeDirectoryDSScraper) scrape(ctx context.Context) (pmetric.Metrics,
securityDescSubops, err := a.w.Scrape(dsSecurityDescripterSubOperations)
multiErr = multierr.Append(multiErr, err)
if err == nil {
- a.mb.RecordActiveDirectoryDsSuboperationRateDataPoint(now, securityDescSubops, metadata.AttributeSuboperationType.SecurityDescriptorPropagationsEvent)
+ a.mb.RecordActiveDirectoryDsSuboperationRateDataPoint(now, securityDescSubops, metadata.AttributeSuboperationTypeSecurityDescriptorPropagationsEvent)
}
searchSubops, err := a.w.Scrape(dsSearchSubOperations)
multiErr = multierr.Append(multiErr, err)
if err == nil {
- a.mb.RecordActiveDirectoryDsSuboperationRateDataPoint(now, searchSubops, metadata.AttributeSuboperationType.Search)
+ a.mb.RecordActiveDirectoryDsSuboperationRateDataPoint(now, searchSubops, metadata.AttributeSuboperationTypeSearch)
}
threadsInUse, err := a.w.Scrape(dsThreadsInUse)
diff --git a/receiver/apachereceiver/documentation.md b/receiver/apachereceiver/documentation.md
index 247c870df6ec..94abdfa7759e 100644
--- a/receiver/apachereceiver/documentation.md
+++ b/receiver/apachereceiver/documentation.md
@@ -28,6 +28,6 @@ metrics:
| Name | Description | Values |
| ---- | ----------- | ------ |
-| scoreboard_state (state) | The state of a connection. | open, waiting, starting, reading, sending, keepalive, dnslookup, closing, logging, finishing, idle_cleanup |
+| scoreboard_state (state) | The state of a connection. | open, waiting, starting, reading, sending, keepalive, dnslookup, closing, logging, finishing, idle_cleanup, unknown |
| server_name | The name of the Apache HTTP server. | |
| workers_state (state) | The state of workers. | busy, idle |
diff --git a/receiver/apachereceiver/internal/metadata/generated_metrics_v2.go b/receiver/apachereceiver/internal/metadata/generated_metrics_v2.go
index 377d5f562c24..2930d755db22 100644
--- a/receiver/apachereceiver/internal/metadata/generated_metrics_v2.go
+++ b/receiver/apachereceiver/internal/metadata/generated_metrics_v2.go
@@ -49,6 +49,98 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeScoreboardState specifies a value of the scoreboard_state attribute.
+type AttributeScoreboardState int
+
+const (
+ _ AttributeScoreboardState = iota
+ AttributeScoreboardStateOpen
+ AttributeScoreboardStateWaiting
+ AttributeScoreboardStateStarting
+ AttributeScoreboardStateReading
+ AttributeScoreboardStateSending
+ AttributeScoreboardStateKeepalive
+ AttributeScoreboardStateDnslookup
+ AttributeScoreboardStateClosing
+ AttributeScoreboardStateLogging
+ AttributeScoreboardStateFinishing
+ AttributeScoreboardStateIdleCleanup
+ AttributeScoreboardStateUnknown
+)
+
+// String returns the string representation of the AttributeScoreboardState.
+func (av AttributeScoreboardState) String() string {
+ switch av {
+ case AttributeScoreboardStateOpen:
+ return "open"
+ case AttributeScoreboardStateWaiting:
+ return "waiting"
+ case AttributeScoreboardStateStarting:
+ return "starting"
+ case AttributeScoreboardStateReading:
+ return "reading"
+ case AttributeScoreboardStateSending:
+ return "sending"
+ case AttributeScoreboardStateKeepalive:
+ return "keepalive"
+ case AttributeScoreboardStateDnslookup:
+ return "dnslookup"
+ case AttributeScoreboardStateClosing:
+ return "closing"
+ case AttributeScoreboardStateLogging:
+ return "logging"
+ case AttributeScoreboardStateFinishing:
+ return "finishing"
+ case AttributeScoreboardStateIdleCleanup:
+ return "idle_cleanup"
+ case AttributeScoreboardStateUnknown:
+ return "unknown"
+ }
+ return ""
+}
+
+// MapAttributeScoreboardState is a helper map of string to AttributeScoreboardState attribute value.
+var MapAttributeScoreboardState = map[string]AttributeScoreboardState{
+ "open": AttributeScoreboardStateOpen,
+ "waiting": AttributeScoreboardStateWaiting,
+ "starting": AttributeScoreboardStateStarting,
+ "reading": AttributeScoreboardStateReading,
+ "sending": AttributeScoreboardStateSending,
+ "keepalive": AttributeScoreboardStateKeepalive,
+ "dnslookup": AttributeScoreboardStateDnslookup,
+ "closing": AttributeScoreboardStateClosing,
+ "logging": AttributeScoreboardStateLogging,
+ "finishing": AttributeScoreboardStateFinishing,
+ "idle_cleanup": AttributeScoreboardStateIdleCleanup,
+ "unknown": AttributeScoreboardStateUnknown,
+}
+
+// AttributeWorkersState specifies a value of the workers_state attribute.
+type AttributeWorkersState int
+
+const (
+ _ AttributeWorkersState = iota
+ AttributeWorkersStateBusy
+ AttributeWorkersStateIdle
+)
+
+// String returns the string representation of the AttributeWorkersState.
+func (av AttributeWorkersState) String() string {
+ switch av {
+ case AttributeWorkersStateBusy:
+ return "busy"
+ case AttributeWorkersStateIdle:
+ return "idle"
+ }
+ return ""
+}
+
+// MapAttributeWorkersState is a helper map of string to AttributeWorkersState attribute value.
+var MapAttributeWorkersState = map[string]AttributeWorkersState{
+ "busy": AttributeWorkersStateBusy,
+ "idle": AttributeWorkersStateIdle,
+}
+
type metricApacheCurrentConnections struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -480,8 +572,8 @@ func (mb *MetricsBuilder) RecordApacheRequestsDataPoint(ts pcommon.Timestamp, va
}
// RecordApacheScoreboardDataPoint adds a data point to apache.scoreboard metric.
-func (mb *MetricsBuilder) RecordApacheScoreboardDataPoint(ts pcommon.Timestamp, val int64, serverNameAttributeValue string, scoreboardStateAttributeValue string) {
- mb.metricApacheScoreboard.recordDataPoint(mb.startTime, ts, val, serverNameAttributeValue, scoreboardStateAttributeValue)
+func (mb *MetricsBuilder) RecordApacheScoreboardDataPoint(ts pcommon.Timestamp, val int64, serverNameAttributeValue string, scoreboardStateAttributeValue AttributeScoreboardState) {
+ mb.metricApacheScoreboard.recordDataPoint(mb.startTime, ts, val, serverNameAttributeValue, scoreboardStateAttributeValue.String())
}
// RecordApacheTrafficDataPoint adds a data point to apache.traffic metric.
@@ -500,11 +592,11 @@ func (mb *MetricsBuilder) RecordApacheUptimeDataPoint(ts pcommon.Timestamp, val
}
// RecordApacheWorkersDataPoint adds a data point to apache.workers metric.
-func (mb *MetricsBuilder) RecordApacheWorkersDataPoint(ts pcommon.Timestamp, val string, serverNameAttributeValue string, workersStateAttributeValue string) error {
+func (mb *MetricsBuilder) RecordApacheWorkersDataPoint(ts pcommon.Timestamp, val string, serverNameAttributeValue string, workersStateAttributeValue AttributeWorkersState) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for ApacheWorkers, value was %s: %w", val, err)
} else {
- mb.metricApacheWorkers.recordDataPoint(mb.startTime, ts, i, serverNameAttributeValue, workersStateAttributeValue)
+ mb.metricApacheWorkers.recordDataPoint(mb.startTime, ts, i, serverNameAttributeValue, workersStateAttributeValue.String())
}
return nil
}
@@ -534,39 +626,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeScoreboardState are the possible values that the attribute "scoreboard_state" can have.
-var AttributeScoreboardState = struct {
- Open string
- Waiting string
- Starting string
- Reading string
- Sending string
- Keepalive string
- Dnslookup string
- Closing string
- Logging string
- Finishing string
- IdleCleanup string
-}{
- "open",
- "waiting",
- "starting",
- "reading",
- "sending",
- "keepalive",
- "dnslookup",
- "closing",
- "logging",
- "finishing",
- "idle_cleanup",
-}
-
-// AttributeWorkersState are the possible values that the attribute "workers_state" can have.
-var AttributeWorkersState = struct {
- Busy string
- Idle string
-}{
- "busy",
- "idle",
-}
diff --git a/receiver/apachereceiver/metadata.yaml b/receiver/apachereceiver/metadata.yaml
index d4d2c079dd9b..a8ac95319999 100644
--- a/receiver/apachereceiver/metadata.yaml
+++ b/receiver/apachereceiver/metadata.yaml
@@ -24,6 +24,7 @@ attributes:
- logging
- finishing
- idle_cleanup
+ - unknown
metrics:
apache.uptime:
diff --git a/receiver/apachereceiver/scraper.go b/receiver/apachereceiver/scraper.go
index 822db0909153..6990e363dc36 100644
--- a/receiver/apachereceiver/scraper.go
+++ b/receiver/apachereceiver/scraper.go
@@ -79,9 +79,11 @@ func (r *apacheScraper) scrape(context.Context) (pmetric.Metrics, error) {
case "ConnsTotal":
addPartialIfError(errors, r.mb.RecordApacheCurrentConnectionsDataPoint(now, metricValue, r.cfg.serverName))
case "BusyWorkers":
- addPartialIfError(errors, r.mb.RecordApacheWorkersDataPoint(now, metricValue, r.cfg.serverName, "busy"))
+ addPartialIfError(errors, r.mb.RecordApacheWorkersDataPoint(now, metricValue, r.cfg.serverName,
+ metadata.AttributeWorkersStateBusy))
case "IdleWorkers":
- addPartialIfError(errors, r.mb.RecordApacheWorkersDataPoint(now, metricValue, r.cfg.serverName, "idle"))
+ addPartialIfError(errors, r.mb.RecordApacheWorkersDataPoint(now, metricValue, r.cfg.serverName,
+ metadata.AttributeWorkersStateIdle))
case "Total Accesses":
addPartialIfError(errors, r.mb.RecordApacheRequestsDataPoint(now, metricValue, r.cfg.serverName))
case "Total kBytes":
@@ -139,50 +141,50 @@ func parseStats(resp string) map[string]string {
return metrics
}
-type scoreboardCountsByLabel map[string]int64
+type scoreboardCountsByLabel map[metadata.AttributeScoreboardState]int64
// parseScoreboard quantifies the symbolic mapping of the scoreboard.
func parseScoreboard(values string) scoreboardCountsByLabel {
scoreboard := scoreboardCountsByLabel{
- "waiting": 0,
- "starting": 0,
- "reading": 0,
- "sending": 0,
- "keepalive": 0,
- "dnslookup": 0,
- "closing": 0,
- "logging": 0,
- "finishing": 0,
- "idle_cleanup": 0,
- "open": 0,
+ metadata.AttributeScoreboardStateWaiting: 0,
+ metadata.AttributeScoreboardStateStarting: 0,
+ metadata.AttributeScoreboardStateReading: 0,
+ metadata.AttributeScoreboardStateSending: 0,
+ metadata.AttributeScoreboardStateKeepalive: 0,
+ metadata.AttributeScoreboardStateDnslookup: 0,
+ metadata.AttributeScoreboardStateClosing: 0,
+ metadata.AttributeScoreboardStateLogging: 0,
+ metadata.AttributeScoreboardStateFinishing: 0,
+ metadata.AttributeScoreboardStateIdleCleanup: 0,
+ metadata.AttributeScoreboardStateOpen: 0,
}
for _, char := range values {
switch string(char) {
case "_":
- scoreboard["waiting"]++
+ scoreboard[metadata.AttributeScoreboardStateWaiting]++
case "S":
- scoreboard["starting"]++
+ scoreboard[metadata.AttributeScoreboardStateStarting]++
case "R":
- scoreboard["reading"]++
+ scoreboard[metadata.AttributeScoreboardStateReading]++
case "W":
- scoreboard["sending"]++
+ scoreboard[metadata.AttributeScoreboardStateSending]++
case "K":
- scoreboard["keepalive"]++
+ scoreboard[metadata.AttributeScoreboardStateKeepalive]++
case "D":
- scoreboard["dnslookup"]++
+ scoreboard[metadata.AttributeScoreboardStateDnslookup]++
case "C":
- scoreboard["closing"]++
+ scoreboard[metadata.AttributeScoreboardStateClosing]++
case "L":
- scoreboard["logging"]++
+ scoreboard[metadata.AttributeScoreboardStateLogging]++
case "G":
- scoreboard["finishing"]++
+ scoreboard[metadata.AttributeScoreboardStateFinishing]++
case "I":
- scoreboard["idle_cleanup"]++
+ scoreboard[metadata.AttributeScoreboardStateIdleCleanup]++
case ".":
- scoreboard["open"]++
+ scoreboard[metadata.AttributeScoreboardStateOpen]++
default:
- scoreboard["unknown"]++
+ scoreboard[metadata.AttributeScoreboardStateUnknown]++
}
}
return scoreboard
diff --git a/receiver/apachereceiver/scraper_test.go b/receiver/apachereceiver/scraper_test.go
index 19d7b54f5b9f..1f8a03c81cbf 100644
--- a/receiver/apachereceiver/scraper_test.go
+++ b/receiver/apachereceiver/scraper_test.go
@@ -30,6 +30,7 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest/golden"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachereceiver/internal/metadata"
)
func TestScraper(t *testing.T) {
@@ -74,52 +75,52 @@ func TestParseScoreboard(t *testing.T) {
scoreboard := `S_DD_L_GGG_____W__IIII_C________________W__________________________________.........................____WR______W____W________________________C______________________________________W_W____W______________R_________R________C_________WK_W________K_____W__C__________W___R______.............................................................................................................................`
results := parseScoreboard(scoreboard)
- require.EqualValues(t, int64(150), results["open"])
- require.EqualValues(t, int64(217), results["waiting"])
- require.EqualValues(t, int64(1), results["starting"])
- require.EqualValues(t, int64(4), results["reading"])
- require.EqualValues(t, int64(12), results["sending"])
- require.EqualValues(t, int64(2), results["keepalive"])
- require.EqualValues(t, int64(2), results["dnslookup"])
- require.EqualValues(t, int64(4), results["closing"])
- require.EqualValues(t, int64(1), results["logging"])
- require.EqualValues(t, int64(3), results["finishing"])
- require.EqualValues(t, int64(4), results["idle_cleanup"])
+ require.EqualValues(t, int64(150), results[metadata.AttributeScoreboardStateOpen])
+ require.EqualValues(t, int64(217), results[metadata.AttributeScoreboardStateWaiting])
+ require.EqualValues(t, int64(1), results[metadata.AttributeScoreboardStateStarting])
+ require.EqualValues(t, int64(4), results[metadata.AttributeScoreboardStateReading])
+ require.EqualValues(t, int64(12), results[metadata.AttributeScoreboardStateSending])
+ require.EqualValues(t, int64(2), results[metadata.AttributeScoreboardStateKeepalive])
+ require.EqualValues(t, int64(2), results[metadata.AttributeScoreboardStateDnslookup])
+ require.EqualValues(t, int64(4), results[metadata.AttributeScoreboardStateClosing])
+ require.EqualValues(t, int64(1), results[metadata.AttributeScoreboardStateLogging])
+ require.EqualValues(t, int64(3), results[metadata.AttributeScoreboardStateFinishing])
+ require.EqualValues(t, int64(4), results[metadata.AttributeScoreboardStateIdleCleanup])
})
t.Run("test unknown", func(t *testing.T) {
scoreboard := `qwertyuiopasdfghjklzxcvbnm`
results := parseScoreboard(scoreboard)
- require.EqualValues(t, int64(0), results["open"])
- require.EqualValues(t, int64(0), results["waiting"])
- require.EqualValues(t, int64(0), results["starting"])
- require.EqualValues(t, int64(0), results["reading"])
- require.EqualValues(t, int64(0), results["sending"])
- require.EqualValues(t, int64(0), results["keepalive"])
- require.EqualValues(t, int64(0), results["dnslookup"])
- require.EqualValues(t, int64(0), results["closing"])
- require.EqualValues(t, int64(0), results["logging"])
- require.EqualValues(t, int64(0), results["finishing"])
- require.EqualValues(t, int64(0), results["idle_cleanup"])
- require.EqualValues(t, int64(26), results["unknown"])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateOpen])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateWaiting])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateStarting])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateReading])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateSending])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateKeepalive])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateDnslookup])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateClosing])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateLogging])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateFinishing])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateIdleCleanup])
+ require.EqualValues(t, int64(26), results[metadata.AttributeScoreboardStateUnknown])
})
t.Run("test empty defaults", func(t *testing.T) {
emptyString := ""
results := parseScoreboard(emptyString)
- require.EqualValues(t, int64(0), results["open"])
- require.EqualValues(t, int64(0), results["waiting"])
- require.EqualValues(t, int64(0), results["starting"])
- require.EqualValues(t, int64(0), results["reading"])
- require.EqualValues(t, int64(0), results["sending"])
- require.EqualValues(t, int64(0), results["keepalive"])
- require.EqualValues(t, int64(0), results["dnslookup"])
- require.EqualValues(t, int64(0), results["closing"])
- require.EqualValues(t, int64(0), results["logging"])
- require.EqualValues(t, int64(0), results["finishing"])
- require.EqualValues(t, int64(0), results["idle_cleanup"])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateOpen])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateWaiting])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateStarting])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateReading])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateSending])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateKeepalive])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateDnslookup])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateClosing])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateLogging])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateFinishing])
+ require.EqualValues(t, int64(0), results[metadata.AttributeScoreboardStateIdleCleanup])
})
}
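
A minimal round-trip check that could sit alongside these tests (hypothetical test, not part of this patch), verifying that the generated Map helper and String() stay consistent for every declared value:

func TestScoreboardStateRoundTrip(t *testing.T) {
	for str, state := range metadata.MapAttributeScoreboardState {
		require.Equal(t, str, state.String())
	}
}
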
diff --git a/receiver/couchdbreceiver/internal/metadata/generated_metrics_v2.go b/receiver/couchdbreceiver/internal/metadata/generated_metrics_v2.go
index 0243aaa217fc..ede0c881466f 100644
--- a/receiver/couchdbreceiver/internal/metadata/generated_metrics_v2.go
+++ b/receiver/couchdbreceiver/internal/metadata/generated_metrics_v2.go
@@ -55,6 +55,104 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeHTTPMethod specifies a value of the http.method attribute.
+type AttributeHTTPMethod int
+
+const (
+ _ AttributeHTTPMethod = iota
+ AttributeHTTPMethodCOPY
+ AttributeHTTPMethodDELETE
+ AttributeHTTPMethodGET
+ AttributeHTTPMethodHEAD
+ AttributeHTTPMethodOPTIONS
+ AttributeHTTPMethodPOST
+ AttributeHTTPMethodPUT
+)
+
+// String returns the string representation of the AttributeHTTPMethod.
+func (av AttributeHTTPMethod) String() string {
+ switch av {
+ case AttributeHTTPMethodCOPY:
+ return "COPY"
+ case AttributeHTTPMethodDELETE:
+ return "DELETE"
+ case AttributeHTTPMethodGET:
+ return "GET"
+ case AttributeHTTPMethodHEAD:
+ return "HEAD"
+ case AttributeHTTPMethodOPTIONS:
+ return "OPTIONS"
+ case AttributeHTTPMethodPOST:
+ return "POST"
+ case AttributeHTTPMethodPUT:
+ return "PUT"
+ }
+ return ""
+}
+
+// MapAttributeHTTPMethod is a helper map of string to AttributeHTTPMethod attribute value.
+var MapAttributeHTTPMethod = map[string]AttributeHTTPMethod{
+ "COPY": AttributeHTTPMethodCOPY,
+ "DELETE": AttributeHTTPMethodDELETE,
+ "GET": AttributeHTTPMethodGET,
+ "HEAD": AttributeHTTPMethodHEAD,
+ "OPTIONS": AttributeHTTPMethodOPTIONS,
+ "POST": AttributeHTTPMethodPOST,
+ "PUT": AttributeHTTPMethodPUT,
+}
+
+// AttributeOperation specifies a value of the operation attribute.
+type AttributeOperation int
+
+const (
+ _ AttributeOperation = iota
+ AttributeOperationWrites
+ AttributeOperationReads
+)
+
+// String returns the string representation of the AttributeOperation.
+func (av AttributeOperation) String() string {
+ switch av {
+ case AttributeOperationWrites:
+ return "writes"
+ case AttributeOperationReads:
+ return "reads"
+ }
+ return ""
+}
+
+// MapAttributeOperation is a helper map of string to AttributeOperation attribute value.
+var MapAttributeOperation = map[string]AttributeOperation{
+ "writes": AttributeOperationWrites,
+ "reads": AttributeOperationReads,
+}
+
+// AttributeView specifies a value of the view attribute.
+type AttributeView int
+
+const (
+ _ AttributeView = iota
+ AttributeViewTemporaryViewReads
+ AttributeViewViewReads
+)
+
+// String returns the string representation of the AttributeView.
+func (av AttributeView) String() string {
+ switch av {
+ case AttributeViewTemporaryViewReads:
+ return "temporary_view_reads"
+ case AttributeViewViewReads:
+ return "view_reads"
+ }
+ return ""
+}
+
+// MapAttributeView is a helper map of string to AttributeView attribute value.
+var MapAttributeView = map[string]AttributeView{
+ "temporary_view_reads": AttributeViewTemporaryViewReads,
+ "view_reads": AttributeViewViewReads,
+}
+
type metricCouchdbAverageRequestTime struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -583,8 +681,8 @@ func (mb *MetricsBuilder) RecordCouchdbDatabaseOpenDataPoint(ts pcommon.Timestam
}
// RecordCouchdbDatabaseOperationsDataPoint adds a data point to couchdb.database.operations metric.
-func (mb *MetricsBuilder) RecordCouchdbDatabaseOperationsDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue string) {
- mb.metricCouchdbDatabaseOperations.recordDataPoint(mb.startTime, ts, val, operationAttributeValue)
+func (mb *MetricsBuilder) RecordCouchdbDatabaseOperationsDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation) {
+ mb.metricCouchdbDatabaseOperations.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String())
}
// RecordCouchdbFileDescriptorOpenDataPoint adds a data point to couchdb.file_descriptor.open metric.
@@ -598,8 +696,8 @@ func (mb *MetricsBuilder) RecordCouchdbHttpdBulkRequestsDataPoint(ts pcommon.Tim
}
// RecordCouchdbHttpdRequestsDataPoint adds a data point to couchdb.httpd.requests metric.
-func (mb *MetricsBuilder) RecordCouchdbHttpdRequestsDataPoint(ts pcommon.Timestamp, val int64, httpMethodAttributeValue string) {
- mb.metricCouchdbHttpdRequests.recordDataPoint(mb.startTime, ts, val, httpMethodAttributeValue)
+func (mb *MetricsBuilder) RecordCouchdbHttpdRequestsDataPoint(ts pcommon.Timestamp, val int64, httpMethodAttributeValue AttributeHTTPMethod) {
+ mb.metricCouchdbHttpdRequests.recordDataPoint(mb.startTime, ts, val, httpMethodAttributeValue.String())
}
// RecordCouchdbHttpdResponsesDataPoint adds a data point to couchdb.httpd.responses metric.
@@ -608,8 +706,8 @@ func (mb *MetricsBuilder) RecordCouchdbHttpdResponsesDataPoint(ts pcommon.Timest
}
// RecordCouchdbHttpdViewsDataPoint adds a data point to couchdb.httpd.views metric.
-func (mb *MetricsBuilder) RecordCouchdbHttpdViewsDataPoint(ts pcommon.Timestamp, val int64, viewAttributeValue string) {
- mb.metricCouchdbHttpdViews.recordDataPoint(mb.startTime, ts, val, viewAttributeValue)
+func (mb *MetricsBuilder) RecordCouchdbHttpdViewsDataPoint(ts pcommon.Timestamp, val int64, viewAttributeValue AttributeView) {
+ mb.metricCouchdbHttpdViews.recordDataPoint(mb.startTime, ts, val, viewAttributeValue.String())
}
// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
@@ -640,40 +738,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeHTTPMethod are the possible values that the attribute "http.method" can have.
-var AttributeHTTPMethod = struct {
- COPY string
- DELETE string
- GET string
- HEAD string
- OPTIONS string
- POST string
- PUT string
-}{
- "COPY",
- "DELETE",
- "GET",
- "HEAD",
- "OPTIONS",
- "POST",
- "PUT",
-}
-
-// AttributeOperation are the possible values that the attribute "operation" can have.
-var AttributeOperation = struct {
- Writes string
- Reads string
-}{
- "writes",
- "reads",
-}
-
-// AttributeView are the possible values that the attribute "view" can have.
-var AttributeView = struct {
- TemporaryViewReads string
- ViewReads string
-}{
- "temporary_view_reads",
- "view_reads",
-}
diff --git a/receiver/couchdbreceiver/metrics.go b/receiver/couchdbreceiver/metrics.go
index 8c047c8e7fe2..e3b0cd2fcbea 100644
--- a/receiver/couchdbreceiver/metrics.go
+++ b/receiver/couchdbreceiver/metrics.go
@@ -56,9 +56,8 @@ func (c *couchdbScraper) recordCouchdbHttpdBulkRequestsDataPoint(now pcommon.Tim
}
func (c *couchdbScraper) recordCouchdbHttpdRequestsDataPoint(now pcommon.Timestamp, stats map[string]interface{}, errors scrapererror.ScrapeErrors) {
- methods := []string{metadata.AttributeHTTPMethod.COPY, metadata.AttributeHTTPMethod.DELETE, metadata.AttributeHTTPMethod.GET, metadata.AttributeHTTPMethod.HEAD, metadata.AttributeHTTPMethod.OPTIONS, metadata.AttributeHTTPMethod.POST, metadata.AttributeHTTPMethod.PUT}
- for _, method := range methods {
- httpdRequestMethodKey := []string{"httpd_request_methods", method, "value"}
+ for methodVal, method := range metadata.MapAttributeHTTPMethod {
+ httpdRequestMethodKey := []string{"httpd_request_methods", methodVal, "value"}
httpdRequestMethodValue, err := getValueFromBody(httpdRequestMethodKey, stats)
if err != nil {
errors.AddPartial(1, err)
@@ -94,9 +93,8 @@ func (c *couchdbScraper) recordCouchdbHttpdResponsesDataPoint(now pcommon.Timest
}
func (c *couchdbScraper) recordCouchdbHttpdViewsDataPoint(now pcommon.Timestamp, stats map[string]interface{}, errors scrapererror.ScrapeErrors) {
- views := []string{metadata.AttributeView.TemporaryViewReads, metadata.AttributeView.ViewReads}
- for _, view := range views {
- viewKey := []string{"httpd", view, "value"}
+ for viewVal, view := range metadata.MapAttributeView {
+ viewKey := []string{"httpd", viewVal, "value"}
viewValue, err := getValueFromBody(viewKey, stats)
if err != nil {
errors.AddPartial(1, err)
@@ -145,7 +143,7 @@ func (c *couchdbScraper) recordCouchdbFileDescriptorOpenDataPoint(now pcommon.Ti
}
func (c *couchdbScraper) recordCouchdbDatabaseOperationsDataPoint(now pcommon.Timestamp, stats map[string]interface{}, errors scrapererror.ScrapeErrors) {
- operations := []string{metadata.AttributeOperation.Reads, metadata.AttributeOperation.Writes}
+ operations := []metadata.AttributeOperation{metadata.AttributeOperationReads, metadata.AttributeOperationWrites}
keyPaths := [][]string{{"database_reads", "value"}, {"database_writes", "value"}}
for i := 0; i < len(operations); i++ {
key := keyPaths[i]
diff --git a/receiver/elasticsearchreceiver/internal/metadata/generated_metrics_v2.go b/receiver/elasticsearchreceiver/internal/metadata/generated_metrics_v2.go
index 349e6451194f..f839729ac40b 100644
--- a/receiver/elasticsearchreceiver/internal/metadata/generated_metrics_v2.go
+++ b/receiver/elasticsearchreceiver/internal/metadata/generated_metrics_v2.go
@@ -139,6 +139,314 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeCacheName specifies a value of the cache_name attribute.
+type AttributeCacheName int
+
+const (
+ _ AttributeCacheName = iota
+ AttributeCacheNameFielddata
+ AttributeCacheNameQuery
+)
+
+// String returns the string representation of the AttributeCacheName.
+func (av AttributeCacheName) String() string {
+ switch av {
+ case AttributeCacheNameFielddata:
+ return "fielddata"
+ case AttributeCacheNameQuery:
+ return "query"
+ }
+ return ""
+}
+
+// MapAttributeCacheName is a helper map of string to AttributeCacheName attribute value.
+var MapAttributeCacheName = map[string]AttributeCacheName{
+ "fielddata": AttributeCacheNameFielddata,
+ "query": AttributeCacheNameQuery,
+}
+
+// AttributeDirection specifies a value of the direction attribute.
+type AttributeDirection int
+
+const (
+ _ AttributeDirection = iota
+ AttributeDirectionReceived
+ AttributeDirectionSent
+)
+
+// String returns the string representation of the AttributeDirection.
+func (av AttributeDirection) String() string {
+ switch av {
+ case AttributeDirectionReceived:
+ return "received"
+ case AttributeDirectionSent:
+ return "sent"
+ }
+ return ""
+}
+
+// MapAttributeDirection is a helper map of string to AttributeDirection attribute value.
+var MapAttributeDirection = map[string]AttributeDirection{
+ "received": AttributeDirectionReceived,
+ "sent": AttributeDirectionSent,
+}
+
+// AttributeDiskUsageState specifies a value of the disk_usage_state attribute.
+type AttributeDiskUsageState int
+
+const (
+ _ AttributeDiskUsageState = iota
+ AttributeDiskUsageStateUsed
+ AttributeDiskUsageStateFree
+)
+
+// String returns the string representation of the AttributeDiskUsageState.
+func (av AttributeDiskUsageState) String() string {
+ switch av {
+ case AttributeDiskUsageStateUsed:
+ return "used"
+ case AttributeDiskUsageStateFree:
+ return "free"
+ }
+ return ""
+}
+
+// MapAttributeDiskUsageState is a helper map of string to AttributeDiskUsageState attribute value.
+var MapAttributeDiskUsageState = map[string]AttributeDiskUsageState{
+ "used": AttributeDiskUsageStateUsed,
+ "free": AttributeDiskUsageStateFree,
+}
+
+// AttributeDocumentState specifies the value of the document_state attribute.
+type AttributeDocumentState int
+
+const (
+ _ AttributeDocumentState = iota
+ AttributeDocumentStateActive
+ AttributeDocumentStateDeleted
+)
+
+// String returns the string representation of the AttributeDocumentState.
+func (av AttributeDocumentState) String() string {
+ switch av {
+ case AttributeDocumentStateActive:
+ return "active"
+ case AttributeDocumentStateDeleted:
+ return "deleted"
+ }
+ return ""
+}
+
+// MapAttributeDocumentState is a helper map of string to AttributeDocumentState attribute value.
+var MapAttributeDocumentState = map[string]AttributeDocumentState{
+ "active": AttributeDocumentStateActive,
+ "deleted": AttributeDocumentStateDeleted,
+}
+
+// AttributeFsDirection specifies the value of the fs_direction attribute.
+type AttributeFsDirection int
+
+const (
+ _ AttributeFsDirection = iota
+ AttributeFsDirectionRead
+ AttributeFsDirectionWrite
+)
+
+// String returns the string representation of the AttributeFsDirection.
+func (av AttributeFsDirection) String() string {
+ switch av {
+ case AttributeFsDirectionRead:
+ return "read"
+ case AttributeFsDirectionWrite:
+ return "write"
+ }
+ return ""
+}
+
+// MapAttributeFsDirection is a helper map of string to AttributeFsDirection attribute value.
+var MapAttributeFsDirection = map[string]AttributeFsDirection{
+ "read": AttributeFsDirectionRead,
+ "write": AttributeFsDirectionWrite,
+}
+
+// AttributeHealthStatus specifies the value of the health_status attribute.
+type AttributeHealthStatus int
+
+const (
+ _ AttributeHealthStatus = iota
+ AttributeHealthStatusGreen
+ AttributeHealthStatusYellow
+ AttributeHealthStatusRed
+)
+
+// String returns the string representation of the AttributeHealthStatus.
+func (av AttributeHealthStatus) String() string {
+ switch av {
+ case AttributeHealthStatusGreen:
+ return "green"
+ case AttributeHealthStatusYellow:
+ return "yellow"
+ case AttributeHealthStatusRed:
+ return "red"
+ }
+ return ""
+}
+
+// MapAttributeHealthStatus is a helper map of string to AttributeHealthStatus attribute value.
+var MapAttributeHealthStatus = map[string]AttributeHealthStatus{
+ "green": AttributeHealthStatusGreen,
+ "yellow": AttributeHealthStatusYellow,
+ "red": AttributeHealthStatusRed,
+}
+
+// AttributeOperation specifies the value of the operation attribute.
+type AttributeOperation int
+
+const (
+ _ AttributeOperation = iota
+ AttributeOperationIndex
+ AttributeOperationDelete
+ AttributeOperationGet
+ AttributeOperationQuery
+ AttributeOperationFetch
+ AttributeOperationScroll
+ AttributeOperationSuggest
+ AttributeOperationMerge
+ AttributeOperationRefresh
+ AttributeOperationFlush
+ AttributeOperationWarmer
+)
+
+// String returns the string representation of the AttributeOperation.
+func (av AttributeOperation) String() string {
+ switch av {
+ case AttributeOperationIndex:
+ return "index"
+ case AttributeOperationDelete:
+ return "delete"
+ case AttributeOperationGet:
+ return "get"
+ case AttributeOperationQuery:
+ return "query"
+ case AttributeOperationFetch:
+ return "fetch"
+ case AttributeOperationScroll:
+ return "scroll"
+ case AttributeOperationSuggest:
+ return "suggest"
+ case AttributeOperationMerge:
+ return "merge"
+ case AttributeOperationRefresh:
+ return "refresh"
+ case AttributeOperationFlush:
+ return "flush"
+ case AttributeOperationWarmer:
+ return "warmer"
+ }
+ return ""
+}
+
+// MapAttributeOperation is a helper map of string to AttributeOperation attribute value.
+var MapAttributeOperation = map[string]AttributeOperation{
+ "index": AttributeOperationIndex,
+ "delete": AttributeOperationDelete,
+ "get": AttributeOperationGet,
+ "query": AttributeOperationQuery,
+ "fetch": AttributeOperationFetch,
+ "scroll": AttributeOperationScroll,
+ "suggest": AttributeOperationSuggest,
+ "merge": AttributeOperationMerge,
+ "refresh": AttributeOperationRefresh,
+ "flush": AttributeOperationFlush,
+ "warmer": AttributeOperationWarmer,
+}
+
+// AttributeShardState specifies the value of the shard_state attribute.
+type AttributeShardState int
+
+const (
+ _ AttributeShardState = iota
+ AttributeShardStateActive
+ AttributeShardStateRelocating
+ AttributeShardStateInitializing
+ AttributeShardStateUnassigned
+)
+
+// String returns the string representation of the AttributeShardState.
+func (av AttributeShardState) String() string {
+ switch av {
+ case AttributeShardStateActive:
+ return "active"
+ case AttributeShardStateRelocating:
+ return "relocating"
+ case AttributeShardStateInitializing:
+ return "initializing"
+ case AttributeShardStateUnassigned:
+ return "unassigned"
+ }
+ return ""
+}
+
+// MapAttributeShardState is a helper map of string to AttributeShardState attribute value.
+var MapAttributeShardState = map[string]AttributeShardState{
+ "active": AttributeShardStateActive,
+ "relocating": AttributeShardStateRelocating,
+ "initializing": AttributeShardStateInitializing,
+ "unassigned": AttributeShardStateUnassigned,
+}
+
+// AttributeTaskState specifies the value of the task_state attribute.
+type AttributeTaskState int
+
+const (
+ _ AttributeTaskState = iota
+ AttributeTaskStateRejected
+ AttributeTaskStateCompleted
+)
+
+// String returns the string representation of the AttributeTaskState.
+func (av AttributeTaskState) String() string {
+ switch av {
+ case AttributeTaskStateRejected:
+ return "rejected"
+ case AttributeTaskStateCompleted:
+ return "completed"
+ }
+ return ""
+}
+
+// MapAttributeTaskState is a helper map of string to AttributeTaskState attribute value.
+var MapAttributeTaskState = map[string]AttributeTaskState{
+ "rejected": AttributeTaskStateRejected,
+ "completed": AttributeTaskStateCompleted,
+}
+
+// AttributeThreadState specifies the value of the thread_state attribute.
+type AttributeThreadState int
+
+const (
+ _ AttributeThreadState = iota
+ AttributeThreadStateActive
+ AttributeThreadStateIdle
+)
+
+// String returns the string representation of the AttributeThreadState.
+func (av AttributeThreadState) String() string {
+ switch av {
+ case AttributeThreadStateActive:
+ return "active"
+ case AttributeThreadStateIdle:
+ return "idle"
+ }
+ return ""
+}
+
+// MapAttributeThreadState is a helper map of string to AttributeThreadState attribute value.
+var MapAttributeThreadState = map[string]AttributeThreadState{
+ "active": AttributeThreadStateActive,
+ "idle": AttributeThreadStateIdle,
+}
+
type metricElasticsearchClusterDataNodes struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -1797,8 +2105,8 @@ func (mb *MetricsBuilder) RecordElasticsearchClusterDataNodesDataPoint(ts pcommo
}
// RecordElasticsearchClusterHealthDataPoint adds a data point to elasticsearch.cluster.health metric.
-func (mb *MetricsBuilder) RecordElasticsearchClusterHealthDataPoint(ts pcommon.Timestamp, val int64, healthStatusAttributeValue string) {
- mb.metricElasticsearchClusterHealth.recordDataPoint(mb.startTime, ts, val, healthStatusAttributeValue)
+func (mb *MetricsBuilder) RecordElasticsearchClusterHealthDataPoint(ts pcommon.Timestamp, val int64, healthStatusAttributeValue AttributeHealthStatus) {
+ mb.metricElasticsearchClusterHealth.recordDataPoint(mb.startTime, ts, val, healthStatusAttributeValue.String())
}
// RecordElasticsearchClusterNodesDataPoint adds a data point to elasticsearch.cluster.nodes metric.
@@ -1807,18 +2115,18 @@ func (mb *MetricsBuilder) RecordElasticsearchClusterNodesDataPoint(ts pcommon.Ti
}
// RecordElasticsearchClusterShardsDataPoint adds a data point to elasticsearch.cluster.shards metric.
-func (mb *MetricsBuilder) RecordElasticsearchClusterShardsDataPoint(ts pcommon.Timestamp, val int64, shardStateAttributeValue string) {
- mb.metricElasticsearchClusterShards.recordDataPoint(mb.startTime, ts, val, shardStateAttributeValue)
+func (mb *MetricsBuilder) RecordElasticsearchClusterShardsDataPoint(ts pcommon.Timestamp, val int64, shardStateAttributeValue AttributeShardState) {
+ mb.metricElasticsearchClusterShards.recordDataPoint(mb.startTime, ts, val, shardStateAttributeValue.String())
}
// RecordElasticsearchNodeCacheEvictionsDataPoint adds a data point to elasticsearch.node.cache.evictions metric.
-func (mb *MetricsBuilder) RecordElasticsearchNodeCacheEvictionsDataPoint(ts pcommon.Timestamp, val int64, cacheNameAttributeValue string) {
- mb.metricElasticsearchNodeCacheEvictions.recordDataPoint(mb.startTime, ts, val, cacheNameAttributeValue)
+func (mb *MetricsBuilder) RecordElasticsearchNodeCacheEvictionsDataPoint(ts pcommon.Timestamp, val int64, cacheNameAttributeValue AttributeCacheName) {
+ mb.metricElasticsearchNodeCacheEvictions.recordDataPoint(mb.startTime, ts, val, cacheNameAttributeValue.String())
}
// RecordElasticsearchNodeCacheMemoryUsageDataPoint adds a data point to elasticsearch.node.cache.memory.usage metric.
-func (mb *MetricsBuilder) RecordElasticsearchNodeCacheMemoryUsageDataPoint(ts pcommon.Timestamp, val int64, cacheNameAttributeValue string) {
- mb.metricElasticsearchNodeCacheMemoryUsage.recordDataPoint(mb.startTime, ts, val, cacheNameAttributeValue)
+func (mb *MetricsBuilder) RecordElasticsearchNodeCacheMemoryUsageDataPoint(ts pcommon.Timestamp, val int64, cacheNameAttributeValue AttributeCacheName) {
+ mb.metricElasticsearchNodeCacheMemoryUsage.recordDataPoint(mb.startTime, ts, val, cacheNameAttributeValue.String())
}
// RecordElasticsearchNodeClusterConnectionsDataPoint adds a data point to elasticsearch.node.cluster.connections metric.
@@ -1827,13 +2135,13 @@ func (mb *MetricsBuilder) RecordElasticsearchNodeClusterConnectionsDataPoint(ts
}
// RecordElasticsearchNodeClusterIoDataPoint adds a data point to elasticsearch.node.cluster.io metric.
-func (mb *MetricsBuilder) RecordElasticsearchNodeClusterIoDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue string) {
- mb.metricElasticsearchNodeClusterIo.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+func (mb *MetricsBuilder) RecordElasticsearchNodeClusterIoDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue AttributeDirection) {
+ mb.metricElasticsearchNodeClusterIo.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordElasticsearchNodeDocumentsDataPoint adds a data point to elasticsearch.node.documents metric.
-func (mb *MetricsBuilder) RecordElasticsearchNodeDocumentsDataPoint(ts pcommon.Timestamp, val int64, documentStateAttributeValue string) {
- mb.metricElasticsearchNodeDocuments.recordDataPoint(mb.startTime, ts, val, documentStateAttributeValue)
+func (mb *MetricsBuilder) RecordElasticsearchNodeDocumentsDataPoint(ts pcommon.Timestamp, val int64, documentStateAttributeValue AttributeDocumentState) {
+ mb.metricElasticsearchNodeDocuments.recordDataPoint(mb.startTime, ts, val, documentStateAttributeValue.String())
}
// RecordElasticsearchNodeFsDiskAvailableDataPoint adds a data point to elasticsearch.node.fs.disk.available metric.
@@ -1852,13 +2160,13 @@ func (mb *MetricsBuilder) RecordElasticsearchNodeOpenFilesDataPoint(ts pcommon.T
}
// RecordElasticsearchNodeOperationsCompletedDataPoint adds a data point to elasticsearch.node.operations.completed metric.
-func (mb *MetricsBuilder) RecordElasticsearchNodeOperationsCompletedDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue string) {
- mb.metricElasticsearchNodeOperationsCompleted.recordDataPoint(mb.startTime, ts, val, operationAttributeValue)
+func (mb *MetricsBuilder) RecordElasticsearchNodeOperationsCompletedDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation) {
+ mb.metricElasticsearchNodeOperationsCompleted.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String())
}
// RecordElasticsearchNodeOperationsTimeDataPoint adds a data point to elasticsearch.node.operations.time metric.
-func (mb *MetricsBuilder) RecordElasticsearchNodeOperationsTimeDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue string) {
- mb.metricElasticsearchNodeOperationsTime.recordDataPoint(mb.startTime, ts, val, operationAttributeValue)
+func (mb *MetricsBuilder) RecordElasticsearchNodeOperationsTimeDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation) {
+ mb.metricElasticsearchNodeOperationsTime.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String())
}
// RecordElasticsearchNodeShardsSizeDataPoint adds a data point to elasticsearch.node.shards.size metric.
@@ -1867,8 +2175,8 @@ func (mb *MetricsBuilder) RecordElasticsearchNodeShardsSizeDataPoint(ts pcommon.
}
// RecordElasticsearchNodeThreadPoolTasksFinishedDataPoint adds a data point to elasticsearch.node.thread_pool.tasks.finished metric.
-func (mb *MetricsBuilder) RecordElasticsearchNodeThreadPoolTasksFinishedDataPoint(ts pcommon.Timestamp, val int64, threadPoolNameAttributeValue string, taskStateAttributeValue string) {
- mb.metricElasticsearchNodeThreadPoolTasksFinished.recordDataPoint(mb.startTime, ts, val, threadPoolNameAttributeValue, taskStateAttributeValue)
+func (mb *MetricsBuilder) RecordElasticsearchNodeThreadPoolTasksFinishedDataPoint(ts pcommon.Timestamp, val int64, threadPoolNameAttributeValue string, taskStateAttributeValue AttributeTaskState) {
+ mb.metricElasticsearchNodeThreadPoolTasksFinished.recordDataPoint(mb.startTime, ts, val, threadPoolNameAttributeValue, taskStateAttributeValue.String())
}
// RecordElasticsearchNodeThreadPoolTasksQueuedDataPoint adds a data point to elasticsearch.node.thread_pool.tasks.queued metric.
@@ -1877,8 +2185,8 @@ func (mb *MetricsBuilder) RecordElasticsearchNodeThreadPoolTasksQueuedDataPoint(
}
// RecordElasticsearchNodeThreadPoolThreadsDataPoint adds a data point to elasticsearch.node.thread_pool.threads metric.
-func (mb *MetricsBuilder) RecordElasticsearchNodeThreadPoolThreadsDataPoint(ts pcommon.Timestamp, val int64, threadPoolNameAttributeValue string, threadStateAttributeValue string) {
- mb.metricElasticsearchNodeThreadPoolThreads.recordDataPoint(mb.startTime, ts, val, threadPoolNameAttributeValue, threadStateAttributeValue)
+func (mb *MetricsBuilder) RecordElasticsearchNodeThreadPoolThreadsDataPoint(ts pcommon.Timestamp, val int64, threadPoolNameAttributeValue string, threadStateAttributeValue AttributeThreadState) {
+ mb.metricElasticsearchNodeThreadPoolThreads.recordDataPoint(mb.startTime, ts, val, threadPoolNameAttributeValue, threadStateAttributeValue.String())
}
// RecordJvmClassesLoadedDataPoint adds a data point to jvm.classes.loaded metric.
@@ -1997,117 +2305,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeCacheName are the possible values that the attribute "cache_name" can have.
-var AttributeCacheName = struct {
- Fielddata string
- Query string
-}{
- "fielddata",
- "query",
-}
-
-// AttributeDirection are the possible values that the attribute "direction" can have.
-var AttributeDirection = struct {
- Received string
- Sent string
-}{
- "received",
- "sent",
-}
-
-// AttributeDiskUsageState are the possible values that the attribute "disk_usage_state" can have.
-var AttributeDiskUsageState = struct {
- Used string
- Free string
-}{
- "used",
- "free",
-}
-
-// AttributeDocumentState are the possible values that the attribute "document_state" can have.
-var AttributeDocumentState = struct {
- Active string
- Deleted string
-}{
- "active",
- "deleted",
-}
-
-// AttributeFsDirection are the possible values that the attribute "fs_direction" can have.
-var AttributeFsDirection = struct {
- Read string
- Write string
-}{
- "read",
- "write",
-}
-
-// AttributeHealthStatus are the possible values that the attribute "health_status" can have.
-var AttributeHealthStatus = struct {
- Green string
- Yellow string
- Red string
-}{
- "green",
- "yellow",
- "red",
-}
-
-// AttributeOperation are the possible values that the attribute "operation" can have.
-var AttributeOperation = struct {
- Index string
- Delete string
- Get string
- Query string
- Fetch string
- Scroll string
- Suggest string
- Merge string
- Refresh string
- Flush string
- Warmer string
-}{
- "index",
- "delete",
- "get",
- "query",
- "fetch",
- "scroll",
- "suggest",
- "merge",
- "refresh",
- "flush",
- "warmer",
-}
-
-// AttributeShardState are the possible values that the attribute "shard_state" can have.
-var AttributeShardState = struct {
- Active string
- Relocating string
- Initializing string
- Unassigned string
-}{
- "active",
- "relocating",
- "initializing",
- "unassigned",
-}
-
-// AttributeTaskState are the possible values that the attribute "task_state" can have.
-var AttributeTaskState = struct {
- Rejected string
- Completed string
-}{
- "rejected",
- "completed",
-}
-
-// AttributeThreadState are the possible values that the attribute "thread_state" can have.
-var AttributeThreadState = struct {
- Active string
- Idle string
-}{
- "active",
- "idle",
-}
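Before the scraper changes, a quick sketch of how the typed API generated above is consumed (it has to live inside the elasticsearch receiver, since the metadata package is internal); r.mb and now are stand-ins for the scraper's MetricsBuilder and timestamp:

    // Record helpers now take the typed constant instead of a raw string.
    r.mb.RecordElasticsearchClusterShardsDataPoint(now, 3, metadata.AttributeShardStateActive)

    // String() yields the wire value that lands on the data point attribute.
    _ = metadata.AttributeShardStateActive.String() // "active"

    // MapAttributeShardState converts the other way and doubles as validation:
    // strings the metadata does not define are simply absent from the map.
    if state, ok := metadata.MapAttributeShardState["relocating"]; ok {
        r.mb.RecordElasticsearchClusterShardsDataPoint(now, 1, state)
    }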
diff --git a/receiver/elasticsearchreceiver/scraper.go b/receiver/elasticsearchreceiver/scraper.go
index 684038236063..870c4a452028 100644
--- a/receiver/elasticsearchreceiver/scraper.go
+++ b/receiver/elasticsearchreceiver/scraper.go
@@ -90,59 +90,59 @@ func (r *elasticsearchScraper) scrapeNodeMetrics(ctx context.Context, now pcommo
ilms := rm.ScopeMetrics().AppendEmpty()
ilms.Scope().SetName(instrumentationLibraryName)
- r.mb.RecordElasticsearchNodeCacheMemoryUsageDataPoint(now, info.Indices.FieldDataCache.MemorySizeInBy, metadata.AttributeCacheName.Fielddata)
- r.mb.RecordElasticsearchNodeCacheMemoryUsageDataPoint(now, info.Indices.QueryCache.MemorySizeInBy, metadata.AttributeCacheName.Query)
+ r.mb.RecordElasticsearchNodeCacheMemoryUsageDataPoint(now, info.Indices.FieldDataCache.MemorySizeInBy, metadata.AttributeCacheNameFielddata)
+ r.mb.RecordElasticsearchNodeCacheMemoryUsageDataPoint(now, info.Indices.QueryCache.MemorySizeInBy, metadata.AttributeCacheNameQuery)
- r.mb.RecordElasticsearchNodeCacheEvictionsDataPoint(now, info.Indices.FieldDataCache.Evictions, metadata.AttributeCacheName.Fielddata)
- r.mb.RecordElasticsearchNodeCacheEvictionsDataPoint(now, info.Indices.QueryCache.Evictions, metadata.AttributeCacheName.Query)
+ r.mb.RecordElasticsearchNodeCacheEvictionsDataPoint(now, info.Indices.FieldDataCache.Evictions, metadata.AttributeCacheNameFielddata)
+ r.mb.RecordElasticsearchNodeCacheEvictionsDataPoint(now, info.Indices.QueryCache.Evictions, metadata.AttributeCacheNameQuery)
r.mb.RecordElasticsearchNodeFsDiskAvailableDataPoint(now, info.FS.Total.AvailableBytes)
- r.mb.RecordElasticsearchNodeClusterIoDataPoint(now, info.TransportStats.ReceivedBytes, metadata.AttributeDirection.Received)
- r.mb.RecordElasticsearchNodeClusterIoDataPoint(now, info.TransportStats.SentBytes, metadata.AttributeDirection.Sent)
+ r.mb.RecordElasticsearchNodeClusterIoDataPoint(now, info.TransportStats.ReceivedBytes, metadata.AttributeDirectionReceived)
+ r.mb.RecordElasticsearchNodeClusterIoDataPoint(now, info.TransportStats.SentBytes, metadata.AttributeDirectionSent)
r.mb.RecordElasticsearchNodeClusterConnectionsDataPoint(now, info.TransportStats.OpenConnections)
r.mb.RecordElasticsearchNodeHTTPConnectionsDataPoint(now, info.HTTPStats.OpenConnections)
- r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.IndexingOperations.IndexTotal, metadata.AttributeOperation.Index)
- r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.IndexingOperations.DeleteTotal, metadata.AttributeOperation.Delete)
- r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.GetOperation.Total, metadata.AttributeOperation.Get)
- r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.SearchOperations.QueryTotal, metadata.AttributeOperation.Query)
- r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.SearchOperations.FetchTotal, metadata.AttributeOperation.Fetch)
- r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.SearchOperations.ScrollTotal, metadata.AttributeOperation.Scroll)
- r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.SearchOperations.SuggestTotal, metadata.AttributeOperation.Suggest)
- r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.MergeOperations.Total, metadata.AttributeOperation.Merge)
- r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.RefreshOperations.Total, metadata.AttributeOperation.Refresh)
- r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.FlushOperations.Total, metadata.AttributeOperation.Flush)
- r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.WarmerOperations.Total, metadata.AttributeOperation.Warmer)
-
- r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.IndexingOperations.IndexTimeInMs, metadata.AttributeOperation.Index)
- r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.IndexingOperations.DeleteTimeInMs, metadata.AttributeOperation.Delete)
- r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.GetOperation.TotalTimeInMs, metadata.AttributeOperation.Get)
- r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.SearchOperations.QueryTimeInMs, metadata.AttributeOperation.Query)
- r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.SearchOperations.FetchTimeInMs, metadata.AttributeOperation.Fetch)
- r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.SearchOperations.ScrollTimeInMs, metadata.AttributeOperation.Scroll)
- r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.SearchOperations.SuggestTimeInMs, metadata.AttributeOperation.Suggest)
- r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.MergeOperations.TotalTimeInMs, metadata.AttributeOperation.Merge)
- r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.RefreshOperations.TotalTimeInMs, metadata.AttributeOperation.Refresh)
- r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.FlushOperations.TotalTimeInMs, metadata.AttributeOperation.Flush)
- r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.WarmerOperations.TotalTimeInMs, metadata.AttributeOperation.Warmer)
+ r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.IndexingOperations.IndexTotal, metadata.AttributeOperationIndex)
+ r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.IndexingOperations.DeleteTotal, metadata.AttributeOperationDelete)
+ r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.GetOperation.Total, metadata.AttributeOperationGet)
+ r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.SearchOperations.QueryTotal, metadata.AttributeOperationQuery)
+ r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.SearchOperations.FetchTotal, metadata.AttributeOperationFetch)
+ r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.SearchOperations.ScrollTotal, metadata.AttributeOperationScroll)
+ r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.SearchOperations.SuggestTotal, metadata.AttributeOperationSuggest)
+ r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.MergeOperations.Total, metadata.AttributeOperationMerge)
+ r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.RefreshOperations.Total, metadata.AttributeOperationRefresh)
+ r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.FlushOperations.Total, metadata.AttributeOperationFlush)
+ r.mb.RecordElasticsearchNodeOperationsCompletedDataPoint(now, info.Indices.WarmerOperations.Total, metadata.AttributeOperationWarmer)
+
+ r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.IndexingOperations.IndexTimeInMs, metadata.AttributeOperationIndex)
+ r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.IndexingOperations.DeleteTimeInMs, metadata.AttributeOperationDelete)
+ r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.GetOperation.TotalTimeInMs, metadata.AttributeOperationGet)
+ r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.SearchOperations.QueryTimeInMs, metadata.AttributeOperationQuery)
+ r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.SearchOperations.FetchTimeInMs, metadata.AttributeOperationFetch)
+ r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.SearchOperations.ScrollTimeInMs, metadata.AttributeOperationScroll)
+ r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.SearchOperations.SuggestTimeInMs, metadata.AttributeOperationSuggest)
+ r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.MergeOperations.TotalTimeInMs, metadata.AttributeOperationMerge)
+ r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.RefreshOperations.TotalTimeInMs, metadata.AttributeOperationRefresh)
+ r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.FlushOperations.TotalTimeInMs, metadata.AttributeOperationFlush)
+ r.mb.RecordElasticsearchNodeOperationsTimeDataPoint(now, info.Indices.WarmerOperations.TotalTimeInMs, metadata.AttributeOperationWarmer)
r.mb.RecordElasticsearchNodeShardsSizeDataPoint(now, info.Indices.StoreInfo.SizeInBy)
for tpName, tpInfo := range info.ThreadPoolInfo {
- r.mb.RecordElasticsearchNodeThreadPoolThreadsDataPoint(now, tpInfo.ActiveThreads, tpName, metadata.AttributeThreadState.Active)
- r.mb.RecordElasticsearchNodeThreadPoolThreadsDataPoint(now, tpInfo.TotalThreads-tpInfo.ActiveThreads, tpName, metadata.AttributeThreadState.Idle)
+ r.mb.RecordElasticsearchNodeThreadPoolThreadsDataPoint(now, tpInfo.ActiveThreads, tpName, metadata.AttributeThreadStateActive)
+ r.mb.RecordElasticsearchNodeThreadPoolThreadsDataPoint(now, tpInfo.TotalThreads-tpInfo.ActiveThreads, tpName, metadata.AttributeThreadStateIdle)
r.mb.RecordElasticsearchNodeThreadPoolTasksQueuedDataPoint(now, tpInfo.QueuedTasks, tpName)
- r.mb.RecordElasticsearchNodeThreadPoolTasksFinishedDataPoint(now, tpInfo.CompletedTasks, tpName, metadata.AttributeTaskState.Completed)
- r.mb.RecordElasticsearchNodeThreadPoolTasksFinishedDataPoint(now, tpInfo.RejectedTasks, tpName, metadata.AttributeTaskState.Rejected)
+ r.mb.RecordElasticsearchNodeThreadPoolTasksFinishedDataPoint(now, tpInfo.CompletedTasks, tpName, metadata.AttributeTaskStateCompleted)
+ r.mb.RecordElasticsearchNodeThreadPoolTasksFinishedDataPoint(now, tpInfo.RejectedTasks, tpName, metadata.AttributeTaskStateRejected)
}
- r.mb.RecordElasticsearchNodeDocumentsDataPoint(now, info.Indices.DocumentStats.ActiveCount, metadata.AttributeDocumentState.Active)
- r.mb.RecordElasticsearchNodeDocumentsDataPoint(now, info.Indices.DocumentStats.DeletedCount, metadata.AttributeDocumentState.Deleted)
+ r.mb.RecordElasticsearchNodeDocumentsDataPoint(now, info.Indices.DocumentStats.ActiveCount, metadata.AttributeDocumentStateActive)
+ r.mb.RecordElasticsearchNodeDocumentsDataPoint(now, info.Indices.DocumentStats.DeletedCount, metadata.AttributeDocumentStateDeleted)
r.mb.RecordElasticsearchNodeOpenFilesDataPoint(now, info.ProcessStats.OpenFileDescriptorsCount)
@@ -197,24 +197,24 @@ func (r *elasticsearchScraper) scrapeClusterMetrics(ctx context.Context, now pco
r.mb.RecordElasticsearchClusterDataNodesDataPoint(now, clusterHealth.DataNodeCount)
- r.mb.RecordElasticsearchClusterShardsDataPoint(now, clusterHealth.ActiveShards, metadata.AttributeShardState.Active)
- r.mb.RecordElasticsearchClusterShardsDataPoint(now, clusterHealth.InitializingShards, metadata.AttributeShardState.Initializing)
- r.mb.RecordElasticsearchClusterShardsDataPoint(now, clusterHealth.RelocatingShards, metadata.AttributeShardState.Relocating)
- r.mb.RecordElasticsearchClusterShardsDataPoint(now, clusterHealth.UnassignedShards, metadata.AttributeShardState.Unassigned)
+ r.mb.RecordElasticsearchClusterShardsDataPoint(now, clusterHealth.ActiveShards, metadata.AttributeShardStateActive)
+ r.mb.RecordElasticsearchClusterShardsDataPoint(now, clusterHealth.InitializingShards, metadata.AttributeShardStateInitializing)
+ r.mb.RecordElasticsearchClusterShardsDataPoint(now, clusterHealth.RelocatingShards, metadata.AttributeShardStateRelocating)
+ r.mb.RecordElasticsearchClusterShardsDataPoint(now, clusterHealth.UnassignedShards, metadata.AttributeShardStateUnassigned)
switch clusterHealth.Status {
case "green":
- r.mb.RecordElasticsearchClusterHealthDataPoint(now, 1, metadata.AttributeHealthStatus.Green)
- r.mb.RecordElasticsearchClusterHealthDataPoint(now, 0, metadata.AttributeHealthStatus.Yellow)
- r.mb.RecordElasticsearchClusterHealthDataPoint(now, 0, metadata.AttributeHealthStatus.Red)
+ r.mb.RecordElasticsearchClusterHealthDataPoint(now, 1, metadata.AttributeHealthStatusGreen)
+ r.mb.RecordElasticsearchClusterHealthDataPoint(now, 0, metadata.AttributeHealthStatusYellow)
+ r.mb.RecordElasticsearchClusterHealthDataPoint(now, 0, metadata.AttributeHealthStatusRed)
case "yellow":
- r.mb.RecordElasticsearchClusterHealthDataPoint(now, 0, metadata.AttributeHealthStatus.Green)
- r.mb.RecordElasticsearchClusterHealthDataPoint(now, 1, metadata.AttributeHealthStatus.Yellow)
- r.mb.RecordElasticsearchClusterHealthDataPoint(now, 0, metadata.AttributeHealthStatus.Red)
+ r.mb.RecordElasticsearchClusterHealthDataPoint(now, 0, metadata.AttributeHealthStatusGreen)
+ r.mb.RecordElasticsearchClusterHealthDataPoint(now, 1, metadata.AttributeHealthStatusYellow)
+ r.mb.RecordElasticsearchClusterHealthDataPoint(now, 0, metadata.AttributeHealthStatusRed)
case "red":
- r.mb.RecordElasticsearchClusterHealthDataPoint(now, 0, metadata.AttributeHealthStatus.Green)
- r.mb.RecordElasticsearchClusterHealthDataPoint(now, 0, metadata.AttributeHealthStatus.Yellow)
- r.mb.RecordElasticsearchClusterHealthDataPoint(now, 1, metadata.AttributeHealthStatus.Red)
+ r.mb.RecordElasticsearchClusterHealthDataPoint(now, 0, metadata.AttributeHealthStatusGreen)
+ r.mb.RecordElasticsearchClusterHealthDataPoint(now, 0, metadata.AttributeHealthStatusYellow)
+ r.mb.RecordElasticsearchClusterHealthDataPoint(now, 1, metadata.AttributeHealthStatusRed)
default:
errs.AddPartial(1, fmt.Errorf("health status %s: %w", clusterHealth.Status, errUnknownClusterStatus))
}
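The three-branch switch above keeps the existing behaviour unchanged; as an aside, MapAttributeHealthStatus would also permit a table-driven version along these lines (a sketch only, not part of this change):

    if status, ok := metadata.MapAttributeHealthStatus[clusterHealth.Status]; ok {
        for _, hs := range []metadata.AttributeHealthStatus{
            metadata.AttributeHealthStatusGreen,
            metadata.AttributeHealthStatusYellow,
            metadata.AttributeHealthStatusRed,
        } {
            // Emit 1 for the reported status and 0 for the others, as the switch does today.
            val := int64(0)
            if hs == status {
                val = 1
            }
            r.mb.RecordElasticsearchClusterHealthDataPoint(now, val, hs)
        }
    } else {
        errs.AddPartial(1, fmt.Errorf("health status %s: %w", clusterHealth.Status, errUnknownClusterStatus))
    }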
diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_linux.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_linux.go
index 232a1c8ddcb2..f32bd772e37c 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_linux.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_linux.go
@@ -26,23 +26,23 @@ import (
)
func (s *scraper) recordCPUTimeStateDataPoints(now pcommon.Timestamp, cpuTime cpu.TimesStat) {
- s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.User, cpuTime.CPU, metadata.AttributeState.User)
- s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.System, cpuTime.CPU, metadata.AttributeState.System)
- s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Idle, cpuTime.CPU, metadata.AttributeState.Idle)
- s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Irq, cpuTime.CPU, metadata.AttributeState.Interrupt)
- s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Nice, cpuTime.CPU, metadata.AttributeState.Nice)
- s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Softirq, cpuTime.CPU, metadata.AttributeState.Softirq)
- s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Steal, cpuTime.CPU, metadata.AttributeState.Steal)
- s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Iowait, cpuTime.CPU, metadata.AttributeState.Wait)
+ s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.User, cpuTime.CPU, metadata.AttributeStateUser)
+ s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.System, cpuTime.CPU, metadata.AttributeStateSystem)
+ s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Idle, cpuTime.CPU, metadata.AttributeStateIdle)
+ s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Irq, cpuTime.CPU, metadata.AttributeStateInterrupt)
+ s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Nice, cpuTime.CPU, metadata.AttributeStateNice)
+ s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Softirq, cpuTime.CPU, metadata.AttributeStateSoftirq)
+ s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Steal, cpuTime.CPU, metadata.AttributeStateSteal)
+ s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Iowait, cpuTime.CPU, metadata.AttributeStateWait)
}
func (s *scraper) recordCPUUtilization(now pcommon.Timestamp, cpuUtilization ucal.CPUUtilization) {
- s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.User, cpuUtilization.CPU, metadata.AttributeState.User)
- s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.System, cpuUtilization.CPU, metadata.AttributeState.System)
- s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.Idle, cpuUtilization.CPU, metadata.AttributeState.Idle)
- s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.Irq, cpuUtilization.CPU, metadata.AttributeState.Interrupt)
- s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.Nice, cpuUtilization.CPU, metadata.AttributeState.Nice)
- s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.Softirq, cpuUtilization.CPU, metadata.AttributeState.Softirq)
- s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.Steal, cpuUtilization.CPU, metadata.AttributeState.Steal)
- s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.Iowait, cpuUtilization.CPU, metadata.AttributeState.Wait)
+ s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.User, cpuUtilization.CPU, metadata.AttributeStateUser)
+ s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.System, cpuUtilization.CPU, metadata.AttributeStateSystem)
+ s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.Idle, cpuUtilization.CPU, metadata.AttributeStateIdle)
+ s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.Irq, cpuUtilization.CPU, metadata.AttributeStateInterrupt)
+ s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.Nice, cpuUtilization.CPU, metadata.AttributeStateNice)
+ s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.Softirq, cpuUtilization.CPU, metadata.AttributeStateSoftirq)
+ s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.Steal, cpuUtilization.CPU, metadata.AttributeStateSteal)
+ s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.Iowait, cpuUtilization.CPU, metadata.AttributeStateWait)
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_others.go
index 74a50e0a7105..06e6d7f86b98 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_others.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_others.go
@@ -26,15 +26,15 @@ import (
)
func (s *scraper) recordCPUTimeStateDataPoints(now pcommon.Timestamp, cpuTime cpu.TimesStat) {
- s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.User, cpuTime.CPU, metadata.AttributeState.User)
- s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.System, cpuTime.CPU, metadata.AttributeState.System)
- s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Idle, cpuTime.CPU, metadata.AttributeState.Idle)
- s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Irq, cpuTime.CPU, metadata.AttributeState.Interrupt)
+ s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.User, cpuTime.CPU, metadata.AttributeStateUser)
+ s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.System, cpuTime.CPU, metadata.AttributeStateSystem)
+ s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Idle, cpuTime.CPU, metadata.AttributeStateIdle)
+ s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Irq, cpuTime.CPU, metadata.AttributeStateInterrupt)
}
func (s *scraper) recordCPUUtilization(now pcommon.Timestamp, cpuUtilization ucal.CPUUtilization) {
- s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.User, cpuUtilization.CPU, metadata.AttributeState.User)
- s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.System, cpuUtilization.CPU, metadata.AttributeState.System)
- s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.Idle, cpuUtilization.CPU, metadata.AttributeState.Idle)
- s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.Irq, cpuUtilization.CPU, metadata.AttributeState.Interrupt)
+ s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.User, cpuUtilization.CPU, metadata.AttributeStateUser)
+ s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.System, cpuUtilization.CPU, metadata.AttributeStateSystem)
+ s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.Idle, cpuUtilization.CPU, metadata.AttributeStateIdle)
+ s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.Irq, cpuUtilization.CPU, metadata.AttributeStateInterrupt)
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_test.go
index 13351104aaac..f96706435825 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_test.go
@@ -360,17 +360,25 @@ func assertCPUMetricValid(t *testing.T, metric pmetric.Metric, startTime pcommon
}
assert.GreaterOrEqual(t, metric.Sum().DataPoints().Len(), 4*runtime.NumCPU())
internal.AssertSumMetricHasAttribute(t, metric, 0, metadata.Attributes.Cpu)
- internal.AssertSumMetricHasAttributeValue(t, metric, 0, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.User))
- internal.AssertSumMetricHasAttributeValue(t, metric, 1, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.System))
- internal.AssertSumMetricHasAttributeValue(t, metric, 2, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Idle))
- internal.AssertSumMetricHasAttributeValue(t, metric, 3, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Interrupt))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 0, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateUser.String()))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 1, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateSystem.String()))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 2, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateIdle.String()))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 3, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateInterrupt.String()))
}
func assertCPUMetricHasLinuxSpecificStateLabels(t *testing.T, metric pmetric.Metric) {
- internal.AssertSumMetricHasAttributeValue(t, metric, 4, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Nice))
- internal.AssertSumMetricHasAttributeValue(t, metric, 5, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Softirq))
- internal.AssertSumMetricHasAttributeValue(t, metric, 6, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Steal))
- internal.AssertSumMetricHasAttributeValue(t, metric, 7, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Wait))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 4, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateNice.String()))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 5, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateSoftirq.String()))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 6, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateSteal.String()))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 7, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateWait.String()))
}
func assertCPUUtilizationMetricValid(t *testing.T, metric pmetric.Metric, startTime pcommon.Timestamp) {
@@ -384,15 +392,23 @@ func assertCPUUtilizationMetricValid(t *testing.T, metric pmetric.Metric, startT
internal.AssertGaugeMetricStartTimeEquals(t, metric, startTime)
}
internal.AssertGaugeMetricHasAttribute(t, metric, 0, metadata.Attributes.Cpu)
- internal.AssertGaugeMetricHasAttributeValue(t, metric, 0, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.User))
- internal.AssertGaugeMetricHasAttributeValue(t, metric, 1, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.System))
- internal.AssertGaugeMetricHasAttributeValue(t, metric, 2, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Idle))
- internal.AssertGaugeMetricHasAttributeValue(t, metric, 3, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Interrupt))
+ internal.AssertGaugeMetricHasAttributeValue(t, metric, 0, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateUser.String()))
+ internal.AssertGaugeMetricHasAttributeValue(t, metric, 1, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateSystem.String()))
+ internal.AssertGaugeMetricHasAttributeValue(t, metric, 2, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateIdle.String()))
+ internal.AssertGaugeMetricHasAttributeValue(t, metric, 3, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateInterrupt.String()))
}
func assertCPUUtilizationMetricHasLinuxSpecificStateLabels(t *testing.T, metric pmetric.Metric) {
- internal.AssertGaugeMetricHasAttributeValue(t, metric, 4, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Nice))
- internal.AssertGaugeMetricHasAttributeValue(t, metric, 5, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Softirq))
- internal.AssertGaugeMetricHasAttributeValue(t, metric, 6, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Steal))
- internal.AssertGaugeMetricHasAttributeValue(t, metric, 7, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Wait))
+ internal.AssertGaugeMetricHasAttributeValue(t, metric, 4, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateNice.String()))
+ internal.AssertGaugeMetricHasAttributeValue(t, metric, 5, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateSoftirq.String()))
+ internal.AssertGaugeMetricHasAttributeValue(t, metric, 6, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateSteal.String()))
+ internal.AssertGaugeMetricHasAttributeValue(t, metric, 7, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateWait.String()))
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata/generated_metrics_v2.go
index b2ace03d4670..3bbec94ce6c2 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata/generated_metrics_v2.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata/generated_metrics_v2.go
@@ -32,6 +32,56 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeState specifies the value of the state attribute.
+type AttributeState int
+
+const (
+ _ AttributeState = iota
+ AttributeStateIdle
+ AttributeStateInterrupt
+ AttributeStateNice
+ AttributeStateSoftirq
+ AttributeStateSteal
+ AttributeStateSystem
+ AttributeStateUser
+ AttributeStateWait
+)
+
+// String returns the string representation of the AttributeState.
+func (av AttributeState) String() string {
+ switch av {
+ case AttributeStateIdle:
+ return "idle"
+ case AttributeStateInterrupt:
+ return "interrupt"
+ case AttributeStateNice:
+ return "nice"
+ case AttributeStateSoftirq:
+ return "softirq"
+ case AttributeStateSteal:
+ return "steal"
+ case AttributeStateSystem:
+ return "system"
+ case AttributeStateUser:
+ return "user"
+ case AttributeStateWait:
+ return "wait"
+ }
+ return ""
+}
+
+// MapAttributeState is a helper map of string to AttributeState attribute value.
+var MapAttributeState = map[string]AttributeState{
+ "idle": AttributeStateIdle,
+ "interrupt": AttributeStateInterrupt,
+ "nice": AttributeStateNice,
+ "softirq": AttributeStateSoftirq,
+ "steal": AttributeStateSteal,
+ "system": AttributeStateSystem,
+ "user": AttributeStateUser,
+ "wait": AttributeStateWait,
+}
+
type metricSystemCPUTime struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -218,13 +268,13 @@ func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics {
}
// RecordSystemCPUTimeDataPoint adds a data point to system.cpu.time metric.
-func (mb *MetricsBuilder) RecordSystemCPUTimeDataPoint(ts pcommon.Timestamp, val float64, cpuAttributeValue string, stateAttributeValue string) {
- mb.metricSystemCPUTime.recordDataPoint(mb.startTime, ts, val, cpuAttributeValue, stateAttributeValue)
+func (mb *MetricsBuilder) RecordSystemCPUTimeDataPoint(ts pcommon.Timestamp, val float64, cpuAttributeValue string, stateAttributeValue AttributeState) {
+ mb.metricSystemCPUTime.recordDataPoint(mb.startTime, ts, val, cpuAttributeValue, stateAttributeValue.String())
}
// RecordSystemCPUUtilizationDataPoint adds a data point to system.cpu.utilization metric.
-func (mb *MetricsBuilder) RecordSystemCPUUtilizationDataPoint(ts pcommon.Timestamp, val float64, cpuAttributeValue string, stateAttributeValue string) {
- mb.metricSystemCPUUtilization.recordDataPoint(mb.startTime, ts, val, cpuAttributeValue, stateAttributeValue)
+func (mb *MetricsBuilder) RecordSystemCPUUtilizationDataPoint(ts pcommon.Timestamp, val float64, cpuAttributeValue string, stateAttributeValue AttributeState) {
+ mb.metricSystemCPUUtilization.recordDataPoint(mb.startTime, ts, val, cpuAttributeValue, stateAttributeValue.String())
}
// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
@@ -249,24 +299,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeState are the possible values that the attribute "state" can have.
-var AttributeState = struct {
- Idle string
- Interrupt string
- Nice string
- Softirq string
- Steal string
- System string
- User string
- Wait string
-}{
- "idle",
- "interrupt",
- "nice",
- "softirq",
- "steal",
- "system",
- "user",
- "wait",
-}
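The same pattern applies to the CPU scraper: a small sketch assuming s.mb, now, and cpuTime as in the scraper files above; the lookup of a caller-supplied string is illustrative only:

    // Typed constants at the call site, exactly as in cpu_scraper_linux.go above.
    s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.User, cpuTime.CPU, metadata.AttributeStateUser)

    // MapAttributeState validates externally supplied strings before they reach the builder.
    if state, ok := metadata.MapAttributeState["softirq"]; ok {
        s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Softirq, cpuTime.CPU, state)
    }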
diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go
index 4433a6d697ce..2c560a019132 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go
@@ -109,15 +109,15 @@ func (s *scraper) scrape(_ context.Context) (pmetric.Metrics, error) {
func (s *scraper) recordDiskIOMetric(now pcommon.Timestamp, ioCounters map[string]disk.IOCountersStat) {
for device, ioCounter := range ioCounters {
- s.mb.RecordSystemDiskIoDataPoint(now, int64(ioCounter.ReadBytes), device, metadata.AttributeDirection.Read)
- s.mb.RecordSystemDiskIoDataPoint(now, int64(ioCounter.WriteBytes), device, metadata.AttributeDirection.Write)
+ s.mb.RecordSystemDiskIoDataPoint(now, int64(ioCounter.ReadBytes), device, metadata.AttributeDirectionRead)
+ s.mb.RecordSystemDiskIoDataPoint(now, int64(ioCounter.WriteBytes), device, metadata.AttributeDirectionWrite)
}
}
func (s *scraper) recordDiskOperationsMetric(now pcommon.Timestamp, ioCounters map[string]disk.IOCountersStat) {
for device, ioCounter := range ioCounters {
- s.mb.RecordSystemDiskOperationsDataPoint(now, int64(ioCounter.ReadCount), device, metadata.AttributeDirection.Read)
- s.mb.RecordSystemDiskOperationsDataPoint(now, int64(ioCounter.WriteCount), device, metadata.AttributeDirection.Write)
+ s.mb.RecordSystemDiskOperationsDataPoint(now, int64(ioCounter.ReadCount), device, metadata.AttributeDirectionRead)
+ s.mb.RecordSystemDiskOperationsDataPoint(now, int64(ioCounter.WriteCount), device, metadata.AttributeDirectionWrite)
}
}
@@ -129,8 +129,8 @@ func (s *scraper) recordDiskIOTimeMetric(now pcommon.Timestamp, ioCounters map[s
func (s *scraper) recordDiskOperationTimeMetric(now pcommon.Timestamp, ioCounters map[string]disk.IOCountersStat) {
for device, ioCounter := range ioCounters {
- s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(ioCounter.ReadTime)/1e3, device, metadata.AttributeDirection.Read)
- s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(ioCounter.WriteTime)/1e3, device, metadata.AttributeDirection.Write)
+ s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(ioCounter.ReadTime)/1e3, device, metadata.AttributeDirectionRead)
+ s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(ioCounter.WriteTime)/1e3, device, metadata.AttributeDirectionWrite)
}
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go
index cc19343fbeef..501238ae5451 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go
@@ -39,7 +39,7 @@ func (s *scraper) recordDiskWeightedIOTimeMetric(now pcommon.Timestamp, ioCounte
func (s *scraper) recordDiskMergedMetric(now pcommon.Timestamp, ioCounters map[string]disk.IOCountersStat) {
for device, ioCounter := range ioCounters {
- s.mb.RecordSystemDiskMergedDataPoint(now, int64(ioCounter.MergedReadCount), device, metadata.AttributeDirection.Read)
- s.mb.RecordSystemDiskMergedDataPoint(now, int64(ioCounter.MergedWriteCount), device, metadata.AttributeDirection.Write)
+ s.mb.RecordSystemDiskMergedDataPoint(now, int64(ioCounter.MergedReadCount), device, metadata.AttributeDirectionRead)
+ s.mb.RecordSystemDiskMergedDataPoint(now, int64(ioCounter.MergedWriteCount), device, metadata.AttributeDirectionWrite)
}
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go
index cb47e2f0c4ba..00a7d554575b 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go
@@ -170,8 +170,10 @@ func assertInt64DiskMetricValid(t *testing.T, metric pmetric.Metric, startTime p
assert.GreaterOrEqual(t, metric.Sum().DataPoints().Len(), 2)
internal.AssertSumMetricHasAttribute(t, metric, 0, "device")
- internal.AssertSumMetricHasAttributeValue(t, metric, 0, "direction", pcommon.NewValueString(metadata.AttributeDirection.Read))
- internal.AssertSumMetricHasAttributeValue(t, metric, 1, "direction", pcommon.NewValueString(metadata.AttributeDirection.Write))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 0, "direction",
+ pcommon.NewValueString(metadata.AttributeDirectionRead.String()))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 1, "direction",
+ pcommon.NewValueString(metadata.AttributeDirectionWrite.String()))
}
func assertDoubleDiskMetricValid(t *testing.T, metric pmetric.Metric, expectDirectionLabels bool, startTime pcommon.Timestamp) {
@@ -187,8 +189,10 @@ func assertDoubleDiskMetricValid(t *testing.T, metric pmetric.Metric, expectDire
internal.AssertSumMetricHasAttribute(t, metric, 0, "device")
if expectDirectionLabels {
- internal.AssertSumMetricHasAttributeValue(t, metric, 0, "direction", pcommon.NewValueString(metadata.AttributeDirection.Read))
- internal.AssertSumMetricHasAttributeValue(t, metric, metric.Sum().DataPoints().Len()-1, "direction", pcommon.NewValueString(metadata.AttributeDirection.Write))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 0, "direction",
+ pcommon.NewValueString(metadata.AttributeDirectionRead.String()))
+ internal.AssertSumMetricHasAttributeValue(t, metric, metric.Sum().DataPoints().Len()-1, "direction",
+ pcommon.NewValueString(metadata.AttributeDirectionWrite.String()))
}
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go
index 610038b4f94a..fe00803e8a44 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go
@@ -132,15 +132,15 @@ func (s *scraper) scrape(ctx context.Context) (pmetric.Metrics, error) {
func (s *scraper) recordDiskIOMetric(now pcommon.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) {
for _, logicalDiskCounter := range logicalDiskCounterValues {
- s.mb.RecordSystemDiskIoDataPoint(now, logicalDiskCounter.Values[readBytesPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirection.Read)
- s.mb.RecordSystemDiskIoDataPoint(now, logicalDiskCounter.Values[writeBytesPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirection.Write)
+ s.mb.RecordSystemDiskIoDataPoint(now, logicalDiskCounter.Values[readBytesPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirectionRead)
+ s.mb.RecordSystemDiskIoDataPoint(now, logicalDiskCounter.Values[writeBytesPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirectionWrite)
}
}
func (s *scraper) recordDiskOperationsMetric(now pcommon.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) {
for _, logicalDiskCounter := range logicalDiskCounterValues {
- s.mb.RecordSystemDiskOperationsDataPoint(now, logicalDiskCounter.Values[readsPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirection.Read)
- s.mb.RecordSystemDiskOperationsDataPoint(now, logicalDiskCounter.Values[writesPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirection.Write)
+ s.mb.RecordSystemDiskOperationsDataPoint(now, logicalDiskCounter.Values[readsPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirectionRead)
+ s.mb.RecordSystemDiskOperationsDataPoint(now, logicalDiskCounter.Values[writesPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirectionWrite)
}
}
@@ -153,8 +153,8 @@ func (s *scraper) recordDiskIOTimeMetric(now pcommon.Timestamp, logicalDiskCount
func (s *scraper) recordDiskOperationTimeMetric(now pcommon.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) {
for _, logicalDiskCounter := range logicalDiskCounterValues {
- s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(logicalDiskCounter.Values[avgDiskSecsPerRead])/1e7, logicalDiskCounter.InstanceName, metadata.AttributeDirection.Read)
- s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(logicalDiskCounter.Values[avgDiskSecsPerWrite])/1e7, logicalDiskCounter.InstanceName, metadata.AttributeDirection.Write)
+ s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(logicalDiskCounter.Values[avgDiskSecsPerRead])/1e7, logicalDiskCounter.InstanceName, metadata.AttributeDirectionRead)
+ s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(logicalDiskCounter.Values[avgDiskSecsPerWrite])/1e7, logicalDiskCounter.InstanceName, metadata.AttributeDirectionWrite)
}
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics_v2.go
index 125e4a119d94..0f5bcd7e37f4 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics_v2.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics_v2.go
@@ -52,6 +52,32 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeDirection specifies the value of the direction attribute.
+type AttributeDirection int
+
+const (
+ _ AttributeDirection = iota
+ AttributeDirectionRead
+ AttributeDirectionWrite
+)
+
+// String returns the string representation of the AttributeDirection.
+func (av AttributeDirection) String() string {
+ switch av {
+ case AttributeDirectionRead:
+ return "read"
+ case AttributeDirectionWrite:
+ return "write"
+ }
+ return ""
+}
+
+// MapAttributeDirection is a helper map of string to AttributeDirection attribute value.
+var MapAttributeDirection = map[string]AttributeDirection{
+ "read": AttributeDirectionRead,
+ "write": AttributeDirectionWrite,
+}
+
type metricSystemDiskIo struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -522,8 +548,8 @@ func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics {
}
// RecordSystemDiskIoDataPoint adds a data point to system.disk.io metric.
-func (mb *MetricsBuilder) RecordSystemDiskIoDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) {
- mb.metricSystemDiskIo.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue)
+func (mb *MetricsBuilder) RecordSystemDiskIoDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue AttributeDirection) {
+ mb.metricSystemDiskIo.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue.String())
}
// RecordSystemDiskIoTimeDataPoint adds a data point to system.disk.io_time metric.
@@ -532,18 +558,18 @@ func (mb *MetricsBuilder) RecordSystemDiskIoTimeDataPoint(ts pcommon.Timestamp,
}
// RecordSystemDiskMergedDataPoint adds a data point to system.disk.merged metric.
-func (mb *MetricsBuilder) RecordSystemDiskMergedDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) {
- mb.metricSystemDiskMerged.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue)
+func (mb *MetricsBuilder) RecordSystemDiskMergedDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue AttributeDirection) {
+ mb.metricSystemDiskMerged.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue.String())
}
// RecordSystemDiskOperationTimeDataPoint adds a data point to system.disk.operation_time metric.
-func (mb *MetricsBuilder) RecordSystemDiskOperationTimeDataPoint(ts pcommon.Timestamp, val float64, deviceAttributeValue string, directionAttributeValue string) {
- mb.metricSystemDiskOperationTime.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue)
+func (mb *MetricsBuilder) RecordSystemDiskOperationTimeDataPoint(ts pcommon.Timestamp, val float64, deviceAttributeValue string, directionAttributeValue AttributeDirection) {
+ mb.metricSystemDiskOperationTime.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue.String())
}
// RecordSystemDiskOperationsDataPoint adds a data point to system.disk.operations metric.
-func (mb *MetricsBuilder) RecordSystemDiskOperationsDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) {
- mb.metricSystemDiskOperations.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue)
+func (mb *MetricsBuilder) RecordSystemDiskOperationsDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue AttributeDirection) {
+ mb.metricSystemDiskOperations.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue.String())
}
// RecordSystemDiskPendingOperationsDataPoint adds a data point to system.disk.pending_operations metric.
@@ -578,12 +604,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeDirection are the possible values that the attribute "direction" can have.
-var AttributeDirection = struct {
- Read string
- Write string
-}{
- "read",
- "write",
-}
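A minimal usage sketch (illustrative only, not part of this diff) of the typed constants generated above, assuming the diskscraper metadata package from this change; the device name "sda0" and the lookup string "read" are hypothetical:

import (
	"go.opentelemetry.io/collector/pdata/pcommon"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata"
)

// recordIoSketch shows the two ways the new API is exercised: passing a typed
// constant directly, and converting an externally supplied string through the
// generated MapAttributeDirection helper.
func recordIoSketch(mb *metadata.MetricsBuilder, now pcommon.Timestamp, readBytes int64) {
	// Typed constant; the builder converts it back to its string form via
	// AttributeDirection.String() when recording the data point.
	mb.RecordSystemDiskIoDataPoint(now, readBytes, "sda0", metadata.AttributeDirectionRead)

	// Parsing a string value into the typed constant; unknown strings are skipped.
	if dir, ok := metadata.MapAttributeDirection["read"]; ok {
		mb.RecordSystemDiskIoDataPoint(now, readBytes, "sda0", dir)
	}
}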
diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_others.go
index 4b09c10f2199..b7d3edda1782 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_others.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_others.go
@@ -31,12 +31,12 @@ func (s *scraper) recordFileSystemUsageMetric(now pcommon.Timestamp, deviceUsage
now, int64(deviceUsage.usage.Used),
deviceUsage.partition.Device, getMountMode(deviceUsage.partition.Opts),
deviceUsage.partition.Mountpoint, deviceUsage.partition.Fstype,
- metadata.AttributeState.Used)
+ metadata.AttributeStateUsed)
s.mb.RecordSystemFilesystemUsageDataPoint(
now, int64(deviceUsage.usage.Free),
deviceUsage.partition.Device, getMountMode(deviceUsage.partition.Opts),
deviceUsage.partition.Mountpoint, deviceUsage.partition.Fstype,
- metadata.AttributeState.Free)
+ metadata.AttributeStateFree)
s.mb.RecordSystemFilesystemUtilizationDataPoint(
now, deviceUsage.usage.UsedPercent/100.0,
deviceUsage.partition.Device, getMountMode(deviceUsage.partition.Opts),
diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go
index f1e7d00a89df..a8688a426dec 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go
@@ -321,12 +321,15 @@ func assertFileSystemUsageMetricValid(
} else {
assert.GreaterOrEqual(t, metric.Sum().DataPoints().Len(), fileSystemStatesLen)
}
- internal.AssertSumMetricHasAttributeValue(t, metric, 0, "state", pcommon.NewValueString(metadata.AttributeState.Used))
- internal.AssertSumMetricHasAttributeValue(t, metric, 1, "state", pcommon.NewValueString(metadata.AttributeState.Free))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 0, "state",
+ pcommon.NewValueString(metadata.AttributeStateUsed.String()))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 1, "state",
+ pcommon.NewValueString(metadata.AttributeStateFree.String()))
}
func assertFileSystemUsageMetricHasUnixSpecificStateLabels(t *testing.T, metric pmetric.Metric) {
- internal.AssertSumMetricHasAttributeValue(t, metric, 2, "state", pcommon.NewValueString(metadata.AttributeState.Reserved))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 2, "state",
+ pcommon.NewValueString(metadata.AttributeStateReserved.String()))
}
func isUnix() bool {
diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_unix.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_unix.go
index 3e8857bfac5d..59e0e45605b0 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_unix.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_unix.go
@@ -31,17 +31,17 @@ func (s *scraper) recordFileSystemUsageMetric(now pcommon.Timestamp, deviceUsage
now, int64(deviceUsage.usage.Used),
deviceUsage.partition.Device, getMountMode(deviceUsage.partition.Opts), deviceUsage.partition.Mountpoint,
deviceUsage.partition.Fstype,
- metadata.AttributeState.Used)
+ metadata.AttributeStateUsed)
s.mb.RecordSystemFilesystemUsageDataPoint(
now, int64(deviceUsage.usage.Free),
deviceUsage.partition.Device, getMountMode(deviceUsage.partition.Opts),
deviceUsage.partition.Mountpoint, deviceUsage.partition.Fstype,
- metadata.AttributeState.Free)
+ metadata.AttributeStateFree)
s.mb.RecordSystemFilesystemUsageDataPoint(
now, int64(deviceUsage.usage.Total-deviceUsage.usage.Used-deviceUsage.usage.Free),
deviceUsage.partition.Device, getMountMode(deviceUsage.partition.Opts),
deviceUsage.partition.Mountpoint, deviceUsage.partition.Fstype,
- metadata.AttributeState.Reserved)
+ metadata.AttributeStateReserved)
s.mb.RecordSystemFilesystemUtilizationDataPoint(
now, deviceUsage.usage.UsedPercent/100.0,
deviceUsage.partition.Device, getMountMode(deviceUsage.partition.Opts),
@@ -56,10 +56,10 @@ func (s *scraper) recordSystemSpecificMetrics(now pcommon.Timestamp, deviceUsage
s.mb.RecordSystemFilesystemInodesUsageDataPoint(
now, int64(deviceUsage.usage.InodesUsed), deviceUsage.partition.Device,
getMountMode(deviceUsage.partition.Opts), deviceUsage.partition.Mountpoint,
- deviceUsage.partition.Fstype, metadata.AttributeState.Used)
+ deviceUsage.partition.Fstype, metadata.AttributeStateUsed)
s.mb.RecordSystemFilesystemInodesUsageDataPoint(
now, int64(deviceUsage.usage.InodesFree), deviceUsage.partition.Device,
getMountMode(deviceUsage.partition.Opts), deviceUsage.partition.Mountpoint,
- deviceUsage.partition.Fstype, metadata.AttributeState.Free)
+ deviceUsage.partition.Fstype, metadata.AttributeStateFree)
}
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata/generated_metrics_v2.go
index 9a26a0a5b6b4..5ab67bef2162 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata/generated_metrics_v2.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata/generated_metrics_v2.go
@@ -36,6 +36,36 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeState specifies a value of the state attribute.
+type AttributeState int
+
+const (
+ _ AttributeState = iota
+ AttributeStateFree
+ AttributeStateReserved
+ AttributeStateUsed
+)
+
+// String returns the string representation of the AttributeState.
+func (av AttributeState) String() string {
+ switch av {
+ case AttributeStateFree:
+ return "free"
+ case AttributeStateReserved:
+ return "reserved"
+ case AttributeStateUsed:
+ return "used"
+ }
+ return ""
+}
+
+// MapAttributeState is a helper map of string to AttributeState attribute value.
+var MapAttributeState = map[string]AttributeState{
+ "free": AttributeStateFree,
+ "reserved": AttributeStateReserved,
+ "used": AttributeStateUsed,
+}
+
type metricSystemFilesystemInodesUsage struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -287,13 +317,13 @@ func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics {
}
// RecordSystemFilesystemInodesUsageDataPoint adds a data point to system.filesystem.inodes.usage metric.
-func (mb *MetricsBuilder) RecordSystemFilesystemInodesUsageDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, modeAttributeValue string, mountpointAttributeValue string, typeAttributeValue string, stateAttributeValue string) {
- mb.metricSystemFilesystemInodesUsage.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, modeAttributeValue, mountpointAttributeValue, typeAttributeValue, stateAttributeValue)
+func (mb *MetricsBuilder) RecordSystemFilesystemInodesUsageDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, modeAttributeValue string, mountpointAttributeValue string, typeAttributeValue string, stateAttributeValue AttributeState) {
+ mb.metricSystemFilesystemInodesUsage.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, modeAttributeValue, mountpointAttributeValue, typeAttributeValue, stateAttributeValue.String())
}
// RecordSystemFilesystemUsageDataPoint adds a data point to system.filesystem.usage metric.
-func (mb *MetricsBuilder) RecordSystemFilesystemUsageDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, modeAttributeValue string, mountpointAttributeValue string, typeAttributeValue string, stateAttributeValue string) {
- mb.metricSystemFilesystemUsage.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, modeAttributeValue, mountpointAttributeValue, typeAttributeValue, stateAttributeValue)
+func (mb *MetricsBuilder) RecordSystemFilesystemUsageDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, modeAttributeValue string, mountpointAttributeValue string, typeAttributeValue string, stateAttributeValue AttributeState) {
+ mb.metricSystemFilesystemUsage.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, modeAttributeValue, mountpointAttributeValue, typeAttributeValue, stateAttributeValue.String())
}
// RecordSystemFilesystemUtilizationDataPoint adds a data point to system.filesystem.utilization metric.
@@ -332,14 +362,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeState are the possible values that the attribute "state" can have.
-var AttributeState = struct {
- Free string
- Reserved string
- Used string
-}{
- "free",
- "reserved",
- "used",
-}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata/generated_metrics_v2.go
index 6404db9cb32e..31f308261311 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata/generated_metrics_v2.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata/generated_metrics_v2.go
@@ -32,6 +32,52 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeState specifies a value of the state attribute.
+type AttributeState int
+
+const (
+ _ AttributeState = iota
+ AttributeStateBuffered
+ AttributeStateCached
+ AttributeStateInactive
+ AttributeStateFree
+ AttributeStateSlabReclaimable
+ AttributeStateSlabUnreclaimable
+ AttributeStateUsed
+)
+
+// String returns the string representation of the AttributeState.
+func (av AttributeState) String() string {
+ switch av {
+ case AttributeStateBuffered:
+ return "buffered"
+ case AttributeStateCached:
+ return "cached"
+ case AttributeStateInactive:
+ return "inactive"
+ case AttributeStateFree:
+ return "free"
+ case AttributeStateSlabReclaimable:
+ return "slab_reclaimable"
+ case AttributeStateSlabUnreclaimable:
+ return "slab_unreclaimable"
+ case AttributeStateUsed:
+ return "used"
+ }
+ return ""
+}
+
+// MapAttributeState is a helper map of string to AttributeState attribute value.
+var MapAttributeState = map[string]AttributeState{
+ "buffered": AttributeStateBuffered,
+ "cached": AttributeStateCached,
+ "inactive": AttributeStateInactive,
+ "free": AttributeStateFree,
+ "slab_reclaimable": AttributeStateSlabReclaimable,
+ "slab_unreclaimable": AttributeStateSlabUnreclaimable,
+ "used": AttributeStateUsed,
+}
+
type metricSystemMemoryUsage struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -216,13 +262,13 @@ func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics {
}
// RecordSystemMemoryUsageDataPoint adds a data point to system.memory.usage metric.
-func (mb *MetricsBuilder) RecordSystemMemoryUsageDataPoint(ts pcommon.Timestamp, val int64, stateAttributeValue string) {
- mb.metricSystemMemoryUsage.recordDataPoint(mb.startTime, ts, val, stateAttributeValue)
+func (mb *MetricsBuilder) RecordSystemMemoryUsageDataPoint(ts pcommon.Timestamp, val int64, stateAttributeValue AttributeState) {
+ mb.metricSystemMemoryUsage.recordDataPoint(mb.startTime, ts, val, stateAttributeValue.String())
}
// RecordSystemMemoryUtilizationDataPoint adds a data point to system.memory.utilization metric.
-func (mb *MetricsBuilder) RecordSystemMemoryUtilizationDataPoint(ts pcommon.Timestamp, val float64, stateAttributeValue string) {
- mb.metricSystemMemoryUtilization.recordDataPoint(mb.startTime, ts, val, stateAttributeValue)
+func (mb *MetricsBuilder) RecordSystemMemoryUtilizationDataPoint(ts pcommon.Timestamp, val float64, stateAttributeValue AttributeState) {
+ mb.metricSystemMemoryUtilization.recordDataPoint(mb.startTime, ts, val, stateAttributeValue.String())
}
// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
@@ -244,22 +290,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeState are the possible values that the attribute "state" can have.
-var AttributeState = struct {
- Buffered string
- Cached string
- Inactive string
- Free string
- SlabReclaimable string
- SlabUnreclaimable string
- Used string
-}{
- "buffered",
- "cached",
- "inactive",
- "free",
- "slab_reclaimable",
- "slab_unreclaimable",
- "used",
-}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_linux.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_linux.go
index 757936c9c5fb..1493a2d15396 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_linux.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_linux.go
@@ -25,19 +25,19 @@ import (
)
func (s *scraper) recordMemoryUsageMetric(now pcommon.Timestamp, memInfo *mem.VirtualMemoryStat) {
- s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Used), metadata.AttributeState.Used)
- s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Free), metadata.AttributeState.Free)
- s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Buffers), metadata.AttributeState.Buffered)
- s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Cached), metadata.AttributeState.Cached)
- s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Sreclaimable), metadata.AttributeState.SlabReclaimable)
- s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Sunreclaim), metadata.AttributeState.SlabUnreclaimable)
+ s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Used), metadata.AttributeStateUsed)
+ s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Free), metadata.AttributeStateFree)
+ s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Buffers), metadata.AttributeStateBuffered)
+ s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Cached), metadata.AttributeStateCached)
+ s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Sreclaimable), metadata.AttributeStateSlabReclaimable)
+ s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Sunreclaim), metadata.AttributeStateSlabUnreclaimable)
}
func (s *scraper) recordMemoryUtilizationMetric(now pcommon.Timestamp, memInfo *mem.VirtualMemoryStat) {
- s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Used)/float64(memInfo.Total), metadata.AttributeState.Used)
- s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Free)/float64(memInfo.Total), metadata.AttributeState.Free)
- s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Buffers)/float64(memInfo.Total), metadata.AttributeState.Buffered)
- s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Cached)/float64(memInfo.Total), metadata.AttributeState.Cached)
- s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Sreclaimable)/float64(memInfo.Total), metadata.AttributeState.SlabReclaimable)
- s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Sunreclaim)/float64(memInfo.Total), metadata.AttributeState.SlabUnreclaimable)
+ s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Used)/float64(memInfo.Total), metadata.AttributeStateUsed)
+ s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Free)/float64(memInfo.Total), metadata.AttributeStateFree)
+ s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Buffers)/float64(memInfo.Total), metadata.AttributeStateBuffered)
+ s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Cached)/float64(memInfo.Total), metadata.AttributeStateCached)
+ s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Sreclaimable)/float64(memInfo.Total), metadata.AttributeStateSlabReclaimable)
+ s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Sunreclaim)/float64(memInfo.Total), metadata.AttributeStateSlabUnreclaimable)
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_others.go
index e9ccb0b02ba3..290de5431a0c 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_others.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_others.go
@@ -25,13 +25,13 @@ import (
)
func (s *scraper) recordMemoryUsageMetric(now pcommon.Timestamp, memInfo *mem.VirtualMemoryStat) {
- s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Used), metadata.AttributeState.Used)
- s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Free), metadata.AttributeState.Free)
- s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Inactive), metadata.AttributeState.Inactive)
+ s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Used), metadata.AttributeStateUsed)
+ s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Free), metadata.AttributeStateFree)
+ s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Inactive), metadata.AttributeStateInactive)
}
func (s *scraper) recordMemoryUtilizationMetric(now pcommon.Timestamp, memInfo *mem.VirtualMemoryStat) {
- s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Used)/float64(memInfo.Total), metadata.AttributeState.Used)
- s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Free)/float64(memInfo.Total), metadata.AttributeState.Free)
- s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Inactive)/float64(memInfo.Total), metadata.AttributeState.Inactive)
+ s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Used)/float64(memInfo.Total), metadata.AttributeStateUsed)
+ s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Free)/float64(memInfo.Total), metadata.AttributeStateFree)
+ s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Inactive)/float64(memInfo.Total), metadata.AttributeStateInactive)
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_test.go
index 4dbf38ac3a05..74244c35b3a6 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_test.go
@@ -123,7 +123,8 @@ func TestScrape(t *testing.T) {
if runtime.GOOS == "linux" {
assertMemoryUsageMetricHasLinuxSpecificStateLabels(t, metrics.At(0))
} else if runtime.GOOS != "windows" {
- internal.AssertSumMetricHasAttributeValue(t, metrics.At(0), 2, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Inactive))
+ internal.AssertSumMetricHasAttributeValue(t, metrics.At(0), 2, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateInactive.String()))
}
internal.AssertSameTimeStampForAllMetrics(t, metrics)
@@ -178,7 +179,8 @@ func TestScrape_MemoryUtilization(t *testing.T) {
if runtime.GOOS == "linux" {
assertMemoryUtilizationMetricHasLinuxSpecificStateLabels(t, metrics.At(0))
} else if runtime.GOOS != "windows" {
- internal.AssertGaugeMetricHasAttributeValue(t, metrics.At(0), 2, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Inactive))
+ internal.AssertGaugeMetricHasAttributeValue(t, metrics.At(0), 2, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateInactive.String()))
}
internal.AssertSameTimeStampForAllMetrics(t, metrics)
@@ -189,27 +191,39 @@ func TestScrape_MemoryUtilization(t *testing.T) {
func assertMemoryUsageMetricValid(t *testing.T, metric pmetric.Metric, expectedName string) {
assert.Equal(t, expectedName, metric.Name())
assert.GreaterOrEqual(t, metric.Sum().DataPoints().Len(), 2)
- internal.AssertSumMetricHasAttributeValue(t, metric, 0, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Used))
- internal.AssertSumMetricHasAttributeValue(t, metric, 1, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Free))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 0, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateUsed.String()))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 1, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateFree.String()))
}
func assertMemoryUtilizationMetricValid(t *testing.T, metric pmetric.Metric, expectedName string) {
assert.Equal(t, expectedName, metric.Name())
assert.GreaterOrEqual(t, metric.Gauge().DataPoints().Len(), 2)
- internal.AssertGaugeMetricHasAttributeValue(t, metric, 0, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Used))
- internal.AssertGaugeMetricHasAttributeValue(t, metric, 1, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Free))
+ internal.AssertGaugeMetricHasAttributeValue(t, metric, 0, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateUsed.String()))
+ internal.AssertGaugeMetricHasAttributeValue(t, metric, 1, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateFree.String()))
}
func assertMemoryUsageMetricHasLinuxSpecificStateLabels(t *testing.T, metric pmetric.Metric) {
- internal.AssertSumMetricHasAttributeValue(t, metric, 2, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Buffered))
- internal.AssertSumMetricHasAttributeValue(t, metric, 3, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Cached))
- internal.AssertSumMetricHasAttributeValue(t, metric, 4, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.SlabReclaimable))
- internal.AssertSumMetricHasAttributeValue(t, metric, 5, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.SlabUnreclaimable))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 2, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateBuffered.String()))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 3, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateCached.String()))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 4, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateSlabReclaimable.String()))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 5, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateSlabUnreclaimable.String()))
}
func assertMemoryUtilizationMetricHasLinuxSpecificStateLabels(t *testing.T, metric pmetric.Metric) {
- internal.AssertGaugeMetricHasAttributeValue(t, metric, 2, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Buffered))
- internal.AssertGaugeMetricHasAttributeValue(t, metric, 3, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Cached))
- internal.AssertGaugeMetricHasAttributeValue(t, metric, 4, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.SlabReclaimable))
- internal.AssertGaugeMetricHasAttributeValue(t, metric, 5, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.SlabUnreclaimable))
+ internal.AssertGaugeMetricHasAttributeValue(t, metric, 2, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateBuffered.String()))
+ internal.AssertGaugeMetricHasAttributeValue(t, metric, 3, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateCached.String()))
+ internal.AssertGaugeMetricHasAttributeValue(t, metric, 4, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateSlabReclaimable.String()))
+ internal.AssertGaugeMetricHasAttributeValue(t, metric, 5, metadata.Attributes.State,
+ pcommon.NewValueString(metadata.AttributeStateSlabUnreclaimable.String()))
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_windows.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_windows.go
index 3579b6517906..08d4ee16a1ca 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_windows.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_windows.go
@@ -25,11 +25,11 @@ import (
)
func (s *scraper) recordMemoryUsageMetric(now pcommon.Timestamp, memInfo *mem.VirtualMemoryStat) {
- s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Used), metadata.AttributeState.Used)
- s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Free), metadata.AttributeState.Free)
+ s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Used), metadata.AttributeStateUsed)
+ s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Free), metadata.AttributeStateFree)
}
func (s *scraper) recordMemoryUtilizationMetric(now pcommon.Timestamp, memInfo *mem.VirtualMemoryStat) {
- s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Used)/float64(memInfo.Total), metadata.AttributeState.Used)
- s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Free)/float64(memInfo.Total), metadata.AttributeState.Free)
+ s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Used)/float64(memInfo.Total), metadata.AttributeStateUsed)
+ s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Free)/float64(memInfo.Total), metadata.AttributeStateFree)
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics_v2.go
index 5e9b4176b7f0..b15c7791d78b 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics_v2.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics_v2.go
@@ -44,6 +44,54 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeDirection specifies a value of the direction attribute.
+type AttributeDirection int
+
+const (
+ _ AttributeDirection = iota
+ AttributeDirectionReceive
+ AttributeDirectionTransmit
+)
+
+// String returns the string representation of the AttributeDirection.
+func (av AttributeDirection) String() string {
+ switch av {
+ case AttributeDirectionReceive:
+ return "receive"
+ case AttributeDirectionTransmit:
+ return "transmit"
+ }
+ return ""
+}
+
+// MapAttributeDirection is a helper map of string to AttributeDirection attribute value.
+var MapAttributeDirection = map[string]AttributeDirection{
+ "receive": AttributeDirectionReceive,
+ "transmit": AttributeDirectionTransmit,
+}
+
+// AttributeProtocol specifies a value of the protocol attribute.
+type AttributeProtocol int
+
+const (
+ _ AttributeProtocol = iota
+ AttributeProtocolTcp
+)
+
+// String returns the string representation of the AttributeProtocol.
+func (av AttributeProtocol) String() string {
+ switch av {
+ case AttributeProtocolTcp:
+ return "tcp"
+ }
+ return ""
+}
+
+// MapAttributeProtocol is a helper map of string to AttributeProtocol attribute value.
+var MapAttributeProtocol = map[string]AttributeProtocol{
+ "tcp": AttributeProtocolTcp,
+}
+
type metricSystemNetworkConnections struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -403,28 +451,28 @@ func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics {
}
// RecordSystemNetworkConnectionsDataPoint adds a data point to system.network.connections metric.
-func (mb *MetricsBuilder) RecordSystemNetworkConnectionsDataPoint(ts pcommon.Timestamp, val int64, protocolAttributeValue string, stateAttributeValue string) {
- mb.metricSystemNetworkConnections.recordDataPoint(mb.startTime, ts, val, protocolAttributeValue, stateAttributeValue)
+func (mb *MetricsBuilder) RecordSystemNetworkConnectionsDataPoint(ts pcommon.Timestamp, val int64, protocolAttributeValue AttributeProtocol, stateAttributeValue string) {
+ mb.metricSystemNetworkConnections.recordDataPoint(mb.startTime, ts, val, protocolAttributeValue.String(), stateAttributeValue)
}
// RecordSystemNetworkDroppedDataPoint adds a data point to system.network.dropped metric.
-func (mb *MetricsBuilder) RecordSystemNetworkDroppedDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) {
- mb.metricSystemNetworkDropped.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue)
+func (mb *MetricsBuilder) RecordSystemNetworkDroppedDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue AttributeDirection) {
+ mb.metricSystemNetworkDropped.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue.String())
}
// RecordSystemNetworkErrorsDataPoint adds a data point to system.network.errors metric.
-func (mb *MetricsBuilder) RecordSystemNetworkErrorsDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) {
- mb.metricSystemNetworkErrors.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue)
+func (mb *MetricsBuilder) RecordSystemNetworkErrorsDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue AttributeDirection) {
+ mb.metricSystemNetworkErrors.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue.String())
}
// RecordSystemNetworkIoDataPoint adds a data point to system.network.io metric.
-func (mb *MetricsBuilder) RecordSystemNetworkIoDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) {
- mb.metricSystemNetworkIo.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue)
+func (mb *MetricsBuilder) RecordSystemNetworkIoDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue AttributeDirection) {
+ mb.metricSystemNetworkIo.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue.String())
}
// RecordSystemNetworkPacketsDataPoint adds a data point to system.network.packets metric.
-func (mb *MetricsBuilder) RecordSystemNetworkPacketsDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) {
- mb.metricSystemNetworkPackets.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue)
+func (mb *MetricsBuilder) RecordSystemNetworkPacketsDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue AttributeDirection) {
+ mb.metricSystemNetworkPackets.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue.String())
}
// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
@@ -455,19 +503,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeDirection are the possible values that the attribute "direction" can have.
-var AttributeDirection = struct {
- Receive string
- Transmit string
-}{
- "receive",
- "transmit",
-}
-
-// AttributeProtocol are the possible values that the attribute "protocol" can have.
-var AttributeProtocol = struct {
- Tcp string
-}{
- "tcp",
-}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go
index 9a0b3084f162..eb117b6c720b 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go
@@ -123,29 +123,29 @@ func (s *scraper) recordNetworkCounterMetrics() error {
func (s *scraper) recordNetworkPacketsMetric(now pcommon.Timestamp, ioCountersSlice []net.IOCountersStat) {
for _, ioCounters := range ioCountersSlice {
- s.mb.RecordSystemNetworkPacketsDataPoint(now, int64(ioCounters.PacketsSent), ioCounters.Name, metadata.AttributeDirection.Transmit)
- s.mb.RecordSystemNetworkPacketsDataPoint(now, int64(ioCounters.PacketsRecv), ioCounters.Name, metadata.AttributeDirection.Receive)
+ s.mb.RecordSystemNetworkPacketsDataPoint(now, int64(ioCounters.PacketsSent), ioCounters.Name, metadata.AttributeDirectionTransmit)
+ s.mb.RecordSystemNetworkPacketsDataPoint(now, int64(ioCounters.PacketsRecv), ioCounters.Name, metadata.AttributeDirectionReceive)
}
}
func (s *scraper) recordNetworkDroppedPacketsMetric(now pcommon.Timestamp, ioCountersSlice []net.IOCountersStat) {
for _, ioCounters := range ioCountersSlice {
- s.mb.RecordSystemNetworkDroppedDataPoint(now, int64(ioCounters.Dropout), ioCounters.Name, metadata.AttributeDirection.Transmit)
- s.mb.RecordSystemNetworkDroppedDataPoint(now, int64(ioCounters.Dropin), ioCounters.Name, metadata.AttributeDirection.Receive)
+ s.mb.RecordSystemNetworkDroppedDataPoint(now, int64(ioCounters.Dropout), ioCounters.Name, metadata.AttributeDirectionTransmit)
+ s.mb.RecordSystemNetworkDroppedDataPoint(now, int64(ioCounters.Dropin), ioCounters.Name, metadata.AttributeDirectionReceive)
}
}
func (s *scraper) recordNetworkErrorPacketsMetric(now pcommon.Timestamp, ioCountersSlice []net.IOCountersStat) {
for _, ioCounters := range ioCountersSlice {
- s.mb.RecordSystemNetworkErrorsDataPoint(now, int64(ioCounters.Errout), ioCounters.Name, metadata.AttributeDirection.Transmit)
- s.mb.RecordSystemNetworkErrorsDataPoint(now, int64(ioCounters.Errin), ioCounters.Name, metadata.AttributeDirection.Receive)
+ s.mb.RecordSystemNetworkErrorsDataPoint(now, int64(ioCounters.Errout), ioCounters.Name, metadata.AttributeDirectionTransmit)
+ s.mb.RecordSystemNetworkErrorsDataPoint(now, int64(ioCounters.Errin), ioCounters.Name, metadata.AttributeDirectionReceive)
}
}
func (s *scraper) recordNetworkIOMetric(now pcommon.Timestamp, ioCountersSlice []net.IOCountersStat) {
for _, ioCounters := range ioCountersSlice {
- s.mb.RecordSystemNetworkIoDataPoint(now, int64(ioCounters.BytesSent), ioCounters.Name, metadata.AttributeDirection.Transmit)
- s.mb.RecordSystemNetworkIoDataPoint(now, int64(ioCounters.BytesRecv), ioCounters.Name, metadata.AttributeDirection.Receive)
+ s.mb.RecordSystemNetworkIoDataPoint(now, int64(ioCounters.BytesSent), ioCounters.Name, metadata.AttributeDirectionTransmit)
+ s.mb.RecordSystemNetworkIoDataPoint(now, int64(ioCounters.BytesRecv), ioCounters.Name, metadata.AttributeDirectionReceive)
}
}
@@ -177,7 +177,7 @@ func getTCPConnectionStatusCounts(connections []net.ConnectionStat) map[string]i
func (s *scraper) recordNetworkConnectionsMetric(now pcommon.Timestamp, connectionStateCounts map[string]int64) {
for connectionState, count := range connectionStateCounts {
- s.mb.RecordSystemNetworkConnectionsDataPoint(now, count, metadata.AttributeProtocol.Tcp, connectionState)
+ s.mb.RecordSystemNetworkConnectionsDataPoint(now, count, metadata.AttributeProtocolTcp, connectionState)
}
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go
index 9b57f62d7f8a..7e6876ccea00 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go
@@ -178,13 +178,16 @@ func assertNetworkIOMetricValid(t *testing.T, metric pmetric.Metric, expectedNam
}
assert.GreaterOrEqual(t, metric.Sum().DataPoints().Len(), 2)
internal.AssertSumMetricHasAttribute(t, metric, 0, "device")
- internal.AssertSumMetricHasAttributeValue(t, metric, 0, "direction", pcommon.NewValueString(metadata.AttributeDirection.Transmit))
- internal.AssertSumMetricHasAttributeValue(t, metric, 1, "direction", pcommon.NewValueString(metadata.AttributeDirection.Receive))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 0, "direction",
+ pcommon.NewValueString(metadata.AttributeDirectionTransmit.String()))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 1, "direction",
+ pcommon.NewValueString(metadata.AttributeDirectionReceive.String()))
}
func assertNetworkConnectionsMetricValid(t *testing.T, metric pmetric.Metric) {
assert.Equal(t, metric.Name(), "system.network.connections")
- internal.AssertSumMetricHasAttributeValue(t, metric, 0, "protocol", pcommon.NewValueString(metadata.AttributeProtocol.Tcp))
+ internal.AssertSumMetricHasAttributeValue(t, metric, 0, "protocol",
+ pcommon.NewValueString(metadata.AttributeProtocolTcp.String()))
internal.AssertSumMetricHasAttribute(t, metric, 0, "state")
assert.Equal(t, 12, metric.Sum().DataPoints().Len())
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics_v2.go
index 3c9bcc241f75..d452d60ce556 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics_v2.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics_v2.go
@@ -40,6 +40,88 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeDirection specifies a value of the direction attribute.
+type AttributeDirection int
+
+const (
+ _ AttributeDirection = iota
+ AttributeDirectionPageIn
+ AttributeDirectionPageOut
+)
+
+// String returns the string representation of the AttributeDirection.
+func (av AttributeDirection) String() string {
+ switch av {
+ case AttributeDirectionPageIn:
+ return "page_in"
+ case AttributeDirectionPageOut:
+ return "page_out"
+ }
+ return ""
+}
+
+// MapAttributeDirection is a helper map of string to AttributeDirection attribute value.
+var MapAttributeDirection = map[string]AttributeDirection{
+ "page_in": AttributeDirectionPageIn,
+ "page_out": AttributeDirectionPageOut,
+}
+
+// AttributeState specifies a value of the state attribute.
+type AttributeState int
+
+const (
+ _ AttributeState = iota
+ AttributeStateCached
+ AttributeStateFree
+ AttributeStateUsed
+)
+
+// String returns the string representation of the AttributeState.
+func (av AttributeState) String() string {
+ switch av {
+ case AttributeStateCached:
+ return "cached"
+ case AttributeStateFree:
+ return "free"
+ case AttributeStateUsed:
+ return "used"
+ }
+ return ""
+}
+
+// MapAttributeState is a helper map of string to AttributeState attribute value.
+var MapAttributeState = map[string]AttributeState{
+ "cached": AttributeStateCached,
+ "free": AttributeStateFree,
+ "used": AttributeStateUsed,
+}
+
+// AttributeType specifies a value of the type attribute.
+type AttributeType int
+
+const (
+ _ AttributeType = iota
+ AttributeTypeMajor
+ AttributeTypeMinor
+)
+
+// String returns the string representation of the AttributeType.
+func (av AttributeType) String() string {
+ switch av {
+ case AttributeTypeMajor:
+ return "major"
+ case AttributeTypeMinor:
+ return "minor"
+ }
+ return ""
+}
+
+// MapAttributeType is a helper map of string to AttributeType attribute value.
+var MapAttributeType = map[string]AttributeType{
+ "major": AttributeTypeMajor,
+ "minor": AttributeTypeMinor,
+}
+
type metricSystemPagingFaults struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -339,23 +421,23 @@ func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics {
}
// RecordSystemPagingFaultsDataPoint adds a data point to system.paging.faults metric.
-func (mb *MetricsBuilder) RecordSystemPagingFaultsDataPoint(ts pcommon.Timestamp, val int64, typeAttributeValue string) {
- mb.metricSystemPagingFaults.recordDataPoint(mb.startTime, ts, val, typeAttributeValue)
+func (mb *MetricsBuilder) RecordSystemPagingFaultsDataPoint(ts pcommon.Timestamp, val int64, typeAttributeValue AttributeType) {
+ mb.metricSystemPagingFaults.recordDataPoint(mb.startTime, ts, val, typeAttributeValue.String())
}
// RecordSystemPagingOperationsDataPoint adds a data point to system.paging.operations metric.
-func (mb *MetricsBuilder) RecordSystemPagingOperationsDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue string, typeAttributeValue string) {
- mb.metricSystemPagingOperations.recordDataPoint(mb.startTime, ts, val, directionAttributeValue, typeAttributeValue)
+func (mb *MetricsBuilder) RecordSystemPagingOperationsDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue AttributeDirection, typeAttributeValue AttributeType) {
+ mb.metricSystemPagingOperations.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String(), typeAttributeValue.String())
}
// RecordSystemPagingUsageDataPoint adds a data point to system.paging.usage metric.
-func (mb *MetricsBuilder) RecordSystemPagingUsageDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, stateAttributeValue string) {
- mb.metricSystemPagingUsage.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, stateAttributeValue)
+func (mb *MetricsBuilder) RecordSystemPagingUsageDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, stateAttributeValue AttributeState) {
+ mb.metricSystemPagingUsage.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, stateAttributeValue.String())
}
// RecordSystemPagingUtilizationDataPoint adds a data point to system.paging.utilization metric.
-func (mb *MetricsBuilder) RecordSystemPagingUtilizationDataPoint(ts pcommon.Timestamp, val float64, deviceAttributeValue string, stateAttributeValue string) {
- mb.metricSystemPagingUtilization.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, stateAttributeValue)
+func (mb *MetricsBuilder) RecordSystemPagingUtilizationDataPoint(ts pcommon.Timestamp, val float64, deviceAttributeValue string, stateAttributeValue AttributeState) {
+ mb.metricSystemPagingUtilization.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, stateAttributeValue.String())
}
// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
@@ -386,32 +468,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeDirection are the possible values that the attribute "direction" can have.
-var AttributeDirection = struct {
- PageIn string
- PageOut string
-}{
- "page_in",
- "page_out",
-}
-
-// AttributeState are the possible values that the attribute "state" can have.
-var AttributeState = struct {
- Cached string
- Free string
- Used string
-}{
- "cached",
- "free",
- "used",
-}
-
-// AttributeType are the possible values that the attribute "type" can have.
-var AttributeType = struct {
- Major string
- Minor string
-}{
- "major",
- "minor",
-}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go
index 105ca56d24c5..a4539284310e 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go
@@ -93,20 +93,20 @@ func (s *scraper) scrapePagingUsageMetric() error {
func (s *scraper) recordPagingUsageDataPoints(now pcommon.Timestamp, pageFileStats []*pageFileStats) {
for _, pageFile := range pageFileStats {
- s.mb.RecordSystemPagingUsageDataPoint(now, int64(pageFile.usedBytes), pageFile.deviceName, metadata.AttributeState.Used)
- s.mb.RecordSystemPagingUsageDataPoint(now, int64(pageFile.freeBytes), pageFile.deviceName, metadata.AttributeState.Free)
+ s.mb.RecordSystemPagingUsageDataPoint(now, int64(pageFile.usedBytes), pageFile.deviceName, metadata.AttributeStateUsed)
+ s.mb.RecordSystemPagingUsageDataPoint(now, int64(pageFile.freeBytes), pageFile.deviceName, metadata.AttributeStateFree)
if pageFile.cachedBytes != nil {
- s.mb.RecordSystemPagingUsageDataPoint(now, int64(*pageFile.cachedBytes), pageFile.deviceName, metadata.AttributeState.Cached)
+ s.mb.RecordSystemPagingUsageDataPoint(now, int64(*pageFile.cachedBytes), pageFile.deviceName, metadata.AttributeStateCached)
}
}
}
func (s *scraper) recordPagingUtilizationDataPoints(now pcommon.Timestamp, pageFileStats []*pageFileStats) {
for _, pageFile := range pageFileStats {
- s.mb.RecordSystemPagingUtilizationDataPoint(now, float64(pageFile.usedBytes)/float64(pageFile.totalBytes), pageFile.deviceName, metadata.AttributeState.Used)
- s.mb.RecordSystemPagingUtilizationDataPoint(now, float64(pageFile.freeBytes)/float64(pageFile.totalBytes), pageFile.deviceName, metadata.AttributeState.Free)
+ s.mb.RecordSystemPagingUtilizationDataPoint(now, float64(pageFile.usedBytes)/float64(pageFile.totalBytes), pageFile.deviceName, metadata.AttributeStateUsed)
+ s.mb.RecordSystemPagingUtilizationDataPoint(now, float64(pageFile.freeBytes)/float64(pageFile.totalBytes), pageFile.deviceName, metadata.AttributeStateFree)
if pageFile.cachedBytes != nil {
- s.mb.RecordSystemPagingUtilizationDataPoint(now, float64(*pageFile.cachedBytes)/float64(pageFile.totalBytes), pageFile.deviceName, metadata.AttributeState.Cached)
+ s.mb.RecordSystemPagingUtilizationDataPoint(now, float64(*pageFile.cachedBytes)/float64(pageFile.totalBytes), pageFile.deviceName, metadata.AttributeStateCached)
}
}
}
@@ -124,13 +124,13 @@ func (s *scraper) scrapePagingMetrics() error {
}
func (s *scraper) recordPagingOperationsDataPoints(now pcommon.Timestamp, swap *mem.SwapMemoryStat) {
- s.mb.RecordSystemPagingOperationsDataPoint(now, int64(swap.Sin), metadata.AttributeDirection.PageIn, metadata.AttributeType.Major)
- s.mb.RecordSystemPagingOperationsDataPoint(now, int64(swap.Sout), metadata.AttributeDirection.PageOut, metadata.AttributeType.Major)
- s.mb.RecordSystemPagingOperationsDataPoint(now, int64(swap.PgIn), metadata.AttributeDirection.PageIn, metadata.AttributeType.Minor)
- s.mb.RecordSystemPagingOperationsDataPoint(now, int64(swap.PgOut), metadata.AttributeDirection.PageOut, metadata.AttributeType.Minor)
+ s.mb.RecordSystemPagingOperationsDataPoint(now, int64(swap.Sin), metadata.AttributeDirectionPageIn, metadata.AttributeTypeMajor)
+ s.mb.RecordSystemPagingOperationsDataPoint(now, int64(swap.Sout), metadata.AttributeDirectionPageOut, metadata.AttributeTypeMajor)
+ s.mb.RecordSystemPagingOperationsDataPoint(now, int64(swap.PgIn), metadata.AttributeDirectionPageIn, metadata.AttributeTypeMinor)
+ s.mb.RecordSystemPagingOperationsDataPoint(now, int64(swap.PgOut), metadata.AttributeDirectionPageOut, metadata.AttributeTypeMinor)
}
func (s *scraper) recordPageFaultsDataPoints(now pcommon.Timestamp, swap *mem.SwapMemoryStat) {
- s.mb.RecordSystemPagingFaultsDataPoint(now, int64(swap.PgMajFault), metadata.AttributeType.Major)
- s.mb.RecordSystemPagingFaultsDataPoint(now, int64(swap.PgFault-swap.PgMajFault), metadata.AttributeType.Minor)
+ s.mb.RecordSystemPagingFaultsDataPoint(now, int64(swap.PgMajFault), metadata.AttributeTypeMajor)
+ s.mb.RecordSystemPagingFaultsDataPoint(now, int64(swap.PgFault-swap.PgMajFault), metadata.AttributeTypeMinor)
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_test.go
index 68e078d1df27..f656961d6130 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_test.go
@@ -125,11 +125,14 @@ func assertPagingUsageMetricValid(t *testing.T, hostPagingUsageMetric pmetric.Me
}
assert.GreaterOrEqual(t, hostPagingUsageMetric.Sum().DataPoints().Len(), expectedDataPoints)
- internal.AssertSumMetricHasAttributeValue(t, hostPagingUsageMetric, 0, "state", pcommon.NewValueString(metadata.AttributeState.Used))
- internal.AssertSumMetricHasAttributeValue(t, hostPagingUsageMetric, 1, "state", pcommon.NewValueString(metadata.AttributeState.Free))
+ internal.AssertSumMetricHasAttributeValue(t, hostPagingUsageMetric, 0, "state",
+ pcommon.NewValueString(metadata.AttributeStateUsed.String()))
+ internal.AssertSumMetricHasAttributeValue(t, hostPagingUsageMetric, 1, "state",
+ pcommon.NewValueString(metadata.AttributeStateFree.String()))
// Windows and Linux do not support cached state label
if runtime.GOOS != "windows" && runtime.GOOS != "linux" {
- internal.AssertSumMetricHasAttributeValue(t, hostPagingUsageMetric, 2, "state", pcommon.NewValueString(metadata.AttributeState.Cached))
+ internal.AssertSumMetricHasAttributeValue(t, hostPagingUsageMetric, 2, "state",
+ pcommon.NewValueString(metadata.AttributeStateCached.String()))
}
// on Windows and Linux, also expect the page file device name label
@@ -160,11 +163,14 @@ func assertPagingUtilizationMetricValid(t *testing.T, hostPagingUtilizationMetri
}
assert.GreaterOrEqual(t, hostPagingUtilizationMetric.Gauge().DataPoints().Len(), expectedDataPoints)
- internal.AssertGaugeMetricHasAttributeValue(t, hostPagingUtilizationMetric, 0, "state", pcommon.NewValueString(metadata.AttributeState.Used))
- internal.AssertGaugeMetricHasAttributeValue(t, hostPagingUtilizationMetric, 1, "state", pcommon.NewValueString(metadata.AttributeState.Free))
+ internal.AssertGaugeMetricHasAttributeValue(t, hostPagingUtilizationMetric, 0, "state",
+ pcommon.NewValueString(metadata.AttributeStateUsed.String()))
+ internal.AssertGaugeMetricHasAttributeValue(t, hostPagingUtilizationMetric, 1, "state",
+ pcommon.NewValueString(metadata.AttributeStateFree.String()))
// Windows and Linux do not support cached state label
if runtime.GOOS != "windows" && runtime.GOOS != "linux" {
- internal.AssertGaugeMetricHasAttributeValue(t, hostPagingUtilizationMetric, 2, "state", pcommon.NewValueString(metadata.AttributeState.Cached))
+ internal.AssertGaugeMetricHasAttributeValue(t, hostPagingUtilizationMetric, 2, "state",
+ pcommon.NewValueString(metadata.AttributeStateCached.String()))
}
// on Windows and Linux, also expect the page file device name label
@@ -193,15 +199,23 @@ func assertPagingOperationsMetricValid(t *testing.T, pagingMetric pmetric.Metric
}
assert.Equal(t, expectedDataPoints, pagingMetric.Sum().DataPoints().Len())
- internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 0, "type", pcommon.NewValueString(metadata.AttributeType.Major))
- internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 0, "direction", pcommon.NewValueString(metadata.AttributeDirection.PageIn))
- internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 1, "type", pcommon.NewValueString(metadata.AttributeType.Major))
- internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 1, "direction", pcommon.NewValueString(metadata.AttributeDirection.PageOut))
+ internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 0, "type",
+ pcommon.NewValueString(metadata.AttributeTypeMajor.String()))
+ internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 0, "direction",
+ pcommon.NewValueString(metadata.AttributeDirectionPageIn.String()))
+ internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 1, "type",
+ pcommon.NewValueString(metadata.AttributeTypeMajor.String()))
+ internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 1, "direction",
+ pcommon.NewValueString(metadata.AttributeDirectionPageOut.String()))
if runtime.GOOS != "windows" {
- internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 2, "type", pcommon.NewValueString(metadata.AttributeType.Minor))
- internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 2, "direction", pcommon.NewValueString(metadata.AttributeDirection.PageIn))
- internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 3, "type", pcommon.NewValueString(metadata.AttributeType.Minor))
- internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 3, "direction", pcommon.NewValueString(metadata.AttributeDirection.PageOut))
+ internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 2, "type",
+ pcommon.NewValueString(metadata.AttributeTypeMinor.String()))
+ internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 2, "direction",
+ pcommon.NewValueString(metadata.AttributeDirectionPageIn.String()))
+ internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 3, "type",
+ pcommon.NewValueString(metadata.AttributeTypeMinor.String()))
+ internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 3, "direction",
+ pcommon.NewValueString(metadata.AttributeDirectionPageOut.String()))
}
}
@@ -218,6 +232,8 @@ func assertPageFaultsMetricValid(t *testing.T, pageFaultsMetric pmetric.Metric,
}
assert.Equal(t, 2, pageFaultsMetric.Sum().DataPoints().Len())
- internal.AssertSumMetricHasAttributeValue(t, pageFaultsMetric, 0, "type", pcommon.NewValueString(metadata.AttributeType.Major))
- internal.AssertSumMetricHasAttributeValue(t, pageFaultsMetric, 1, "type", pcommon.NewValueString(metadata.AttributeType.Minor))
+ internal.AssertSumMetricHasAttributeValue(t, pageFaultsMetric, 0, "type",
+ pcommon.NewValueString(metadata.AttributeTypeMajor.String()))
+ internal.AssertSumMetricHasAttributeValue(t, pageFaultsMetric, 1, "type",
+ pcommon.NewValueString(metadata.AttributeTypeMinor.String()))
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go
index f36010a250aa..ab3d220e5884 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go
@@ -101,15 +101,15 @@ func (s *scraper) scrapePagingUsageMetric() error {
func (s *scraper) recordPagingUsageDataPoints(now pcommon.Timestamp, pageFiles []*pageFileStats) {
for _, pageFile := range pageFiles {
- s.mb.RecordSystemPagingUsageDataPoint(now, int64(pageFile.usedBytes), pageFile.deviceName, metadata.AttributeState.Used)
- s.mb.RecordSystemPagingUsageDataPoint(now, int64(pageFile.freeBytes), pageFile.deviceName, metadata.AttributeState.Free)
+ s.mb.RecordSystemPagingUsageDataPoint(now, int64(pageFile.usedBytes), pageFile.deviceName, metadata.AttributeStateUsed)
+ s.mb.RecordSystemPagingUsageDataPoint(now, int64(pageFile.freeBytes), pageFile.deviceName, metadata.AttributeStateFree)
}
}
func (s *scraper) recordPagingUtilizationDataPoints(now pcommon.Timestamp, pageFiles []*pageFileStats) {
for _, pageFile := range pageFiles {
- s.mb.RecordSystemPagingUtilizationDataPoint(now, float64(pageFile.usedBytes)/float64(pageFile.totalBytes), pageFile.deviceName, metadata.AttributeState.Used)
- s.mb.RecordSystemPagingUtilizationDataPoint(now, float64(pageFile.freeBytes)/float64(pageFile.totalBytes), pageFile.deviceName, metadata.AttributeState.Free)
+ s.mb.RecordSystemPagingUtilizationDataPoint(now, float64(pageFile.usedBytes)/float64(pageFile.totalBytes), pageFile.deviceName, metadata.AttributeStateUsed)
+ s.mb.RecordSystemPagingUtilizationDataPoint(now, float64(pageFile.freeBytes)/float64(pageFile.totalBytes), pageFile.deviceName, metadata.AttributeStateFree)
}
}
@@ -138,6 +138,6 @@ func (s *scraper) scrapePagingOperationsMetric() error {
}
func (s *scraper) recordPagingOperationsDataPoints(now pcommon.Timestamp, memoryCounterValues *perfcounters.CounterValues) {
- s.mb.RecordSystemPagingOperationsDataPoint(now, memoryCounterValues.Values[pageReadsPerSec], metadata.AttributeDirection.PageIn, metadata.AttributeType.Major)
- s.mb.RecordSystemPagingOperationsDataPoint(now, memoryCounterValues.Values[pageWritesPerSec], metadata.AttributeDirection.PageOut, metadata.AttributeType.Major)
+ s.mb.RecordSystemPagingOperationsDataPoint(now, memoryCounterValues.Values[pageReadsPerSec], metadata.AttributeDirectionPageIn, metadata.AttributeTypeMajor)
+ s.mb.RecordSystemPagingOperationsDataPoint(now, memoryCounterValues.Values[pageWritesPerSec], metadata.AttributeDirectionPageOut, metadata.AttributeTypeMajor)
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/internal/metadata/generated_metrics_v2.go
index debd551c4a48..f262244f2086 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/internal/metadata/generated_metrics_v2.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/internal/metadata/generated_metrics_v2.go
@@ -32,6 +32,76 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeStatus specifies the value of the status attribute.
+type AttributeStatus int
+
+const (
+ _ AttributeStatus = iota
+ AttributeStatusBlocked
+ AttributeStatusDaemon
+ AttributeStatusDetached
+ AttributeStatusIdle
+ AttributeStatusLocked
+ AttributeStatusOrphan
+ AttributeStatusPaging
+ AttributeStatusRunning
+ AttributeStatusSleeping
+ AttributeStatusStopped
+ AttributeStatusSystem
+ AttributeStatusUnknown
+ AttributeStatusZombies
+)
+
+// String returns the string representation of the AttributeStatus.
+func (av AttributeStatus) String() string {
+ switch av {
+ case AttributeStatusBlocked:
+ return "blocked"
+ case AttributeStatusDaemon:
+ return "daemon"
+ case AttributeStatusDetached:
+ return "detached"
+ case AttributeStatusIdle:
+ return "idle"
+ case AttributeStatusLocked:
+ return "locked"
+ case AttributeStatusOrphan:
+ return "orphan"
+ case AttributeStatusPaging:
+ return "paging"
+ case AttributeStatusRunning:
+ return "running"
+ case AttributeStatusSleeping:
+ return "sleeping"
+ case AttributeStatusStopped:
+ return "stopped"
+ case AttributeStatusSystem:
+ return "system"
+ case AttributeStatusUnknown:
+ return "unknown"
+ case AttributeStatusZombies:
+ return "zombies"
+ }
+ return ""
+}
+
+// MapAttributeStatus is a helper map of string to AttributeStatus attribute value.
+var MapAttributeStatus = map[string]AttributeStatus{
+ "blocked": AttributeStatusBlocked,
+ "daemon": AttributeStatusDaemon,
+ "detached": AttributeStatusDetached,
+ "idle": AttributeStatusIdle,
+ "locked": AttributeStatusLocked,
+ "orphan": AttributeStatusOrphan,
+ "paging": AttributeStatusPaging,
+ "running": AttributeStatusRunning,
+ "sleeping": AttributeStatusSleeping,
+ "stopped": AttributeStatusStopped,
+ "system": AttributeStatusSystem,
+ "unknown": AttributeStatusUnknown,
+ "zombies": AttributeStatusZombies,
+}
+
type metricSystemProcessesCount struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -216,8 +286,8 @@ func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics {
}
// RecordSystemProcessesCountDataPoint adds a data point to system.processes.count metric.
-func (mb *MetricsBuilder) RecordSystemProcessesCountDataPoint(ts pcommon.Timestamp, val int64, statusAttributeValue string) {
- mb.metricSystemProcessesCount.recordDataPoint(mb.startTime, ts, val, statusAttributeValue)
+func (mb *MetricsBuilder) RecordSystemProcessesCountDataPoint(ts pcommon.Timestamp, val int64, statusAttributeValue AttributeStatus) {
+ mb.metricSystemProcessesCount.recordDataPoint(mb.startTime, ts, val, statusAttributeValue.String())
}
// RecordSystemProcessesCreatedDataPoint adds a data point to system.processes.created metric.
@@ -244,34 +314,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeStatus are the possible values that the attribute "status" can have.
-var AttributeStatus = struct {
- Blocked string
- Daemon string
- Detached string
- Idle string
- Locked string
- Orphan string
- Paging string
- Running string
- Sleeping string
- Stopped string
- System string
- Unknown string
- Zombies string
-}{
- "blocked",
- "daemon",
- "detached",
- "idle",
- "locked",
- "orphan",
- "paging",
- "running",
- "sleeping",
- "stopped",
- "system",
- "unknown",
- "zombies",
-}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper.go
index 4ae87ad7b060..1c921ee998f0 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper.go
@@ -56,8 +56,8 @@ type proc interface {
}
type processesMetadata struct {
- countByStatus map[string]int64 // ignored if enableProcessesCount is false
- processesCreated *int64 // ignored if enableProcessesCreated is false
+ countByStatus map[metadata.AttributeStatus]int64 // ignored if enableProcessesCount is false
+ processesCreated *int64 // ignored if enableProcessesCreated is false
}
// newProcessesScraper creates a set of Processes related metrics
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go
index 58835e9b8af4..79d3b466d739 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go
@@ -145,8 +145,8 @@ func validateRealData(t *testing.T, metrics pmetric.MetricSlice) {
}
assert.Failf("missing-metric", "metric is missing %q status label", statusVal)
}
- assertContainsStatus(metadata.AttributeStatus.Running)
- assertContainsStatus(metadata.AttributeStatus.Blocked)
+ assertContainsStatus(metadata.AttributeStatusRunning.String())
+ assertContainsStatus(metadata.AttributeStatusBlocked.String())
}
if expectProcessesCreatedMetric {
@@ -212,15 +212,14 @@ func validateFakeData(t *testing.T, metrics pmetric.MetricSlice) {
attrs[val.StringVal()] = point.IntVal()
}
- ls := metadata.AttributeStatus
assert.Equal(attrs, map[string]int64{
- ls.Blocked: 3,
- ls.Paging: 1,
- ls.Running: 2,
- ls.Sleeping: 4,
- ls.Stopped: 5,
- ls.Unknown: 9,
- ls.Zombies: 6,
+ metadata.AttributeStatusBlocked.String(): 3,
+ metadata.AttributeStatusPaging.String(): 1,
+ metadata.AttributeStatusRunning.String(): 2,
+ metadata.AttributeStatusSleeping.String(): 4,
+ metadata.AttributeStatusStopped.String(): 5,
+ metadata.AttributeStatusUnknown.String(): 9,
+ metadata.AttributeStatusZombies.String(): 6,
})
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go
index bbd7c34e7ba3..0385fba6e89e 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go
@@ -34,7 +34,7 @@ func (s *scraper) getProcessesMetadata() (processesMetadata, error) {
return processesMetadata{}, err
}
- countByStatus := map[string]int64{}
+ countByStatus := map[metadata.AttributeStatus]int64{}
for _, process := range processes {
var status []string
status, err = process.Status()
@@ -45,7 +45,7 @@ func (s *scraper) getProcessesMetadata() (processesMetadata, error) {
}
state, ok := toAttributeStatus(status)
if !ok {
- countByStatus[metadata.AttributeStatus.Unknown]++
+ countByStatus[metadata.AttributeStatusUnknown]++
continue
}
countByStatus[state]++
@@ -65,15 +65,15 @@ func (s *scraper) getProcessesMetadata() (processesMetadata, error) {
procsCreated = &v
}
- countByStatus[metadata.AttributeStatus.Blocked] = int64(miscStat.ProcsBlocked)
- countByStatus[metadata.AttributeStatus.Running] = int64(miscStat.ProcsRunning)
+ countByStatus[metadata.AttributeStatusBlocked] = int64(miscStat.ProcsBlocked)
+ countByStatus[metadata.AttributeStatusRunning] = int64(miscStat.ProcsRunning)
totalKnown := int64(0)
for _, count := range countByStatus {
totalKnown += count
}
if int64(miscStat.ProcsTotal) > totalKnown {
- countByStatus[metadata.AttributeStatus.Unknown] = int64(miscStat.ProcsTotal) - totalKnown
+ countByStatus[metadata.AttributeStatusUnknown] = int64(miscStat.ProcsTotal) - totalKnown
}
return processesMetadata{
@@ -82,25 +82,25 @@ func (s *scraper) getProcessesMetadata() (processesMetadata, error) {
}, nil
}
-func toAttributeStatus(status []string) (string, bool) {
+func toAttributeStatus(status []string) (metadata.AttributeStatus, bool) {
if len(status) == 0 || len(status[0]) == 0 {
- return "", false
+ return metadata.AttributeStatus(0), false
}
state, ok := charToState[status[0]]
return state, ok
}
-var charToState = map[string]string{
- process.Blocked: metadata.AttributeStatus.Blocked,
- process.Daemon: metadata.AttributeStatus.Daemon,
- process.Detached: metadata.AttributeStatus.Detached,
- process.Idle: metadata.AttributeStatus.Idle,
- process.Lock: metadata.AttributeStatus.Locked,
- process.Orphan: metadata.AttributeStatus.Orphan,
- process.Running: metadata.AttributeStatus.Running,
- process.Sleep: metadata.AttributeStatus.Sleeping,
- process.Stop: metadata.AttributeStatus.Stopped,
- process.System: metadata.AttributeStatus.System,
- process.Wait: metadata.AttributeStatus.Paging,
- process.Zombie: metadata.AttributeStatus.Zombies,
+var charToState = map[string]metadata.AttributeStatus{
+ process.Blocked: metadata.AttributeStatusBlocked,
+ process.Daemon: metadata.AttributeStatusDaemon,
+ process.Detached: metadata.AttributeStatusDetached,
+ process.Idle: metadata.AttributeStatusIdle,
+ process.Lock: metadata.AttributeStatusLocked,
+ process.Orphan: metadata.AttributeStatusOrphan,
+ process.Running: metadata.AttributeStatusRunning,
+ process.Sleep: metadata.AttributeStatusSleeping,
+ process.Stop: metadata.AttributeStatusStopped,
+ process.System: metadata.AttributeStatusSystem,
+ process.Wait: metadata.AttributeStatusPaging,
+ process.Zombie: metadata.AttributeStatusZombies,
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics_v2.go
index 6d03c137617b..52a4da84fb8a 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics_v2.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics_v2.go
@@ -40,6 +40,62 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeDirection specifies the value of the direction attribute.
+type AttributeDirection int
+
+const (
+ _ AttributeDirection = iota
+ AttributeDirectionRead
+ AttributeDirectionWrite
+)
+
+// String returns the string representation of the AttributeDirection.
+func (av AttributeDirection) String() string {
+ switch av {
+ case AttributeDirectionRead:
+ return "read"
+ case AttributeDirectionWrite:
+ return "write"
+ }
+ return ""
+}
+
+// MapAttributeDirection is a helper map of string to AttributeDirection attribute value.
+var MapAttributeDirection = map[string]AttributeDirection{
+ "read": AttributeDirectionRead,
+ "write": AttributeDirectionWrite,
+}
+
+// AttributeState specifies the value of the state attribute.
+type AttributeState int
+
+const (
+ _ AttributeState = iota
+ AttributeStateSystem
+ AttributeStateUser
+ AttributeStateWait
+)
+
+// String returns the string representation of the AttributeState.
+func (av AttributeState) String() string {
+ switch av {
+ case AttributeStateSystem:
+ return "system"
+ case AttributeStateUser:
+ return "user"
+ case AttributeStateWait:
+ return "wait"
+ }
+ return ""
+}
+
+// MapAttributeState is a helper map of string to AttributeState attribute value.
+var MapAttributeState = map[string]AttributeState{
+ "system": AttributeStateSystem,
+ "user": AttributeStateUser,
+ "wait": AttributeStateWait,
+}
+
type metricProcessCPUTime struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -376,13 +432,13 @@ func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics {
}
// RecordProcessCPUTimeDataPoint adds a data point to process.cpu.time metric.
-func (mb *MetricsBuilder) RecordProcessCPUTimeDataPoint(ts pcommon.Timestamp, val float64, stateAttributeValue string) {
- mb.metricProcessCPUTime.recordDataPoint(mb.startTime, ts, val, stateAttributeValue)
+func (mb *MetricsBuilder) RecordProcessCPUTimeDataPoint(ts pcommon.Timestamp, val float64, stateAttributeValue AttributeState) {
+ mb.metricProcessCPUTime.recordDataPoint(mb.startTime, ts, val, stateAttributeValue.String())
}
// RecordProcessDiskIoDataPoint adds a data point to process.disk.io metric.
-func (mb *MetricsBuilder) RecordProcessDiskIoDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue string) {
- mb.metricProcessDiskIo.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+func (mb *MetricsBuilder) RecordProcessDiskIoDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue AttributeDirection) {
+ mb.metricProcessDiskIo.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordProcessMemoryPhysicalUsageDataPoint adds a data point to process.memory.physical_usage metric.
@@ -417,23 +473,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeDirection are the possible values that the attribute "direction" can have.
-var AttributeDirection = struct {
- Read string
- Write string
-}{
- "read",
- "write",
-}
-
-// AttributeState are the possible values that the attribute "state" can have.
-var AttributeState = struct {
- System string
- User string
- Wait string
-}{
- "system",
- "user",
- "wait",
-}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go
index 645fe8aed2a2..7a19e56a496a 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go
@@ -198,7 +198,7 @@ func (s *scraper) scrapeAndAppendDiskIOMetric(now pcommon.Timestamp, handle proc
return err
}
- s.mb.RecordProcessDiskIoDataPoint(now, int64(io.ReadBytes), metadata.AttributeDirection.Read)
- s.mb.RecordProcessDiskIoDataPoint(now, int64(io.WriteBytes), metadata.AttributeDirection.Write)
+ s.mb.RecordProcessDiskIoDataPoint(now, int64(io.ReadBytes), metadata.AttributeDirectionRead)
+ s.mb.RecordProcessDiskIoDataPoint(now, int64(io.WriteBytes), metadata.AttributeDirectionWrite)
return nil
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_linux.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_linux.go
index 6abca906e703..833834210431 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_linux.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_linux.go
@@ -25,9 +25,9 @@ import (
)
func (s *scraper) recordCPUTimeMetric(now pcommon.Timestamp, cpuTime *cpu.TimesStat) {
- s.mb.RecordProcessCPUTimeDataPoint(now, cpuTime.User, metadata.AttributeState.User)
- s.mb.RecordProcessCPUTimeDataPoint(now, cpuTime.System, metadata.AttributeState.System)
- s.mb.RecordProcessCPUTimeDataPoint(now, cpuTime.Iowait, metadata.AttributeState.Wait)
+ s.mb.RecordProcessCPUTimeDataPoint(now, cpuTime.User, metadata.AttributeStateUser)
+ s.mb.RecordProcessCPUTimeDataPoint(now, cpuTime.System, metadata.AttributeStateSystem)
+ s.mb.RecordProcessCPUTimeDataPoint(now, cpuTime.Iowait, metadata.AttributeStateWait)
}
func getProcessExecutable(proc processHandle) (*executableMetadata, error) {
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go
index 7636bb8e25db..a36e959a68b2 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go
@@ -96,10 +96,13 @@ func assertCPUTimeMetricValid(t *testing.T, resourceMetrics pmetric.ResourceMetr
if startTime != 0 {
internal.AssertSumMetricStartTimeEquals(t, cpuTimeMetric, startTime)
}
- internal.AssertSumMetricHasAttributeValue(t, cpuTimeMetric, 0, "state", pcommon.NewValueString(metadata.AttributeState.User))
- internal.AssertSumMetricHasAttributeValue(t, cpuTimeMetric, 1, "state", pcommon.NewValueString(metadata.AttributeState.System))
+ internal.AssertSumMetricHasAttributeValue(t, cpuTimeMetric, 0, "state",
+ pcommon.NewValueString(metadata.AttributeStateUser.String()))
+ internal.AssertSumMetricHasAttributeValue(t, cpuTimeMetric, 1, "state",
+ pcommon.NewValueString(metadata.AttributeStateSystem.String()))
if runtime.GOOS == "linux" {
- internal.AssertSumMetricHasAttributeValue(t, cpuTimeMetric, 2, "state", pcommon.NewValueString(metadata.AttributeState.Wait))
+ internal.AssertSumMetricHasAttributeValue(t, cpuTimeMetric, 2, "state",
+ pcommon.NewValueString(metadata.AttributeStateWait.String()))
}
}
@@ -121,8 +124,10 @@ func assertDiskIOMetricValid(t *testing.T, resourceMetrics pmetric.ResourceMetri
if startTime != 0 {
internal.AssertSumMetricStartTimeEquals(t, diskIOMetric, startTime)
}
- internal.AssertSumMetricHasAttributeValue(t, diskIOMetric, 0, "direction", pcommon.NewValueString(metadata.AttributeDirection.Read))
- internal.AssertSumMetricHasAttributeValue(t, diskIOMetric, 1, "direction", pcommon.NewValueString(metadata.AttributeDirection.Write))
+ internal.AssertSumMetricHasAttributeValue(t, diskIOMetric, 0, "direction",
+ pcommon.NewValueString(metadata.AttributeDirectionRead.String()))
+ internal.AssertSumMetricHasAttributeValue(t, diskIOMetric, 1, "direction",
+ pcommon.NewValueString(metadata.AttributeDirectionWrite.String()))
}
func assertSameTimeStampForAllMetricsWithinResource(t *testing.T, resourceMetrics pmetric.ResourceMetricsSlice) {
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_windows.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_windows.go
index 6f46b0d1bfa6..be6c3dc8361b 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_windows.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_windows.go
@@ -28,8 +28,8 @@ import (
)
func (s *scraper) recordCPUTimeMetric(now pcommon.Timestamp, cpuTime *cpu.TimesStat) {
- s.mb.RecordProcessCPUTimeDataPoint(now, cpuTime.User, metadata.AttributeState.User)
- s.mb.RecordProcessCPUTimeDataPoint(now, cpuTime.System, metadata.AttributeState.System)
+ s.mb.RecordProcessCPUTimeDataPoint(now, cpuTime.User, metadata.AttributeStateUser)
+ s.mb.RecordProcessCPUTimeDataPoint(now, cpuTime.System, metadata.AttributeStateSystem)
}
func getProcessExecutable(proc processHandle) (*executableMetadata, error) {
diff --git a/receiver/iisreceiver/internal/metadata/generated_metrics_v2.go b/receiver/iisreceiver/internal/metadata/generated_metrics_v2.go
index 173ea894d3fb..62a606738ee4 100644
--- a/receiver/iisreceiver/internal/metadata/generated_metrics_v2.go
+++ b/receiver/iisreceiver/internal/metadata/generated_metrics_v2.go
@@ -71,6 +71,78 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeDirection specifies the value of the direction attribute.
+type AttributeDirection int
+
+const (
+ _ AttributeDirection = iota
+ AttributeDirectionSent
+ AttributeDirectionReceived
+)
+
+// String returns the string representation of the AttributeDirection.
+func (av AttributeDirection) String() string {
+ switch av {
+ case AttributeDirectionSent:
+ return "sent"
+ case AttributeDirectionReceived:
+ return "received"
+ }
+ return ""
+}
+
+// MapAttributeDirection is a helper map of string to AttributeDirection attribute value.
+var MapAttributeDirection = map[string]AttributeDirection{
+ "sent": AttributeDirectionSent,
+ "received": AttributeDirectionReceived,
+}
+
+// AttributeRequest specifies the value of the request attribute.
+type AttributeRequest int
+
+const (
+ _ AttributeRequest = iota
+ AttributeRequestDelete
+ AttributeRequestGet
+ AttributeRequestHead
+ AttributeRequestOptions
+ AttributeRequestPost
+ AttributeRequestPut
+ AttributeRequestTrace
+)
+
+// String returns the string representation of the AttributeRequest.
+func (av AttributeRequest) String() string {
+ switch av {
+ case AttributeRequestDelete:
+ return "delete"
+ case AttributeRequestGet:
+ return "get"
+ case AttributeRequestHead:
+ return "head"
+ case AttributeRequestOptions:
+ return "options"
+ case AttributeRequestPost:
+ return "post"
+ case AttributeRequestPut:
+ return "put"
+ case AttributeRequestTrace:
+ return "trace"
+ }
+ return ""
+}
+
+// MapAttributeRequest is a helper map of string to AttributeRequest attribute value.
+var MapAttributeRequest = map[string]AttributeRequest{
+ "delete": AttributeRequestDelete,
+ "get": AttributeRequestGet,
+ "head": AttributeRequestHead,
+ "options": AttributeRequestOptions,
+ "post": AttributeRequestPost,
+ "put": AttributeRequestPut,
+ "trace": AttributeRequestTrace,
+}
+
type metricIisConnectionActive struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -814,18 +886,18 @@ func (mb *MetricsBuilder) RecordIisNetworkBlockedDataPoint(ts pcommon.Timestamp,
}
// RecordIisNetworkFileCountDataPoint adds a data point to iis.network.file.count metric.
-func (mb *MetricsBuilder) RecordIisNetworkFileCountDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue string) {
- mb.metricIisNetworkFileCount.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+func (mb *MetricsBuilder) RecordIisNetworkFileCountDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue AttributeDirection) {
+ mb.metricIisNetworkFileCount.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordIisNetworkIoDataPoint adds a data point to iis.network.io metric.
-func (mb *MetricsBuilder) RecordIisNetworkIoDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue string) {
- mb.metricIisNetworkIo.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+func (mb *MetricsBuilder) RecordIisNetworkIoDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue AttributeDirection) {
+ mb.metricIisNetworkIo.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordIisRequestCountDataPoint adds a data point to iis.request.count metric.
-func (mb *MetricsBuilder) RecordIisRequestCountDataPoint(ts pcommon.Timestamp, val int64, requestAttributeValue string) {
- mb.metricIisRequestCount.recordDataPoint(mb.startTime, ts, val, requestAttributeValue)
+func (mb *MetricsBuilder) RecordIisRequestCountDataPoint(ts pcommon.Timestamp, val int64, requestAttributeValue AttributeRequest) {
+ mb.metricIisRequestCount.recordDataPoint(mb.startTime, ts, val, requestAttributeValue.String())
}
// RecordIisRequestQueueAgeMaxDataPoint adds a data point to iis.request.queue.age.max metric.
@@ -875,31 +947,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeDirection are the possible values that the attribute "direction" can have.
-var AttributeDirection = struct {
- Sent string
- Received string
-}{
- "sent",
- "received",
-}
-
-// AttributeRequest are the possible values that the attribute "request" can have.
-var AttributeRequest = struct {
- Delete string
- Get string
- Head string
- Options string
- Post string
- Put string
- Trace string
-}{
- "delete",
- "get",
- "head",
- "options",
- "post",
- "put",
- "trace",
-}
diff --git a/receiver/iisreceiver/internal/metadata/recorder.go b/receiver/iisreceiver/internal/metadata/recorder.go
index 8d848530d09f..365f89090d7f 100644
--- a/receiver/iisreceiver/internal/metadata/recorder.go
+++ b/receiver/iisreceiver/internal/metadata/recorder.go
@@ -29,11 +29,11 @@ func (mb *MetricsBuilder) RecordAny(ts pcommon.Timestamp, val float64, name stri
case "iis.network.blocked":
mb.RecordIisNetworkBlockedDataPoint(ts, int64(val))
case "iis.network.file.count":
- mb.RecordIisNetworkFileCountDataPoint(ts, int64(val), attributes[A.Direction])
+ mb.RecordIisNetworkFileCountDataPoint(ts, int64(val), MapAttributeDirection[attributes[A.Direction]])
case "iis.network.io":
- mb.RecordIisNetworkIoDataPoint(ts, int64(val), attributes[A.Direction])
+ mb.RecordIisNetworkIoDataPoint(ts, int64(val), MapAttributeDirection[attributes[A.Direction]])
case "iis.request.count":
- mb.RecordIisRequestCountDataPoint(ts, int64(val), attributes[A.Request])
+ mb.RecordIisRequestCountDataPoint(ts, int64(val), MapAttributeRequest[attributes[A.Request]])
case "iis.request.queue.age.max":
mb.RecordIisRequestQueueAgeMaxDataPoint(ts, int64(val))
case "iis.request.queue.count":
diff --git a/receiver/memcachedreceiver/internal/metadata/generated_metrics_v2.go b/receiver/memcachedreceiver/internal/metadata/generated_metrics_v2.go
index 529d8842fde2..4dee36f467e8 100644
--- a/receiver/memcachedreceiver/internal/metadata/generated_metrics_v2.go
+++ b/receiver/memcachedreceiver/internal/metadata/generated_metrics_v2.go
@@ -67,6 +67,148 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeCommand specifies the value of the command attribute.
+type AttributeCommand int
+
+const (
+ _ AttributeCommand = iota
+ AttributeCommandGet
+ AttributeCommandSet
+ AttributeCommandFlush
+ AttributeCommandTouch
+)
+
+// String returns the string representation of the AttributeCommand.
+func (av AttributeCommand) String() string {
+ switch av {
+ case AttributeCommandGet:
+ return "get"
+ case AttributeCommandSet:
+ return "set"
+ case AttributeCommandFlush:
+ return "flush"
+ case AttributeCommandTouch:
+ return "touch"
+ }
+ return ""
+}
+
+// MapAttributeCommand is a helper map of string to AttributeCommand attribute value.
+var MapAttributeCommand = map[string]AttributeCommand{
+ "get": AttributeCommandGet,
+ "set": AttributeCommandSet,
+ "flush": AttributeCommandFlush,
+ "touch": AttributeCommandTouch,
+}
+
+// AttributeDirection specifies the value of the direction attribute.
+type AttributeDirection int
+
+const (
+ _ AttributeDirection = iota
+ AttributeDirectionSent
+ AttributeDirectionReceived
+)
+
+// String returns the string representation of the AttributeDirection.
+func (av AttributeDirection) String() string {
+ switch av {
+ case AttributeDirectionSent:
+ return "sent"
+ case AttributeDirectionReceived:
+ return "received"
+ }
+ return ""
+}
+
+// MapAttributeDirection is a helper map of string to AttributeDirection attribute value.
+var MapAttributeDirection = map[string]AttributeDirection{
+ "sent": AttributeDirectionSent,
+ "received": AttributeDirectionReceived,
+}
+
+// AttributeOperation specifies the value of the operation attribute.
+type AttributeOperation int
+
+const (
+ _ AttributeOperation = iota
+ AttributeOperationIncrement
+ AttributeOperationDecrement
+ AttributeOperationGet
+)
+
+// String returns the string representation of the AttributeOperation.
+func (av AttributeOperation) String() string {
+ switch av {
+ case AttributeOperationIncrement:
+ return "increment"
+ case AttributeOperationDecrement:
+ return "decrement"
+ case AttributeOperationGet:
+ return "get"
+ }
+ return ""
+}
+
+// MapAttributeOperation is a helper map of string to AttributeOperation attribute value.
+var MapAttributeOperation = map[string]AttributeOperation{
+ "increment": AttributeOperationIncrement,
+ "decrement": AttributeOperationDecrement,
+ "get": AttributeOperationGet,
+}
+
+// AttributeState specifies the value of the state attribute.
+type AttributeState int
+
+const (
+ _ AttributeState = iota
+ AttributeStateSystem
+ AttributeStateUser
+)
+
+// String returns the string representation of the AttributeState.
+func (av AttributeState) String() string {
+ switch av {
+ case AttributeStateSystem:
+ return "system"
+ case AttributeStateUser:
+ return "user"
+ }
+ return ""
+}
+
+// MapAttributeState is a helper map of string to AttributeState attribute value.
+var MapAttributeState = map[string]AttributeState{
+ "system": AttributeStateSystem,
+ "user": AttributeStateUser,
+}
+
+// AttributeType specifies the value of the type attribute.
+type AttributeType int
+
+const (
+ _ AttributeType = iota
+ AttributeTypeHit
+ AttributeTypeMiss
+)
+
+// String returns the string representation of the AttributeType.
+func (av AttributeType) String() string {
+ switch av {
+ case AttributeTypeHit:
+ return "hit"
+ case AttributeTypeMiss:
+ return "miss"
+ }
+ return ""
+}
+
+// MapAttributeType is a helper map of string to AttributeType attribute value.
+var MapAttributeType = map[string]AttributeType{
+ "hit": AttributeTypeHit,
+ "miss": AttributeTypeMiss,
+}
+
type metricMemcachedBytes struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -746,8 +888,8 @@ func (mb *MetricsBuilder) RecordMemcachedBytesDataPoint(ts pcommon.Timestamp, va
}
// RecordMemcachedCommandsDataPoint adds a data point to memcached.commands metric.
-func (mb *MetricsBuilder) RecordMemcachedCommandsDataPoint(ts pcommon.Timestamp, val int64, commandAttributeValue string) {
- mb.metricMemcachedCommands.recordDataPoint(mb.startTime, ts, val, commandAttributeValue)
+func (mb *MetricsBuilder) RecordMemcachedCommandsDataPoint(ts pcommon.Timestamp, val int64, commandAttributeValue AttributeCommand) {
+ mb.metricMemcachedCommands.recordDataPoint(mb.startTime, ts, val, commandAttributeValue.String())
}
// RecordMemcachedConnectionsCurrentDataPoint adds a data point to memcached.connections.current metric.
@@ -761,8 +903,8 @@ func (mb *MetricsBuilder) RecordMemcachedConnectionsTotalDataPoint(ts pcommon.Ti
}
// RecordMemcachedCPUUsageDataPoint adds a data point to memcached.cpu.usage metric.
-func (mb *MetricsBuilder) RecordMemcachedCPUUsageDataPoint(ts pcommon.Timestamp, val float64, stateAttributeValue string) {
- mb.metricMemcachedCPUUsage.recordDataPoint(mb.startTime, ts, val, stateAttributeValue)
+func (mb *MetricsBuilder) RecordMemcachedCPUUsageDataPoint(ts pcommon.Timestamp, val float64, stateAttributeValue AttributeState) {
+ mb.metricMemcachedCPUUsage.recordDataPoint(mb.startTime, ts, val, stateAttributeValue.String())
}
// RecordMemcachedCurrentItemsDataPoint adds a data point to memcached.current_items metric.
@@ -776,18 +918,18 @@ func (mb *MetricsBuilder) RecordMemcachedEvictionsDataPoint(ts pcommon.Timestamp
}
// RecordMemcachedNetworkDataPoint adds a data point to memcached.network metric.
-func (mb *MetricsBuilder) RecordMemcachedNetworkDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue string) {
- mb.metricMemcachedNetwork.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+func (mb *MetricsBuilder) RecordMemcachedNetworkDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue AttributeDirection) {
+ mb.metricMemcachedNetwork.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordMemcachedOperationHitRatioDataPoint adds a data point to memcached.operation_hit_ratio metric.
-func (mb *MetricsBuilder) RecordMemcachedOperationHitRatioDataPoint(ts pcommon.Timestamp, val float64, operationAttributeValue string) {
- mb.metricMemcachedOperationHitRatio.recordDataPoint(mb.startTime, ts, val, operationAttributeValue)
+func (mb *MetricsBuilder) RecordMemcachedOperationHitRatioDataPoint(ts pcommon.Timestamp, val float64, operationAttributeValue AttributeOperation) {
+ mb.metricMemcachedOperationHitRatio.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String())
}
// RecordMemcachedOperationsDataPoint adds a data point to memcached.operations metric.
-func (mb *MetricsBuilder) RecordMemcachedOperationsDataPoint(ts pcommon.Timestamp, val int64, typeAttributeValue string, operationAttributeValue string) {
- mb.metricMemcachedOperations.recordDataPoint(mb.startTime, ts, val, typeAttributeValue, operationAttributeValue)
+func (mb *MetricsBuilder) RecordMemcachedOperationsDataPoint(ts pcommon.Timestamp, val int64, typeAttributeValue AttributeType, operationAttributeValue AttributeOperation) {
+ mb.metricMemcachedOperations.recordDataPoint(mb.startTime, ts, val, typeAttributeValue.String(), operationAttributeValue.String())
}
// RecordMemcachedThreadsDataPoint adds a data point to memcached.threads metric.
@@ -826,54 +968,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeCommand are the possible values that the attribute "command" can have.
-var AttributeCommand = struct {
- Get string
- Set string
- Flush string
- Touch string
-}{
- "get",
- "set",
- "flush",
- "touch",
-}
-
-// AttributeDirection are the possible values that the attribute "direction" can have.
-var AttributeDirection = struct {
- Sent string
- Received string
-}{
- "sent",
- "received",
-}
-
-// AttributeOperation are the possible values that the attribute "operation" can have.
-var AttributeOperation = struct {
- Increment string
- Decrement string
- Get string
-}{
- "increment",
- "decrement",
- "get",
-}
-
-// AttributeState are the possible values that the attribute "state" can have.
-var AttributeState = struct {
- System string
- User string
-}{
- "system",
- "user",
-}
-
-// AttributeType are the possible values that the attribute "type" can have.
-var AttributeType = struct {
- Hit string
- Miss string
-}{
- "hit",
- "miss",
-}
diff --git a/receiver/memcachedreceiver/scraper.go b/receiver/memcachedreceiver/scraper.go
index d3a1f9a2bba8..efafbf68fd63 100644
--- a/receiver/memcachedreceiver/scraper.go
+++ b/receiver/memcachedreceiver/scraper.go
@@ -79,19 +79,19 @@ func (r *memcachedScraper) scrape(_ context.Context) (pmetric.Metrics, error) {
}
case "cmd_get":
if parsedV, ok := r.parseInt(k, v); ok {
- r.mb.RecordMemcachedCommandsDataPoint(now, parsedV, "get")
+ r.mb.RecordMemcachedCommandsDataPoint(now, parsedV, metadata.AttributeCommandGet)
}
case "cmd_set":
if parsedV, ok := r.parseInt(k, v); ok {
- r.mb.RecordMemcachedCommandsDataPoint(now, parsedV, "set")
+ r.mb.RecordMemcachedCommandsDataPoint(now, parsedV, metadata.AttributeCommandSet)
}
case "cmd_flush":
if parsedV, ok := r.parseInt(k, v); ok {
- r.mb.RecordMemcachedCommandsDataPoint(now, parsedV, "flush")
+ r.mb.RecordMemcachedCommandsDataPoint(now, parsedV, metadata.AttributeCommandFlush)
}
case "cmd_touch":
if parsedV, ok := r.parseInt(k, v); ok {
- r.mb.RecordMemcachedCommandsDataPoint(now, parsedV, "touch")
+ r.mb.RecordMemcachedCommandsDataPoint(now, parsedV, metadata.AttributeCommandTouch)
}
case "curr_items":
if parsedV, ok := r.parseInt(k, v); ok {
@@ -109,44 +109,50 @@ func (r *memcachedScraper) scrape(_ context.Context) (pmetric.Metrics, error) {
}
case "bytes_read":
if parsedV, ok := r.parseInt(k, v); ok {
- r.mb.RecordMemcachedNetworkDataPoint(now, parsedV, "received")
+ r.mb.RecordMemcachedNetworkDataPoint(now, parsedV, metadata.AttributeDirectionReceived)
}
case "bytes_written":
if parsedV, ok := r.parseInt(k, v); ok {
- r.mb.RecordMemcachedNetworkDataPoint(now, parsedV, "sent")
+ r.mb.RecordMemcachedNetworkDataPoint(now, parsedV, metadata.AttributeDirectionSent)
}
case "get_hits":
if parsedV, ok := r.parseInt(k, v); ok {
- r.mb.RecordMemcachedOperationsDataPoint(now, parsedV, "hit", "get")
+ r.mb.RecordMemcachedOperationsDataPoint(now, parsedV, metadata.AttributeTypeHit,
+ metadata.AttributeOperationGet)
}
case "get_misses":
if parsedV, ok := r.parseInt(k, v); ok {
- r.mb.RecordMemcachedOperationsDataPoint(now, parsedV, "miss", "get")
+ r.mb.RecordMemcachedOperationsDataPoint(now, parsedV, metadata.AttributeTypeMiss,
+ metadata.AttributeOperationGet)
}
case "incr_hits":
if parsedV, ok := r.parseInt(k, v); ok {
- r.mb.RecordMemcachedOperationsDataPoint(now, parsedV, "hit", "increment")
+ r.mb.RecordMemcachedOperationsDataPoint(now, parsedV, metadata.AttributeTypeHit,
+ metadata.AttributeOperationIncrement)
}
case "incr_misses":
if parsedV, ok := r.parseInt(k, v); ok {
- r.mb.RecordMemcachedOperationsDataPoint(now, parsedV, "miss", "increment")
+ r.mb.RecordMemcachedOperationsDataPoint(now, parsedV, metadata.AttributeTypeMiss,
+ metadata.AttributeOperationIncrement)
}
case "decr_hits":
if parsedV, ok := r.parseInt(k, v); ok {
- r.mb.RecordMemcachedOperationsDataPoint(now, parsedV, "hit", "decrement")
+ r.mb.RecordMemcachedOperationsDataPoint(now, parsedV, metadata.AttributeTypeHit,
+ metadata.AttributeOperationDecrement)
}
case "decr_misses":
if parsedV, ok := r.parseInt(k, v); ok {
- r.mb.RecordMemcachedOperationsDataPoint(now, parsedV, "miss", "decrement")
+ r.mb.RecordMemcachedOperationsDataPoint(now, parsedV, metadata.AttributeTypeMiss,
+ metadata.AttributeOperationDecrement)
}
case "rusage_system":
if parsedV, ok := r.parseFloat(k, v); ok {
- r.mb.RecordMemcachedCPUUsageDataPoint(now, parsedV, "system")
+ r.mb.RecordMemcachedCPUUsageDataPoint(now, parsedV, metadata.AttributeStateSystem)
}
case "rusage_user":
if parsedV, ok := r.parseFloat(k, v); ok {
- r.mb.RecordMemcachedCPUUsageDataPoint(now, parsedV, "user")
+ r.mb.RecordMemcachedCPUUsageDataPoint(now, parsedV, metadata.AttributeStateUser)
}
}
}
@@ -157,7 +163,8 @@ func (r *memcachedScraper) scrape(_ context.Context) (pmetric.Metrics, error) {
parsedHit, okHit := r.parseInt("incr_hits", stats.Stats["incr_hits"])
parsedMiss, okMiss := r.parseInt("incr_misses", stats.Stats["incr_misses"])
if okHit && okMiss {
- r.mb.RecordMemcachedOperationHitRatioDataPoint(now, calculateHitRatio(parsedHit, parsedMiss), "increment")
+ r.mb.RecordMemcachedOperationHitRatioDataPoint(now, calculateHitRatio(parsedHit, parsedMiss),
+ metadata.AttributeOperationIncrement)
}
attributes = pcommon.NewMap()
@@ -165,7 +172,8 @@ func (r *memcachedScraper) scrape(_ context.Context) (pmetric.Metrics, error) {
parsedHit, okHit = r.parseInt("decr_hits", stats.Stats["decr_hits"])
parsedMiss, okMiss = r.parseInt("decr_misses", stats.Stats["decr_misses"])
if okHit && okMiss {
- r.mb.RecordMemcachedOperationHitRatioDataPoint(now, calculateHitRatio(parsedHit, parsedMiss), "decrement")
+ r.mb.RecordMemcachedOperationHitRatioDataPoint(now, calculateHitRatio(parsedHit, parsedMiss),
+ metadata.AttributeOperationDecrement)
}
attributes = pcommon.NewMap()
@@ -173,7 +181,7 @@ func (r *memcachedScraper) scrape(_ context.Context) (pmetric.Metrics, error) {
parsedHit, okHit = r.parseInt("get_hits", stats.Stats["get_hits"])
parsedMiss, okMiss = r.parseInt("get_misses", stats.Stats["get_misses"])
if okHit && okMiss {
- r.mb.RecordMemcachedOperationHitRatioDataPoint(now, calculateHitRatio(parsedHit, parsedMiss), "get")
+ r.mb.RecordMemcachedOperationHitRatioDataPoint(now, calculateHitRatio(parsedHit, parsedMiss), metadata.AttributeOperationGet)
}
}
diff --git a/receiver/mongodbatlasreceiver/documentation.md b/receiver/mongodbatlasreceiver/documentation.md
index bfb41f87ff2a..f75090df95c5 100644
--- a/receiver/mongodbatlasreceiver/documentation.md
+++ b/receiver/mongodbatlasreceiver/documentation.md
@@ -69,8 +69,8 @@ These are the metrics available for this scraper.
| **mongodbatlas.system.network.io.max** | System Network IO Aggregate of MongoDB Metrics MAX_SYSTEM_NETWORK_OUT, MAX_SYSTEM_NETWORK_IN | By/s | Gauge(Double) |
|
| **mongodbatlas.system.paging.io.average** | Swap IO Aggregate of MongoDB Metrics SWAP_IO_IN, SWAP_IO_OUT | {pages}/s | Gauge(Double) | |
| **mongodbatlas.system.paging.io.max** | Swap IO Aggregate of MongoDB Metrics MAX_SWAP_IO_IN, MAX_SWAP_IO_OUT | {pages}/s | Gauge(Double) | |
-| **mongodbatlas.system.paging.usage.average** | Swap usage Aggregate of MongoDB Metrics SWAP_USAGE_FREE, SWAP_USAGE_USED | KiBy | Gauge(Double) | |
-| **mongodbatlas.system.paging.usage.max** | Swap usage Aggregate of MongoDB Metrics MAX_SWAP_USAGE_FREE, MAX_SWAP_USAGE_USED | KiBy | Gauge(Double) | |
+| **mongodbatlas.system.paging.usage.average** | Swap usage Aggregate of MongoDB Metrics SWAP_USAGE_FREE, SWAP_USAGE_USED | KiBy | Gauge(Double) | |
+| **mongodbatlas.system.paging.usage.max** | Swap usage Aggregate of MongoDB Metrics MAX_SWAP_USAGE_FREE, MAX_SWAP_USAGE_USED | KiBy | Gauge(Double) | |
**Highlighted metrics** are emitted by default. Other metrics are optional and not emitted by default.
Any metric can be enabled or disabled with the following scraper configuration:
diff --git a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_v2.go b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_v2.go
index 1427367b84db..1bd6af58ad7d 100644
--- a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_v2.go
+++ b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_v2.go
@@ -275,6 +275,726 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeAssertType specifies the value of the assert_type attribute.
+type AttributeAssertType int
+
+const (
+ _ AttributeAssertType = iota
+ AttributeAssertTypeRegular
+ AttributeAssertTypeWarning
+ AttributeAssertTypeMsg
+ AttributeAssertTypeUser
+)
+
+// String returns the string representation of the AttributeAssertType.
+func (av AttributeAssertType) String() string {
+ switch av {
+ case AttributeAssertTypeRegular:
+ return "regular"
+ case AttributeAssertTypeWarning:
+ return "warning"
+ case AttributeAssertTypeMsg:
+ return "msg"
+ case AttributeAssertTypeUser:
+ return "user"
+ }
+ return ""
+}
+
+// MapAttributeAssertType is a helper map of string to AttributeAssertType attribute value.
+var MapAttributeAssertType = map[string]AttributeAssertType{
+ "regular": AttributeAssertTypeRegular,
+ "warning": AttributeAssertTypeWarning,
+ "msg": AttributeAssertTypeMsg,
+ "user": AttributeAssertTypeUser,
+}
+
+// AttributeBtreeCounterType specifies the value of the btree_counter_type attribute.
+type AttributeBtreeCounterType int
+
+const (
+ _ AttributeBtreeCounterType = iota
+ AttributeBtreeCounterTypeAccesses
+ AttributeBtreeCounterTypeHits
+ AttributeBtreeCounterTypeMisses
+)
+
+// String returns the string representation of the AttributeBtreeCounterType.
+func (av AttributeBtreeCounterType) String() string {
+ switch av {
+ case AttributeBtreeCounterTypeAccesses:
+ return "accesses"
+ case AttributeBtreeCounterTypeHits:
+ return "hits"
+ case AttributeBtreeCounterTypeMisses:
+ return "misses"
+ }
+ return ""
+}
+
+// MapAttributeBtreeCounterType is a helper map of string to AttributeBtreeCounterType attribute value.
+var MapAttributeBtreeCounterType = map[string]AttributeBtreeCounterType{
+ "accesses": AttributeBtreeCounterTypeAccesses,
+ "hits": AttributeBtreeCounterTypeHits,
+ "misses": AttributeBtreeCounterTypeMisses,
+}
+
+// AttributeCacheDirection specifies the value of the cache_direction attribute.
+type AttributeCacheDirection int
+
+const (
+ _ AttributeCacheDirection = iota
+ AttributeCacheDirectionReadInto
+ AttributeCacheDirectionWrittenFrom
+)
+
+// String returns the string representation of the AttributeCacheDirection.
+func (av AttributeCacheDirection) String() string {
+ switch av {
+ case AttributeCacheDirectionReadInto:
+ return "read_into"
+ case AttributeCacheDirectionWrittenFrom:
+ return "written_from"
+ }
+ return ""
+}
+
+// MapAttributeCacheDirection is a helper map of string to AttributeCacheDirection attribute value.
+var MapAttributeCacheDirection = map[string]AttributeCacheDirection{
+ "read_into": AttributeCacheDirectionReadInto,
+ "written_from": AttributeCacheDirectionWrittenFrom,
+}
+
+// AttributeCacheStatus specifies the value of the cache_status attribute.
+type AttributeCacheStatus int
+
+const (
+ _ AttributeCacheStatus = iota
+ AttributeCacheStatusDirty
+ AttributeCacheStatusUsed
+)
+
+// String returns the string representation of the AttributeCacheStatus.
+func (av AttributeCacheStatus) String() string {
+ switch av {
+ case AttributeCacheStatusDirty:
+ return "dirty"
+ case AttributeCacheStatusUsed:
+ return "used"
+ }
+ return ""
+}
+
+// MapAttributeCacheStatus is a helper map of string to AttributeCacheStatus attribute value.
+var MapAttributeCacheStatus = map[string]AttributeCacheStatus{
+ "dirty": AttributeCacheStatusDirty,
+ "used": AttributeCacheStatusUsed,
+}
+
+// AttributeClusterRole specifies the value of the cluster_role attribute.
+type AttributeClusterRole int
+
+const (
+ _ AttributeClusterRole = iota
+ AttributeClusterRolePrimary
+ AttributeClusterRoleReplica
+)
+
+// String returns the string representation of the AttributeClusterRole.
+func (av AttributeClusterRole) String() string {
+ switch av {
+ case AttributeClusterRolePrimary:
+ return "primary"
+ case AttributeClusterRoleReplica:
+ return "replica"
+ }
+ return ""
+}
+
+// MapAttributeClusterRole is a helper map of string to AttributeClusterRole attribute value.
+var MapAttributeClusterRole = map[string]AttributeClusterRole{
+ "primary": AttributeClusterRolePrimary,
+ "replica": AttributeClusterRoleReplica,
+}
+
+// AttributeCPUState specifies the value of the cpu_state attribute.
+type AttributeCPUState int
+
+const (
+ _ AttributeCPUState = iota
+ AttributeCPUStateKernel
+ AttributeCPUStateUser
+ AttributeCPUStateNice
+ AttributeCPUStateIowait
+ AttributeCPUStateIrq
+ AttributeCPUStateSoftirq
+ AttributeCPUStateGuest
+ AttributeCPUStateSteal
+)
+
+// String returns the string representation of the AttributeCPUState.
+func (av AttributeCPUState) String() string {
+ switch av {
+ case AttributeCPUStateKernel:
+ return "kernel"
+ case AttributeCPUStateUser:
+ return "user"
+ case AttributeCPUStateNice:
+ return "nice"
+ case AttributeCPUStateIowait:
+ return "iowait"
+ case AttributeCPUStateIrq:
+ return "irq"
+ case AttributeCPUStateSoftirq:
+ return "softirq"
+ case AttributeCPUStateGuest:
+ return "guest"
+ case AttributeCPUStateSteal:
+ return "steal"
+ }
+ return ""
+}
+
+// MapAttributeCPUState is a helper map of string to AttributeCPUState attribute value.
+var MapAttributeCPUState = map[string]AttributeCPUState{
+ "kernel": AttributeCPUStateKernel,
+ "user": AttributeCPUStateUser,
+ "nice": AttributeCPUStateNice,
+ "iowait": AttributeCPUStateIowait,
+ "irq": AttributeCPUStateIrq,
+ "softirq": AttributeCPUStateSoftirq,
+ "guest": AttributeCPUStateGuest,
+ "steal": AttributeCPUStateSteal,
+}
+
+// AttributeCursorState specifies the value of the cursor_state attribute.
+type AttributeCursorState int
+
+const (
+ _ AttributeCursorState = iota
+ AttributeCursorStateTimedOut
+ AttributeCursorStateOpen
+)
+
+// String returns the string representation of the AttributeCursorState.
+func (av AttributeCursorState) String() string {
+ switch av {
+ case AttributeCursorStateTimedOut:
+ return "timed_out"
+ case AttributeCursorStateOpen:
+ return "open"
+ }
+ return ""
+}
+
+// MapAttributeCursorState is a helper map of string to AttributeCursorState attribute value.
+var MapAttributeCursorState = map[string]AttributeCursorState{
+ "timed_out": AttributeCursorStateTimedOut,
+ "open": AttributeCursorStateOpen,
+}
+
+// AttributeDirection specifies the value of the direction attribute.
+type AttributeDirection int
+
+const (
+ _ AttributeDirection = iota
+ AttributeDirectionReceive
+ AttributeDirectionTransmit
+)
+
+// String returns the string representation of the AttributeDirection.
+func (av AttributeDirection) String() string {
+ switch av {
+ case AttributeDirectionReceive:
+ return "receive"
+ case AttributeDirectionTransmit:
+ return "transmit"
+ }
+ return ""
+}
+
+// MapAttributeDirection is a helper map of string to AttributeDirection attribute value.
+var MapAttributeDirection = map[string]AttributeDirection{
+ "receive": AttributeDirectionReceive,
+ "transmit": AttributeDirectionTransmit,
+}
+
+// AttributeDiskDirection specifies the value of the disk_direction attribute.
+type AttributeDiskDirection int
+
+const (
+ _ AttributeDiskDirection = iota
+ AttributeDiskDirectionRead
+ AttributeDiskDirectionWrite
+ AttributeDiskDirectionTotal
+)
+
+// String returns the string representation of the AttributeDiskDirection.
+func (av AttributeDiskDirection) String() string {
+ switch av {
+ case AttributeDiskDirectionRead:
+ return "read"
+ case AttributeDiskDirectionWrite:
+ return "write"
+ case AttributeDiskDirectionTotal:
+ return "total"
+ }
+ return ""
+}
+
+// MapAttributeDiskDirection is a helper map of string to AttributeDiskDirection attribute value.
+var MapAttributeDiskDirection = map[string]AttributeDiskDirection{
+ "read": AttributeDiskDirectionRead,
+ "write": AttributeDiskDirectionWrite,
+ "total": AttributeDiskDirectionTotal,
+}
+
+// AttributeDiskStatus specifies the value of the disk_status attribute.
+type AttributeDiskStatus int
+
+const (
+ _ AttributeDiskStatus = iota
+ AttributeDiskStatusFree
+ AttributeDiskStatusUsed
+)
+
+// String returns the string representation of the AttributeDiskStatus.
+func (av AttributeDiskStatus) String() string {
+ switch av {
+ case AttributeDiskStatusFree:
+ return "free"
+ case AttributeDiskStatusUsed:
+ return "used"
+ }
+ return ""
+}
+
+// MapAttributeDiskStatus is a helper map of string to AttributeDiskStatus attribute value.
+var MapAttributeDiskStatus = map[string]AttributeDiskStatus{
+ "free": AttributeDiskStatusFree,
+ "used": AttributeDiskStatusUsed,
+}
+
+// AttributeDocumentStatus specifies the value of the document_status attribute.
+type AttributeDocumentStatus int
+
+const (
+ _ AttributeDocumentStatus = iota
+ AttributeDocumentStatusReturned
+ AttributeDocumentStatusInserted
+ AttributeDocumentStatusUpdated
+ AttributeDocumentStatusDeleted
+)
+
+// String returns the string representation of the AttributeDocumentStatus.
+func (av AttributeDocumentStatus) String() string {
+ switch av {
+ case AttributeDocumentStatusReturned:
+ return "returned"
+ case AttributeDocumentStatusInserted:
+ return "inserted"
+ case AttributeDocumentStatusUpdated:
+ return "updated"
+ case AttributeDocumentStatusDeleted:
+ return "deleted"
+ }
+ return ""
+}
+
+// MapAttributeDocumentStatus is a helper map of string to AttributeDocumentStatus attribute value.
+var MapAttributeDocumentStatus = map[string]AttributeDocumentStatus{
+ "returned": AttributeDocumentStatusReturned,
+ "inserted": AttributeDocumentStatusInserted,
+ "updated": AttributeDocumentStatusUpdated,
+ "deleted": AttributeDocumentStatusDeleted,
+}
+
+// AttributeExecutionType specifies the value of the execution_type attribute.
+type AttributeExecutionType int
+
+const (
+ _ AttributeExecutionType = iota
+ AttributeExecutionTypeReads
+ AttributeExecutionTypeWrites
+ AttributeExecutionTypeCommands
+)
+
+// String returns the string representation of the AttributeExecutionType.
+func (av AttributeExecutionType) String() string {
+ switch av {
+ case AttributeExecutionTypeReads:
+ return "reads"
+ case AttributeExecutionTypeWrites:
+ return "writes"
+ case AttributeExecutionTypeCommands:
+ return "commands"
+ }
+ return ""
+}
+
+// MapAttributeExecutionType is a helper map of string to AttributeExecutionType attribute value.
+var MapAttributeExecutionType = map[string]AttributeExecutionType{
+ "reads": AttributeExecutionTypeReads,
+ "writes": AttributeExecutionTypeWrites,
+ "commands": AttributeExecutionTypeCommands,
+}
+
+// AttributeGlobalLockState specifies the value of the global_lock_state attribute.
+type AttributeGlobalLockState int
+
+const (
+ _ AttributeGlobalLockState = iota
+ AttributeGlobalLockStateCurrentQueueTotal
+ AttributeGlobalLockStateCurrentQueueReaders
+ AttributeGlobalLockStateCurrentQueueWriters
+)
+
+// String returns the string representation of the AttributeGlobalLockState.
+func (av AttributeGlobalLockState) String() string {
+ switch av {
+ case AttributeGlobalLockStateCurrentQueueTotal:
+ return "current_queue_total"
+ case AttributeGlobalLockStateCurrentQueueReaders:
+ return "current_queue_readers"
+ case AttributeGlobalLockStateCurrentQueueWriters:
+ return "current_queue_writers"
+ }
+ return ""
+}
+
+// MapAttributeGlobalLockState is a helper map of string to AttributeGlobalLockState attribute value.
+var MapAttributeGlobalLockState = map[string]AttributeGlobalLockState{
+ "current_queue_total": AttributeGlobalLockStateCurrentQueueTotal,
+ "current_queue_readers": AttributeGlobalLockStateCurrentQueueReaders,
+ "current_queue_writers": AttributeGlobalLockStateCurrentQueueWriters,
+}
+
+// AttributeMemoryIssueType specifies the value of the memory_issue_type attribute.
+type AttributeMemoryIssueType int
+
+const (
+ _ AttributeMemoryIssueType = iota
+ AttributeMemoryIssueTypeExtraInfo
+ AttributeMemoryIssueTypeGlobalAccessesNotInMemory
+ AttributeMemoryIssueTypeExceptionsThrown
+)
+
+// String returns the string representation of the AttributeMemoryIssueType.
+func (av AttributeMemoryIssueType) String() string {
+ switch av {
+ case AttributeMemoryIssueTypeExtraInfo:
+ return "extra_info"
+ case AttributeMemoryIssueTypeGlobalAccessesNotInMemory:
+ return "global_accesses_not_in_memory"
+ case AttributeMemoryIssueTypeExceptionsThrown:
+ return "exceptions_thrown"
+ }
+ return ""
+}
+
+// MapAttributeMemoryIssueType is a helper map of string to AttributeMemoryIssueType attribute value.
+var MapAttributeMemoryIssueType = map[string]AttributeMemoryIssueType{
+ "extra_info": AttributeMemoryIssueTypeExtraInfo,
+ "global_accesses_not_in_memory": AttributeMemoryIssueTypeGlobalAccessesNotInMemory,
+ "exceptions_thrown": AttributeMemoryIssueTypeExceptionsThrown,
+}
+
+// AttributeMemoryState specifies the value of the memory_state attribute.
+type AttributeMemoryState int
+
+const (
+ _ AttributeMemoryState = iota
+ AttributeMemoryStateResident
+ AttributeMemoryStateVirtual
+ AttributeMemoryStateMapped
+ AttributeMemoryStateComputed
+ AttributeMemoryStateShared
+ AttributeMemoryStateFree
+ AttributeMemoryStateUsed
+)
+
+// String returns the string representation of the AttributeMemoryState.
+func (av AttributeMemoryState) String() string {
+ switch av {
+ case AttributeMemoryStateResident:
+ return "resident"
+ case AttributeMemoryStateVirtual:
+ return "virtual"
+ case AttributeMemoryStateMapped:
+ return "mapped"
+ case AttributeMemoryStateComputed:
+ return "computed"
+ case AttributeMemoryStateShared:
+ return "shared"
+ case AttributeMemoryStateFree:
+ return "free"
+ case AttributeMemoryStateUsed:
+ return "used"
+ }
+ return ""
+}
+
+// MapAttributeMemoryState is a helper map of string to AttributeMemoryState attribute value.
+var MapAttributeMemoryState = map[string]AttributeMemoryState{
+ "resident": AttributeMemoryStateResident,
+ "virtual": AttributeMemoryStateVirtual,
+ "mapped": AttributeMemoryStateMapped,
+ "computed": AttributeMemoryStateComputed,
+ "shared": AttributeMemoryStateShared,
+ "free": AttributeMemoryStateFree,
+ "used": AttributeMemoryStateUsed,
+}
+
+// AttributeMemoryStatus specifies the value of the memory_status attribute.
+type AttributeMemoryStatus int
+
+const (
+ _ AttributeMemoryStatus = iota
+ AttributeMemoryStatusAvailable
+ AttributeMemoryStatusBuffers
+ AttributeMemoryStatusCached
+ AttributeMemoryStatusFree
+ AttributeMemoryStatusShared
+ AttributeMemoryStatusUsed
+)
+
+// String returns the string representation of the AttributeMemoryStatus.
+func (av AttributeMemoryStatus) String() string {
+ switch av {
+ case AttributeMemoryStatusAvailable:
+ return "available"
+ case AttributeMemoryStatusBuffers:
+ return "buffers"
+ case AttributeMemoryStatusCached:
+ return "cached"
+ case AttributeMemoryStatusFree:
+ return "free"
+ case AttributeMemoryStatusShared:
+ return "shared"
+ case AttributeMemoryStatusUsed:
+ return "used"
+ }
+ return ""
+}
+
+// MapAttributeMemoryStatus is a helper map of string to AttributeMemoryStatus attribute value.
+var MapAttributeMemoryStatus = map[string]AttributeMemoryStatus{
+ "available": AttributeMemoryStatusAvailable,
+ "buffers": AttributeMemoryStatusBuffers,
+ "cached": AttributeMemoryStatusCached,
+ "free": AttributeMemoryStatusFree,
+ "shared": AttributeMemoryStatusShared,
+ "used": AttributeMemoryStatusUsed,
+}
+
+// AttributeObjectType specifies the value of the object_type attribute.
+type AttributeObjectType int
+
+const (
+ _ AttributeObjectType = iota
+ AttributeObjectTypeCollection
+ AttributeObjectTypeIndex
+ AttributeObjectTypeExtent
+ AttributeObjectTypeObject
+ AttributeObjectTypeView
+ AttributeObjectTypeStorage
+ AttributeObjectTypeData
+)
+
+// String returns the string representation of the AttributeObjectType.
+func (av AttributeObjectType) String() string {
+ switch av {
+ case AttributeObjectTypeCollection:
+ return "collection"
+ case AttributeObjectTypeIndex:
+ return "index"
+ case AttributeObjectTypeExtent:
+ return "extent"
+ case AttributeObjectTypeObject:
+ return "object"
+ case AttributeObjectTypeView:
+ return "view"
+ case AttributeObjectTypeStorage:
+ return "storage"
+ case AttributeObjectTypeData:
+ return "data"
+ }
+ return ""
+}
+
+// MapAttributeObjectType is a helper map of string to AttributeObjectType attribute value.
+var MapAttributeObjectType = map[string]AttributeObjectType{
+ "collection": AttributeObjectTypeCollection,
+ "index": AttributeObjectTypeIndex,
+ "extent": AttributeObjectTypeExtent,
+ "object": AttributeObjectTypeObject,
+ "view": AttributeObjectTypeView,
+ "storage": AttributeObjectTypeStorage,
+ "data": AttributeObjectTypeData,
+}
+
+// AttributeOperation specifies the value of the operation attribute.
+type AttributeOperation int
+
+const (
+ _ AttributeOperation = iota
+ AttributeOperationCmd
+ AttributeOperationQuery
+ AttributeOperationUpdate
+ AttributeOperationDelete
+ AttributeOperationGetmore
+ AttributeOperationInsert
+ AttributeOperationScanAndOrder
+)
+
+// String returns the string representation of the AttributeOperation.
+func (av AttributeOperation) String() string {
+ switch av {
+ case AttributeOperationCmd:
+ return "cmd"
+ case AttributeOperationQuery:
+ return "query"
+ case AttributeOperationUpdate:
+ return "update"
+ case AttributeOperationDelete:
+ return "delete"
+ case AttributeOperationGetmore:
+ return "getmore"
+ case AttributeOperationInsert:
+ return "insert"
+ case AttributeOperationScanAndOrder:
+ return "scan_and_order"
+ }
+ return ""
+}
+
+// MapAttributeOperation is a helper map of string to AttributeOperation attribute value.
+var MapAttributeOperation = map[string]AttributeOperation{
+ "cmd": AttributeOperationCmd,
+ "query": AttributeOperationQuery,
+ "update": AttributeOperationUpdate,
+ "delete": AttributeOperationDelete,
+ "getmore": AttributeOperationGetmore,
+ "insert": AttributeOperationInsert,
+ "scan_and_order": AttributeOperationScanAndOrder,
+}
+
+// AttributeOplogType specifies the value of the oplog_type attribute.
+type AttributeOplogType int
+
+const (
+ _ AttributeOplogType = iota
+ AttributeOplogTypeSlaveLagMasterTime
+ AttributeOplogTypeMasterTime
+ AttributeOplogTypeMasterLagTimeDiff
+)
+
+// String returns the string representation of the AttributeOplogType.
+func (av AttributeOplogType) String() string {
+ switch av {
+ case AttributeOplogTypeSlaveLagMasterTime:
+ return "slave_lag_master_time"
+ case AttributeOplogTypeMasterTime:
+ return "master_time"
+ case AttributeOplogTypeMasterLagTimeDiff:
+ return "master_lag_time_diff"
+ }
+ return ""
+}
+
+// MapAttributeOplogType is a helper map of string to AttributeOplogType attribute value.
+var MapAttributeOplogType = map[string]AttributeOplogType{
+ "slave_lag_master_time": AttributeOplogTypeSlaveLagMasterTime,
+ "master_time": AttributeOplogTypeMasterTime,
+ "master_lag_time_diff": AttributeOplogTypeMasterLagTimeDiff,
+}
+
+// AttributeScannedType specifies the value of the scanned_type attribute.
+type AttributeScannedType int
+
+const (
+ _ AttributeScannedType = iota
+ AttributeScannedTypeIndexItems
+ AttributeScannedTypeObjects
+)
+
+// String returns the string representation of the AttributeScannedType.
+func (av AttributeScannedType) String() string {
+ switch av {
+ case AttributeScannedTypeIndexItems:
+ return "index_items"
+ case AttributeScannedTypeObjects:
+ return "objects"
+ }
+ return ""
+}
+
+// MapAttributeScannedType is a helper map of string to AttributeScannedType attribute value.
+var MapAttributeScannedType = map[string]AttributeScannedType{
+ "index_items": AttributeScannedTypeIndexItems,
+ "objects": AttributeScannedTypeObjects,
+}
+
+// AttributeStorageStatus specifies the value of the storage_status attribute.
+type AttributeStorageStatus int
+
+const (
+ _ AttributeStorageStatus = iota
+ AttributeStorageStatusTotal
+ AttributeStorageStatusDataSize
+ AttributeStorageStatusIndexSize
+ AttributeStorageStatusDataSizeWoSystem
+)
+
+// String returns the string representation of the AttributeStorageStatus.
+func (av AttributeStorageStatus) String() string {
+ switch av {
+ case AttributeStorageStatusTotal:
+ return "total"
+ case AttributeStorageStatusDataSize:
+ return "data_size"
+ case AttributeStorageStatusIndexSize:
+ return "index_size"
+ case AttributeStorageStatusDataSizeWoSystem:
+ return "data_size_wo_system"
+ }
+ return ""
+}
+
+// MapAttributeStorageStatus is a helper map of string to AttributeStorageStatus attribute value.
+var MapAttributeStorageStatus = map[string]AttributeStorageStatus{
+ "total": AttributeStorageStatusTotal,
+ "data_size": AttributeStorageStatusDataSize,
+ "index_size": AttributeStorageStatusIndexSize,
+ "data_size_wo_system": AttributeStorageStatusDataSizeWoSystem,
+}
+
+// AttributeTicketType specifies the value of the ticket_type attribute.
+type AttributeTicketType int
+
+const (
+ _ AttributeTicketType = iota
+ AttributeTicketTypeAvailableReads
+ AttributeTicketTypeAvailableWrites
+)
+
+// String returns the string representation of the AttributeTicketType.
+func (av AttributeTicketType) String() string {
+ switch av {
+ case AttributeTicketTypeAvailableReads:
+ return "available_reads"
+ case AttributeTicketTypeAvailableWrites:
+ return "available_writes"
+ }
+ return ""
+}
+
+// MapAttributeTicketType is a helper map of string to AttributeTicketType attribute value.
+var MapAttributeTicketType = map[string]AttributeTicketType{
+ "available_reads": AttributeTicketTypeAvailableReads,
+ "available_writes": AttributeTicketTypeAvailableWrites,
+}
+
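For orientation, a consumer of these generated helpers would typically translate the raw string reported by the Atlas API into the typed constant via the Map helper, then pass that constant to the matching Record function. A minimal sketch follows; the variables rawOperation, ts, and val are assumed for illustration, and AttributeClusterRolePrimary is assumed to be generated earlier in this file under the same naming pattern:

    // Convert an incoming string into the typed attribute constant; strings
    // that are not valid enum values are simply absent from the map.
    if op, ok := MapAttributeOperation[rawOperation]; ok {
        // The generated builder calls String() on the typed value internally.
        mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, val, op, AttributeClusterRolePrimary)
    }
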
type metricMongodbatlasDbCounts struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -3392,7 +4112,7 @@ func (m *metricMongodbatlasSystemPagingUsageAverage) init() {
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricMongodbatlasSystemPagingUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, directionAttributeValue string) {
+func (m *metricMongodbatlasSystemPagingUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryStateAttributeValue string) {
if !m.settings.Enabled {
return
}
@@ -3400,7 +4120,7 @@ func (m *metricMongodbatlasSystemPagingUsageAverage) recordDataPoint(start pcomm
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleVal(val)
- dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue))
+ dp.Attributes().Insert(A.MemoryState, pcommon.NewValueString(memoryStateAttributeValue))
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3443,7 +4163,7 @@ func (m *metricMongodbatlasSystemPagingUsageMax) init() {
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
-func (m *metricMongodbatlasSystemPagingUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, directionAttributeValue string) {
+func (m *metricMongodbatlasSystemPagingUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryStateAttributeValue string) {
if !m.settings.Enabled {
return
}
@@ -3451,7 +4171,7 @@ func (m *metricMongodbatlasSystemPagingUsageMax) recordDataPoint(start pcommon.T
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleVal(val)
- dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue))
+ dp.Attributes().Insert(A.MemoryState, pcommon.NewValueString(memoryStateAttributeValue))
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3804,68 +4524,68 @@ func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics {
}
// RecordMongodbatlasDbCountsDataPoint adds a data point to mongodbatlas.db.counts metric.
-func (mb *MetricsBuilder) RecordMongodbatlasDbCountsDataPoint(ts pcommon.Timestamp, val float64, objectTypeAttributeValue string) {
- mb.metricMongodbatlasDbCounts.recordDataPoint(mb.startTime, ts, val, objectTypeAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasDbCountsDataPoint(ts pcommon.Timestamp, val float64, objectTypeAttributeValue AttributeObjectType) {
+ mb.metricMongodbatlasDbCounts.recordDataPoint(mb.startTime, ts, val, objectTypeAttributeValue.String())
}
// RecordMongodbatlasDbSizeDataPoint adds a data point to mongodbatlas.db.size metric.
-func (mb *MetricsBuilder) RecordMongodbatlasDbSizeDataPoint(ts pcommon.Timestamp, val float64, objectTypeAttributeValue string) {
- mb.metricMongodbatlasDbSize.recordDataPoint(mb.startTime, ts, val, objectTypeAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasDbSizeDataPoint(ts pcommon.Timestamp, val float64, objectTypeAttributeValue AttributeObjectType) {
+ mb.metricMongodbatlasDbSize.recordDataPoint(mb.startTime, ts, val, objectTypeAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionIopsAverageDataPoint adds a data point to mongodbatlas.disk.partition.iops.average metric.
-func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) {
- mb.metricMongodbatlasDiskPartitionIopsAverage.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue AttributeDiskDirection) {
+ mb.metricMongodbatlasDiskPartitionIopsAverage.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionIopsMaxDataPoint adds a data point to mongodbatlas.disk.partition.iops.max metric.
-func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) {
- mb.metricMongodbatlasDiskPartitionIopsMax.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue AttributeDiskDirection) {
+ mb.metricMongodbatlasDiskPartitionIopsMax.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionLatencyAverageDataPoint adds a data point to mongodbatlas.disk.partition.latency.average metric.
-func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionLatencyAverageDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) {
- mb.metricMongodbatlasDiskPartitionLatencyAverage.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionLatencyAverageDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue AttributeDiskDirection) {
+ mb.metricMongodbatlasDiskPartitionLatencyAverage.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionLatencyMaxDataPoint adds a data point to mongodbatlas.disk.partition.latency.max metric.
-func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) {
- mb.metricMongodbatlasDiskPartitionLatencyMax.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue AttributeDiskDirection) {
+ mb.metricMongodbatlasDiskPartitionLatencyMax.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionSpaceAverageDataPoint adds a data point to mongodbatlas.disk.partition.space.average metric.
-func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) {
- mb.metricMongodbatlasDiskPartitionSpaceAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue AttributeDiskStatus) {
+ mb.metricMongodbatlasDiskPartitionSpaceAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionSpaceMaxDataPoint adds a data point to mongodbatlas.disk.partition.space.max metric.
-func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) {
- mb.metricMongodbatlasDiskPartitionSpaceMax.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue AttributeDiskStatus) {
+ mb.metricMongodbatlasDiskPartitionSpaceMax.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionUsageAverageDataPoint adds a data point to mongodbatlas.disk.partition.usage.average metric.
-func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUsageAverageDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) {
- mb.metricMongodbatlasDiskPartitionUsageAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUsageAverageDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue AttributeDiskStatus) {
+ mb.metricMongodbatlasDiskPartitionUsageAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionUsageMaxDataPoint adds a data point to mongodbatlas.disk.partition.usage.max metric.
-func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUsageMaxDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) {
- mb.metricMongodbatlasDiskPartitionUsageMax.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUsageMaxDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue AttributeDiskStatus) {
+ mb.metricMongodbatlasDiskPartitionUsageMax.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint adds a data point to mongodbatlas.disk.partition.utilization.average metric.
-func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) {
- mb.metricMongodbatlasDiskPartitionUtilizationAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue AttributeDiskStatus) {
+ mb.metricMongodbatlasDiskPartitionUtilizationAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint adds a data point to mongodbatlas.disk.partition.utilization.max metric.
-func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) {
- mb.metricMongodbatlasDiskPartitionUtilizationMax.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue AttributeDiskStatus) {
+ mb.metricMongodbatlasDiskPartitionUtilizationMax.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String())
}
// RecordMongodbatlasProcessAssertsDataPoint adds a data point to mongodbatlas.process.asserts metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessAssertsDataPoint(ts pcommon.Timestamp, val float64, assertTypeAttributeValue string) {
- mb.metricMongodbatlasProcessAsserts.recordDataPoint(mb.startTime, ts, val, assertTypeAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessAssertsDataPoint(ts pcommon.Timestamp, val float64, assertTypeAttributeValue AttributeAssertType) {
+ mb.metricMongodbatlasProcessAsserts.recordDataPoint(mb.startTime, ts, val, assertTypeAttributeValue.String())
}
// RecordMongodbatlasProcessBackgroundFlushDataPoint adds a data point to mongodbatlas.process.background_flush metric.
@@ -3874,13 +4594,13 @@ func (mb *MetricsBuilder) RecordMongodbatlasProcessBackgroundFlushDataPoint(ts p
}
// RecordMongodbatlasProcessCacheIoDataPoint adds a data point to mongodbatlas.process.cache.io metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheIoDataPoint(ts pcommon.Timestamp, val float64, cacheDirectionAttributeValue string) {
- mb.metricMongodbatlasProcessCacheIo.recordDataPoint(mb.startTime, ts, val, cacheDirectionAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheIoDataPoint(ts pcommon.Timestamp, val float64, cacheDirectionAttributeValue AttributeCacheDirection) {
+ mb.metricMongodbatlasProcessCacheIo.recordDataPoint(mb.startTime, ts, val, cacheDirectionAttributeValue.String())
}
// RecordMongodbatlasProcessCacheSizeDataPoint adds a data point to mongodbatlas.process.cache.size metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheSizeDataPoint(ts pcommon.Timestamp, val float64, cacheStatusAttributeValue string) {
- mb.metricMongodbatlasProcessCacheSize.recordDataPoint(mb.startTime, ts, val, cacheStatusAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheSizeDataPoint(ts pcommon.Timestamp, val float64, cacheStatusAttributeValue AttributeCacheStatus) {
+ mb.metricMongodbatlasProcessCacheSize.recordDataPoint(mb.startTime, ts, val, cacheStatusAttributeValue.String())
}
// RecordMongodbatlasProcessConnectionsDataPoint adds a data point to mongodbatlas.process.connections metric.
@@ -3889,88 +4609,88 @@ func (mb *MetricsBuilder) RecordMongodbatlasProcessConnectionsDataPoint(ts pcomm
}
// RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint adds a data point to mongodbatlas.process.cpu.children.normalized.usage.average metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
- mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
+ mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint adds a data point to mongodbatlas.process.cpu.children.normalized.usage.max metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
- mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
+ mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint adds a data point to mongodbatlas.process.cpu.children.usage.average metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
- mb.metricMongodbatlasProcessCPUChildrenUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
+ mb.metricMongodbatlasProcessCPUChildrenUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint adds a data point to mongodbatlas.process.cpu.children.usage.max metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
- mb.metricMongodbatlasProcessCPUChildrenUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
+ mb.metricMongodbatlasProcessCPUChildrenUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint adds a data point to mongodbatlas.process.cpu.normalized.usage.average metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
- mb.metricMongodbatlasProcessCPUNormalizedUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
+ mb.metricMongodbatlasProcessCPUNormalizedUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint adds a data point to mongodbatlas.process.cpu.normalized.usage.max metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
- mb.metricMongodbatlasProcessCPUNormalizedUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
+ mb.metricMongodbatlasProcessCPUNormalizedUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUUsageAverageDataPoint adds a data point to mongodbatlas.process.cpu.usage.average metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
- mb.metricMongodbatlasProcessCPUUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
+ mb.metricMongodbatlasProcessCPUUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUUsageMaxDataPoint adds a data point to mongodbatlas.process.cpu.usage.max metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
- mb.metricMongodbatlasProcessCPUUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
+ mb.metricMongodbatlasProcessCPUUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCursorsDataPoint adds a data point to mongodbatlas.process.cursors metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessCursorsDataPoint(ts pcommon.Timestamp, val float64, cursorStateAttributeValue string) {
- mb.metricMongodbatlasProcessCursors.recordDataPoint(mb.startTime, ts, val, cursorStateAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessCursorsDataPoint(ts pcommon.Timestamp, val float64, cursorStateAttributeValue AttributeCursorState) {
+ mb.metricMongodbatlasProcessCursors.recordDataPoint(mb.startTime, ts, val, cursorStateAttributeValue.String())
}
// RecordMongodbatlasProcessDbDocumentRateDataPoint adds a data point to mongodbatlas.process.db.document.rate metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessDbDocumentRateDataPoint(ts pcommon.Timestamp, val float64, documentStatusAttributeValue string) {
- mb.metricMongodbatlasProcessDbDocumentRate.recordDataPoint(mb.startTime, ts, val, documentStatusAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessDbDocumentRateDataPoint(ts pcommon.Timestamp, val float64, documentStatusAttributeValue AttributeDocumentStatus) {
+ mb.metricMongodbatlasProcessDbDocumentRate.recordDataPoint(mb.startTime, ts, val, documentStatusAttributeValue.String())
}
// RecordMongodbatlasProcessDbOperationsRateDataPoint adds a data point to mongodbatlas.process.db.operations.rate metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessDbOperationsRateDataPoint(ts pcommon.Timestamp, val float64, operationAttributeValue string, clusterRoleAttributeValue string) {
- mb.metricMongodbatlasProcessDbOperationsRate.recordDataPoint(mb.startTime, ts, val, operationAttributeValue, clusterRoleAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessDbOperationsRateDataPoint(ts pcommon.Timestamp, val float64, operationAttributeValue AttributeOperation, clusterRoleAttributeValue AttributeClusterRole) {
+ mb.metricMongodbatlasProcessDbOperationsRate.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String(), clusterRoleAttributeValue.String())
}
// RecordMongodbatlasProcessDbOperationsTimeDataPoint adds a data point to mongodbatlas.process.db.operations.time metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts pcommon.Timestamp, val float64, executionTypeAttributeValue string) {
- mb.metricMongodbatlasProcessDbOperationsTime.recordDataPoint(mb.startTime, ts, val, executionTypeAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts pcommon.Timestamp, val float64, executionTypeAttributeValue AttributeExecutionType) {
+ mb.metricMongodbatlasProcessDbOperationsTime.recordDataPoint(mb.startTime, ts, val, executionTypeAttributeValue.String())
}
// RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint adds a data point to mongodbatlas.process.db.query_executor.scanned metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint(ts pcommon.Timestamp, val float64, scannedTypeAttributeValue string) {
- mb.metricMongodbatlasProcessDbQueryExecutorScanned.recordDataPoint(mb.startTime, ts, val, scannedTypeAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint(ts pcommon.Timestamp, val float64, scannedTypeAttributeValue AttributeScannedType) {
+ mb.metricMongodbatlasProcessDbQueryExecutorScanned.recordDataPoint(mb.startTime, ts, val, scannedTypeAttributeValue.String())
}
// RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint adds a data point to mongodbatlas.process.db.query_targeting.scanned_per_returned metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint(ts pcommon.Timestamp, val float64, scannedTypeAttributeValue string) {
- mb.metricMongodbatlasProcessDbQueryTargetingScannedPerReturned.recordDataPoint(mb.startTime, ts, val, scannedTypeAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint(ts pcommon.Timestamp, val float64, scannedTypeAttributeValue AttributeScannedType) {
+ mb.metricMongodbatlasProcessDbQueryTargetingScannedPerReturned.recordDataPoint(mb.startTime, ts, val, scannedTypeAttributeValue.String())
}
// RecordMongodbatlasProcessDbStorageDataPoint adds a data point to mongodbatlas.process.db.storage metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessDbStorageDataPoint(ts pcommon.Timestamp, val float64, storageStatusAttributeValue string) {
- mb.metricMongodbatlasProcessDbStorage.recordDataPoint(mb.startTime, ts, val, storageStatusAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessDbStorageDataPoint(ts pcommon.Timestamp, val float64, storageStatusAttributeValue AttributeStorageStatus) {
+ mb.metricMongodbatlasProcessDbStorage.recordDataPoint(mb.startTime, ts, val, storageStatusAttributeValue.String())
}
// RecordMongodbatlasProcessFtsCPUUsageDataPoint adds a data point to mongodbatlas.process.fts.cpu.usage metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessFtsCPUUsageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
- mb.metricMongodbatlasProcessFtsCPUUsage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessFtsCPUUsageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
+ mb.metricMongodbatlasProcessFtsCPUUsage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessGlobalLockDataPoint adds a data point to mongodbatlas.process.global_lock metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessGlobalLockDataPoint(ts pcommon.Timestamp, val float64, globalLockStateAttributeValue string) {
- mb.metricMongodbatlasProcessGlobalLock.recordDataPoint(mb.startTime, ts, val, globalLockStateAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessGlobalLockDataPoint(ts pcommon.Timestamp, val float64, globalLockStateAttributeValue AttributeGlobalLockState) {
+ mb.metricMongodbatlasProcessGlobalLock.recordDataPoint(mb.startTime, ts, val, globalLockStateAttributeValue.String())
}
// RecordMongodbatlasProcessIndexBtreeMissRatioDataPoint adds a data point to mongodbatlas.process.index.btree_miss_ratio metric.
@@ -3979,8 +4699,8 @@ func (mb *MetricsBuilder) RecordMongodbatlasProcessIndexBtreeMissRatioDataPoint(
}
// RecordMongodbatlasProcessIndexCountersDataPoint adds a data point to mongodbatlas.process.index.counters metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessIndexCountersDataPoint(ts pcommon.Timestamp, val float64, btreeCounterTypeAttributeValue string) {
- mb.metricMongodbatlasProcessIndexCounters.recordDataPoint(mb.startTime, ts, val, btreeCounterTypeAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessIndexCountersDataPoint(ts pcommon.Timestamp, val float64, btreeCounterTypeAttributeValue AttributeBtreeCounterType) {
+ mb.metricMongodbatlasProcessIndexCounters.recordDataPoint(mb.startTime, ts, val, btreeCounterTypeAttributeValue.String())
}
// RecordMongodbatlasProcessJournalingCommitsDataPoint adds a data point to mongodbatlas.process.journaling.commits metric.
@@ -3999,13 +4719,13 @@ func (mb *MetricsBuilder) RecordMongodbatlasProcessJournalingWrittenDataPoint(ts
}
// RecordMongodbatlasProcessMemoryUsageDataPoint adds a data point to mongodbatlas.process.memory.usage metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessMemoryUsageDataPoint(ts pcommon.Timestamp, val float64, memoryStateAttributeValue string) {
- mb.metricMongodbatlasProcessMemoryUsage.recordDataPoint(mb.startTime, ts, val, memoryStateAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessMemoryUsageDataPoint(ts pcommon.Timestamp, val float64, memoryStateAttributeValue AttributeMemoryState) {
+ mb.metricMongodbatlasProcessMemoryUsage.recordDataPoint(mb.startTime, ts, val, memoryStateAttributeValue.String())
}
// RecordMongodbatlasProcessNetworkIoDataPoint adds a data point to mongodbatlas.process.network.io metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessNetworkIoDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue string) {
- mb.metricMongodbatlasProcessNetworkIo.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessNetworkIoDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue AttributeDirection) {
+ mb.metricMongodbatlasProcessNetworkIo.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordMongodbatlasProcessNetworkRequestsDataPoint adds a data point to mongodbatlas.process.network.requests metric.
@@ -4019,13 +4739,13 @@ func (mb *MetricsBuilder) RecordMongodbatlasProcessOplogRateDataPoint(ts pcommon
}
// RecordMongodbatlasProcessOplogTimeDataPoint adds a data point to mongodbatlas.process.oplog.time metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessOplogTimeDataPoint(ts pcommon.Timestamp, val float64, oplogTypeAttributeValue string) {
- mb.metricMongodbatlasProcessOplogTime.recordDataPoint(mb.startTime, ts, val, oplogTypeAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessOplogTimeDataPoint(ts pcommon.Timestamp, val float64, oplogTypeAttributeValue AttributeOplogType) {
+ mb.metricMongodbatlasProcessOplogTime.recordDataPoint(mb.startTime, ts, val, oplogTypeAttributeValue.String())
}
// RecordMongodbatlasProcessPageFaultsDataPoint adds a data point to mongodbatlas.process.page_faults metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessPageFaultsDataPoint(ts pcommon.Timestamp, val float64, memoryIssueTypeAttributeValue string) {
- mb.metricMongodbatlasProcessPageFaults.recordDataPoint(mb.startTime, ts, val, memoryIssueTypeAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessPageFaultsDataPoint(ts pcommon.Timestamp, val float64, memoryIssueTypeAttributeValue AttributeMemoryIssueType) {
+ mb.metricMongodbatlasProcessPageFaults.recordDataPoint(mb.startTime, ts, val, memoryIssueTypeAttributeValue.String())
}
// RecordMongodbatlasProcessRestartsDataPoint adds a data point to mongodbatlas.process.restarts metric.
@@ -4034,38 +4754,38 @@ func (mb *MetricsBuilder) RecordMongodbatlasProcessRestartsDataPoint(ts pcommon.
}
// RecordMongodbatlasProcessTicketsDataPoint adds a data point to mongodbatlas.process.tickets metric.
-func (mb *MetricsBuilder) RecordMongodbatlasProcessTicketsDataPoint(ts pcommon.Timestamp, val float64, ticketTypeAttributeValue string) {
- mb.metricMongodbatlasProcessTickets.recordDataPoint(mb.startTime, ts, val, ticketTypeAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasProcessTicketsDataPoint(ts pcommon.Timestamp, val float64, ticketTypeAttributeValue AttributeTicketType) {
+ mb.metricMongodbatlasProcessTickets.recordDataPoint(mb.startTime, ts, val, ticketTypeAttributeValue.String())
}
// RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint adds a data point to mongodbatlas.system.cpu.normalized.usage.average metric.
-func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
- mb.metricMongodbatlasSystemCPUNormalizedUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
+ mb.metricMongodbatlasSystemCPUNormalizedUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint adds a data point to mongodbatlas.system.cpu.normalized.usage.max metric.
-func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
- mb.metricMongodbatlasSystemCPUNormalizedUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
+ mb.metricMongodbatlasSystemCPUNormalizedUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasSystemCPUUsageAverageDataPoint adds a data point to mongodbatlas.system.cpu.usage.average metric.
-func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
- mb.metricMongodbatlasSystemCPUUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
+ mb.metricMongodbatlasSystemCPUUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasSystemCPUUsageMaxDataPoint adds a data point to mongodbatlas.system.cpu.usage.max metric.
-func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
- mb.metricMongodbatlasSystemCPUUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
+ mb.metricMongodbatlasSystemCPUUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint adds a data point to mongodbatlas.system.fts.cpu.normalized.usage metric.
-func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
- mb.metricMongodbatlasSystemFtsCPUNormalizedUsage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
+ mb.metricMongodbatlasSystemFtsCPUNormalizedUsage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasSystemFtsCPUUsageDataPoint adds a data point to mongodbatlas.system.fts.cpu.usage metric.
-func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsCPUUsageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
- mb.metricMongodbatlasSystemFtsCPUUsage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsCPUUsageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
+ mb.metricMongodbatlasSystemFtsCPUUsage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasSystemFtsDiskUsedDataPoint adds a data point to mongodbatlas.system.fts.disk.used metric.
@@ -4074,48 +4794,48 @@ func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsDiskUsedDataPoint(ts pcommo
}
// RecordMongodbatlasSystemFtsMemoryUsageDataPoint adds a data point to mongodbatlas.system.fts.memory.usage metric.
-func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts pcommon.Timestamp, val float64, memoryStateAttributeValue string) {
- mb.metricMongodbatlasSystemFtsMemoryUsage.recordDataPoint(mb.startTime, ts, val, memoryStateAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts pcommon.Timestamp, val float64, memoryStateAttributeValue AttributeMemoryState) {
+ mb.metricMongodbatlasSystemFtsMemoryUsage.recordDataPoint(mb.startTime, ts, val, memoryStateAttributeValue.String())
}
// RecordMongodbatlasSystemMemoryUsageAverageDataPoint adds a data point to mongodbatlas.system.memory.usage.average metric.
-func (mb *MetricsBuilder) RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts pcommon.Timestamp, val float64, memoryStatusAttributeValue string) {
- mb.metricMongodbatlasSystemMemoryUsageAverage.recordDataPoint(mb.startTime, ts, val, memoryStatusAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts pcommon.Timestamp, val float64, memoryStatusAttributeValue AttributeMemoryStatus) {
+ mb.metricMongodbatlasSystemMemoryUsageAverage.recordDataPoint(mb.startTime, ts, val, memoryStatusAttributeValue.String())
}
// RecordMongodbatlasSystemMemoryUsageMaxDataPoint adds a data point to mongodbatlas.system.memory.usage.max metric.
-func (mb *MetricsBuilder) RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts pcommon.Timestamp, val float64, memoryStatusAttributeValue string) {
- mb.metricMongodbatlasSystemMemoryUsageMax.recordDataPoint(mb.startTime, ts, val, memoryStatusAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts pcommon.Timestamp, val float64, memoryStatusAttributeValue AttributeMemoryStatus) {
+ mb.metricMongodbatlasSystemMemoryUsageMax.recordDataPoint(mb.startTime, ts, val, memoryStatusAttributeValue.String())
}
// RecordMongodbatlasSystemNetworkIoAverageDataPoint adds a data point to mongodbatlas.system.network.io.average metric.
-func (mb *MetricsBuilder) RecordMongodbatlasSystemNetworkIoAverageDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue string) {
- mb.metricMongodbatlasSystemNetworkIoAverage.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasSystemNetworkIoAverageDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue AttributeDirection) {
+ mb.metricMongodbatlasSystemNetworkIoAverage.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordMongodbatlasSystemNetworkIoMaxDataPoint adds a data point to mongodbatlas.system.network.io.max metric.
-func (mb *MetricsBuilder) RecordMongodbatlasSystemNetworkIoMaxDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue string) {
- mb.metricMongodbatlasSystemNetworkIoMax.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasSystemNetworkIoMaxDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue AttributeDirection) {
+ mb.metricMongodbatlasSystemNetworkIoMax.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordMongodbatlasSystemPagingIoAverageDataPoint adds a data point to mongodbatlas.system.paging.io.average metric.
-func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingIoAverageDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue string) {
- mb.metricMongodbatlasSystemPagingIoAverage.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingIoAverageDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue AttributeDirection) {
+ mb.metricMongodbatlasSystemPagingIoAverage.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordMongodbatlasSystemPagingIoMaxDataPoint adds a data point to mongodbatlas.system.paging.io.max metric.
-func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingIoMaxDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue string) {
- mb.metricMongodbatlasSystemPagingIoMax.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingIoMaxDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue AttributeDirection) {
+ mb.metricMongodbatlasSystemPagingIoMax.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordMongodbatlasSystemPagingUsageAverageDataPoint adds a data point to mongodbatlas.system.paging.usage.average metric.
-func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingUsageAverageDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue string) {
- mb.metricMongodbatlasSystemPagingUsageAverage.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingUsageAverageDataPoint(ts pcommon.Timestamp, val float64, memoryStateAttributeValue AttributeMemoryState) {
+ mb.metricMongodbatlasSystemPagingUsageAverage.recordDataPoint(mb.startTime, ts, val, memoryStateAttributeValue.String())
}
// RecordMongodbatlasSystemPagingUsageMaxDataPoint adds a data point to mongodbatlas.system.paging.usage.max metric.
-func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingUsageMaxDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue string) {
- mb.metricMongodbatlasSystemPagingUsageMax.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingUsageMaxDataPoint(ts pcommon.Timestamp, val float64, memoryStateAttributeValue AttributeMemoryState) {
+ mb.metricMongodbatlasSystemPagingUsageMax.recordDataPoint(mb.startTime, ts, val, memoryStateAttributeValue.String())
}
// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
@@ -4200,275 +4920,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeAssertType are the possible values that the attribute "assert_type" can have.
-var AttributeAssertType = struct {
- Regular string
- Warning string
- Msg string
- User string
-}{
- "regular",
- "warning",
- "msg",
- "user",
-}
-
-// AttributeBtreeCounterType are the possible values that the attribute "btree_counter_type" can have.
-var AttributeBtreeCounterType = struct {
- Accesses string
- Hits string
- Misses string
-}{
- "accesses",
- "hits",
- "misses",
-}
-
-// AttributeCacheDirection are the possible values that the attribute "cache_direction" can have.
-var AttributeCacheDirection = struct {
- ReadInto string
- WrittenFrom string
-}{
- "read_into",
- "written_from",
-}
-
-// AttributeCacheStatus are the possible values that the attribute "cache_status" can have.
-var AttributeCacheStatus = struct {
- Dirty string
- Used string
-}{
- "dirty",
- "used",
-}
-
-// AttributeClusterRole are the possible values that the attribute "cluster_role" can have.
-var AttributeClusterRole = struct {
- Primary string
- Replica string
-}{
- "primary",
- "replica",
-}
-
-// AttributeCPUState are the possible values that the attribute "cpu_state" can have.
-var AttributeCPUState = struct {
- Kernel string
- User string
- Nice string
- Iowait string
- Irq string
- Softirq string
- Guest string
- Steal string
-}{
- "kernel",
- "user",
- "nice",
- "iowait",
- "irq",
- "softirq",
- "guest",
- "steal",
-}
-
-// AttributeCursorState are the possible values that the attribute "cursor_state" can have.
-var AttributeCursorState = struct {
- TimedOut string
- Open string
-}{
- "timed_out",
- "open",
-}
-
-// AttributeDirection are the possible values that the attribute "direction" can have.
-var AttributeDirection = struct {
- Receive string
- Transmit string
-}{
- "receive",
- "transmit",
-}
-
-// AttributeDiskDirection are the possible values that the attribute "disk_direction" can have.
-var AttributeDiskDirection = struct {
- Read string
- Write string
- Total string
-}{
- "read",
- "write",
- "total",
-}
-
-// AttributeDiskStatus are the possible values that the attribute "disk_status" can have.
-var AttributeDiskStatus = struct {
- Free string
- Used string
-}{
- "free",
- "used",
-}
-
-// AttributeDocumentStatus are the possible values that the attribute "document_status" can have.
-var AttributeDocumentStatus = struct {
- Returned string
- Inserted string
- Updated string
- Deleted string
-}{
- "returned",
- "inserted",
- "updated",
- "deleted",
-}
-
-// AttributeExecutionType are the possible values that the attribute "execution_type" can have.
-var AttributeExecutionType = struct {
- Reads string
- Writes string
- Commands string
-}{
- "reads",
- "writes",
- "commands",
-}
-
-// AttributeGlobalLockState are the possible values that the attribute "global_lock_state" can have.
-var AttributeGlobalLockState = struct {
- CurrentQueueTotal string
- CurrentQueueReaders string
- CurrentQueueWriters string
-}{
- "current_queue_total",
- "current_queue_readers",
- "current_queue_writers",
-}
-
-// AttributeMemoryIssueType are the possible values that the attribute "memory_issue_type" can have.
-var AttributeMemoryIssueType = struct {
- ExtraInfo string
- GlobalAccessesNotInMemory string
- ExceptionsThrown string
-}{
- "extra_info",
- "global_accesses_not_in_memory",
- "exceptions_thrown",
-}
-
-// AttributeMemoryState are the possible values that the attribute "memory_state" can have.
-var AttributeMemoryState = struct {
- Resident string
- Virtual string
- Mapped string
- Computed string
- Shared string
- Free string
- Used string
-}{
- "resident",
- "virtual",
- "mapped",
- "computed",
- "shared",
- "free",
- "used",
-}
-
-// AttributeMemoryStatus are the possible values that the attribute "memory_status" can have.
-var AttributeMemoryStatus = struct {
- Available string
- Buffers string
- Cached string
- Free string
- Shared string
- Used string
-}{
- "available",
- "buffers",
- "cached",
- "free",
- "shared",
- "used",
-}
-
-// AttributeObjectType are the possible values that the attribute "object_type" can have.
-var AttributeObjectType = struct {
- Collection string
- Index string
- Extent string
- Object string
- View string
- Storage string
- Data string
-}{
- "collection",
- "index",
- "extent",
- "object",
- "view",
- "storage",
- "data",
-}
-
-// AttributeOperation are the possible values that the attribute "operation" can have.
-var AttributeOperation = struct {
- Cmd string
- Query string
- Update string
- Delete string
- Getmore string
- Insert string
- ScanAndOrder string
-}{
- "cmd",
- "query",
- "update",
- "delete",
- "getmore",
- "insert",
- "scan_and_order",
-}
-
-// AttributeOplogType are the possible values that the attribute "oplog_type" can have.
-var AttributeOplogType = struct {
- SlaveLagMasterTime string
- MasterTime string
- MasterLagTimeDiff string
-}{
- "slave_lag_master_time",
- "master_time",
- "master_lag_time_diff",
-}
-
-// AttributeScannedType are the possible values that the attribute "scanned_type" can have.
-var AttributeScannedType = struct {
- IndexItems string
- Objects string
-}{
- "index_items",
- "objects",
-}
-
-// AttributeStorageStatus are the possible values that the attribute "storage_status" can have.
-var AttributeStorageStatus = struct {
- Total string
- DataSize string
- IndexSize string
- DataSizeWoSystem string
-}{
- "total",
- "data_size",
- "index_size",
- "data_size_wo_system",
-}
-
-// AttributeTicketType are the possible values that the attribute "ticket_type" can have.
-var AttributeTicketType = struct {
- AvailableReads string
- AvailableWrites string
-}{
- "available_reads",
- "available_writes",
-}
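
The struct-of-strings constants removed above are superseded by the typed constants added earlier in this file, so call sites change only the value they pass, as the receiver diff below shows. A condensed before/after sketch, using lines taken directly from that diff:

    // before: string field on the removed struct
    mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertType.Regular)
    // after: typed constant; the generated builder applies String() when recording
    mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertTypeRegular)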
diff --git a/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go b/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go
index 588d89a11139..b9c89d38daac 100644
--- a/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go
+++ b/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go
@@ -31,107 +31,107 @@ func getRecordFunc(metricName string) metricRecordFunc {
case "PROCESS_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User)
+ mb.RecordMongodbatlasProcessCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "MAX_PROCESS_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.User)
+ mb.RecordMongodbatlasProcessCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "PROCESS_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel)
+ mb.RecordMongodbatlasProcessCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "MAX_PROCESS_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel)
+ mb.RecordMongodbatlasProcessCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "PROCESS_CPU_CHILDREN_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User)
+ mb.RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "MAX_PROCESS_CPU_CHILDREN_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.User)
+ mb.RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "PROCESS_CPU_CHILDREN_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel)
+ mb.RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "MAX_PROCESS_CPU_CHILDREN_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel)
+ mb.RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
// MongoDB CPU usage scaled to a range of 0% to 100%. Atlas computes this value by dividing by the number of CPU cores.
case "PROCESS_NORMALIZED_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User)
+ mb.RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "MAX_PROCESS_NORMALIZED_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.User)
+ mb.RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "PROCESS_NORMALIZED_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel)
+ mb.RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "MAX_PROCESS_NORMALIZED_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel)
+ mb.RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "PROCESS_NORMALIZED_CPU_CHILDREN_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User)
+ mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
// Context: Process
case "MAX_PROCESS_NORMALIZED_CPU_CHILDREN_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.User)
+ mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "PROCESS_NORMALIZED_CPU_CHILDREN_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel)
+ mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "MAX_PROCESS_NORMALIZED_CPU_CHILDREN_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel)
+ mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
// Rate of asserts for a MongoDB process found in the asserts document that the serverStatus command generates.
case "ASSERT_REGULAR":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertType.Regular)
+ mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertTypeRegular)
}
case "ASSERT_WARNING":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertType.Warning)
+ mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertTypeWarning)
}
case "ASSERT_MSG":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertType.Msg)
+ mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertTypeMsg)
}
case "ASSERT_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertType.User)
+ mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertTypeUser)
}
// Amount of data flushed in the background.
@@ -145,32 +145,32 @@ func getRecordFunc(metricName string) metricRecordFunc {
case "CACHE_BYTES_READ_INTO":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCacheIoDataPoint(ts, float64(*dp.Value), AttributeCacheDirection.ReadInto)
+ mb.RecordMongodbatlasProcessCacheIoDataPoint(ts, float64(*dp.Value), AttributeCacheDirectionReadInto)
}
case "CACHE_BYTES_WRITTEN_FROM":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCacheIoDataPoint(ts, float64(*dp.Value), AttributeCacheDirection.WrittenFrom)
+ mb.RecordMongodbatlasProcessCacheIoDataPoint(ts, float64(*dp.Value), AttributeCacheDirectionWrittenFrom)
}
case "CACHE_DIRTY_BYTES":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCacheSizeDataPoint(ts, float64(*dp.Value), AttributeCacheStatus.Dirty)
+ mb.RecordMongodbatlasProcessCacheSizeDataPoint(ts, float64(*dp.Value), AttributeCacheStatusDirty)
}
case "CACHE_USED_BYTES":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCacheSizeDataPoint(ts, float64(*dp.Value), AttributeCacheStatus.Used)
+ mb.RecordMongodbatlasProcessCacheSizeDataPoint(ts, float64(*dp.Value), AttributeCacheStatusUsed)
}
case "TICKETS_AVAILABLE_READS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessTicketsDataPoint(ts, float64(*dp.Value), AttributeTicketType.AvailableReads)
+ mb.RecordMongodbatlasProcessTicketsDataPoint(ts, float64(*dp.Value), AttributeTicketTypeAvailableReads)
}
case "TICKETS_AVAILABLE_WRITE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessTicketsDataPoint(ts, float64(*dp.Value), AttributeTicketType.AvailableWrites)
+ mb.RecordMongodbatlasProcessTicketsDataPoint(ts, float64(*dp.Value), AttributeTicketTypeAvailableWrites)
}
// Number of connections to a MongoDB process found in the connections document that the serverStatus command generates.
@@ -182,55 +182,55 @@ func getRecordFunc(metricName string) metricRecordFunc {
// Number of cursors for a MongoDB process found in the metrics.cursor document that the serverStatus command generates.
case "CURSORS_TOTAL_OPEN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCursorsDataPoint(ts, float64(*dp.Value), AttributeCursorState.Open)
+ mb.RecordMongodbatlasProcessCursorsDataPoint(ts, float64(*dp.Value), AttributeCursorStateOpen)
}
case "CURSORS_TOTAL_TIMED_OUT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessCursorsDataPoint(ts, float64(*dp.Value), AttributeCursorState.TimedOut)
+ mb.RecordMongodbatlasProcessCursorsDataPoint(ts, float64(*dp.Value), AttributeCursorStateTimedOut)
}
// Numbers of Memory Issues and Page Faults for a MongoDB process.
case "EXTRA_INFO_PAGE_FAULTS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessPageFaultsDataPoint(ts, float64(*dp.Value), AttributeMemoryIssueType.ExtraInfo)
+ mb.RecordMongodbatlasProcessPageFaultsDataPoint(ts, float64(*dp.Value), AttributeMemoryIssueTypeExtraInfo)
}
case "GLOBAL_ACCESSES_NOT_IN_MEMORY":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessPageFaultsDataPoint(ts, float64(*dp.Value), AttributeMemoryIssueType.GlobalAccessesNotInMemory)
+ mb.RecordMongodbatlasProcessPageFaultsDataPoint(ts, float64(*dp.Value), AttributeMemoryIssueTypeGlobalAccessesNotInMemory)
}
case "GLOBAL_PAGE_FAULT_EXCEPTIONS_THROWN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessPageFaultsDataPoint(ts, float64(*dp.Value), AttributeMemoryIssueType.ExceptionsThrown)
+ mb.RecordMongodbatlasProcessPageFaultsDataPoint(ts, float64(*dp.Value), AttributeMemoryIssueTypeExceptionsThrown)
}
// Number of operations waiting on locks for the MongoDB process that the serverStatus command generates. Cloud Manager computes these values based on the type of storage engine.
case "GLOBAL_LOCK_CURRENT_QUEUE_TOTAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessGlobalLockDataPoint(ts, float64(*dp.Value), AttributeGlobalLockState.CurrentQueueTotal)
+ mb.RecordMongodbatlasProcessGlobalLockDataPoint(ts, float64(*dp.Value), AttributeGlobalLockStateCurrentQueueTotal)
}
case "GLOBAL_LOCK_CURRENT_QUEUE_READERS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessGlobalLockDataPoint(ts, float64(*dp.Value), AttributeGlobalLockState.CurrentQueueReaders)
+ mb.RecordMongodbatlasProcessGlobalLockDataPoint(ts, float64(*dp.Value), AttributeGlobalLockStateCurrentQueueReaders)
}
case "GLOBAL_LOCK_CURRENT_QUEUE_WRITERS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessGlobalLockDataPoint(ts, float64(*dp.Value), AttributeGlobalLockState.CurrentQueueWriters)
+ mb.RecordMongodbatlasProcessGlobalLockDataPoint(ts, float64(*dp.Value), AttributeGlobalLockStateCurrentQueueWriters)
}
// Number of index btree operations.
case "INDEX_COUNTERS_BTREE_ACCESSES":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessIndexCountersDataPoint(ts, float64(*dp.Value), AttributeBtreeCounterType.Accesses)
+ mb.RecordMongodbatlasProcessIndexCountersDataPoint(ts, float64(*dp.Value), AttributeBtreeCounterTypeAccesses)
}
case "INDEX_COUNTERS_BTREE_HITS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessIndexCountersDataPoint(ts, float64(*dp.Value), AttributeBtreeCounterType.Hits)
+ mb.RecordMongodbatlasProcessIndexCountersDataPoint(ts, float64(*dp.Value), AttributeBtreeCounterTypeHits)
}
case "INDEX_COUNTERS_BTREE_MISSES":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessIndexCountersDataPoint(ts, float64(*dp.Value), AttributeBtreeCounterType.Misses)
+ mb.RecordMongodbatlasProcessIndexCountersDataPoint(ts, float64(*dp.Value), AttributeBtreeCounterTypeMisses)
}
case "INDEX_COUNTERS_BTREE_MISS_RATIO":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
@@ -254,31 +254,31 @@ func getRecordFunc(metricName string) metricRecordFunc {
// Amount of memory for a MongoDB process found in the mem document that the serverStatus command collects.
case "MEMORY_RESIDENT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Resident)
+ mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateResident)
}
case "MEMORY_VIRTUAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Virtual)
+ mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateVirtual)
}
case "MEMORY_MAPPED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Mapped)
+ mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateMapped)
}
case "COMPUTED_MEMORY":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Computed)
+ mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateComputed)
}
// Amount of throughput for MongoDB process found in the network document that the serverStatus command collects.
case "NETWORK_BYTES_IN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessNetworkIoDataPoint(ts, float64(*dp.Value), AttributeDirection.Receive)
+ mb.RecordMongodbatlasProcessNetworkIoDataPoint(ts, float64(*dp.Value), AttributeDirectionReceive)
}
case "NETWORK_BYTES_OUT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessNetworkIoDataPoint(ts, float64(*dp.Value), AttributeDirection.Transmit)
+ mb.RecordMongodbatlasProcessNetworkIoDataPoint(ts, float64(*dp.Value), AttributeDirectionTransmit)
}
case "NETWORK_NUM_REQUESTS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
@@ -288,15 +288,15 @@ func getRecordFunc(metricName string) metricRecordFunc {
// Durations and throughput of the MongoDB process' oplog.
case "OPLOG_SLAVE_LAG_MASTER_TIME":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessOplogTimeDataPoint(ts, float64(*dp.Value), AttributeOplogType.SlaveLagMasterTime)
+ mb.RecordMongodbatlasProcessOplogTimeDataPoint(ts, float64(*dp.Value), AttributeOplogTypeSlaveLagMasterTime)
}
case "OPLOG_MASTER_TIME":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessOplogTimeDataPoint(ts, float64(*dp.Value), AttributeOplogType.MasterTime)
+ mb.RecordMongodbatlasProcessOplogTimeDataPoint(ts, float64(*dp.Value), AttributeOplogTypeMasterTime)
}
case "OPLOG_MASTER_LAG_TIME_DIFF":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessOplogTimeDataPoint(ts, float64(*dp.Value), AttributeOplogType.MasterLagTimeDiff)
+ mb.RecordMongodbatlasProcessOplogTimeDataPoint(ts, float64(*dp.Value), AttributeOplogTypeMasterLagTimeDiff)
}
case "OPLOG_RATE_GB_PER_HOUR":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
@@ -307,102 +307,102 @@ func getRecordFunc(metricName string) metricRecordFunc {
case "DB_STORAGE_TOTAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatus.Total)
+ mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatusTotal)
}
case "DB_DATA_SIZE_TOTAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatus.DataSize)
+ mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatusDataSize)
}
case "DB_INDEX_SIZE_TOTAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatus.IndexSize)
+ mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatusIndexSize)
}
case "DB_DATA_SIZE_TOTAL_WO_SYSTEM":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatus.DataSizeWoSystem)
+ mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatusDataSizeWoSystem)
}
// Rate of database operations on a MongoDB process since the process last started found in the opcounters document that the serverStatus command collects.
case "OPCOUNTER_CMD":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Cmd, AttributeClusterRole.Primary)
+ mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationCmd, AttributeClusterRolePrimary)
}
case "OPCOUNTER_QUERY":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Query, AttributeClusterRole.Primary)
+ mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationQuery, AttributeClusterRolePrimary)
}
case "OPCOUNTER_UPDATE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Update, AttributeClusterRole.Primary)
+ mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationUpdate, AttributeClusterRolePrimary)
}
case "OPCOUNTER_DELETE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Delete, AttributeClusterRole.Primary)
+ mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationDelete, AttributeClusterRolePrimary)
}
case "OPCOUNTER_GETMORE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Getmore, AttributeClusterRole.Primary)
+ mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationGetmore, AttributeClusterRolePrimary)
}
case "OPCOUNTER_INSERT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Insert, AttributeClusterRole.Primary)
+ mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationInsert, AttributeClusterRolePrimary)
}
// Rate of database operations on MongoDB secondaries found in the opcountersRepl document that the serverStatus command collects.
case "OPCOUNTER_REPL_CMD":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Cmd, AttributeClusterRole.Replica)
+ mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationCmd, AttributeClusterRoleReplica)
}
case "OPCOUNTER_REPL_UPDATE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Update, AttributeClusterRole.Replica)
+ mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationUpdate, AttributeClusterRoleReplica)
}
case "OPCOUNTER_REPL_DELETE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Delete, AttributeClusterRole.Replica)
+ mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationDelete, AttributeClusterRoleReplica)
}
case "OPCOUNTER_REPL_INSERT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Insert, AttributeClusterRole.Replica)
+ mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationInsert, AttributeClusterRoleReplica)
}
// Average rate of documents returned, inserted, updated, or deleted per second during a selected time period.
case "DOCUMENT_METRICS_RETURNED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatus.Returned)
+ mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatusReturned)
}
case "DOCUMENT_METRICS_INSERTED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatus.Inserted)
+ mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatusInserted)
}
case "DOCUMENT_METRICS_UPDATED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatus.Updated)
+ mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatusUpdated)
}
case "DOCUMENT_METRICS_DELETED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatus.Deleted)
+ mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatusDeleted)
}
// Average rate for operations per second during a selected time period that perform a sort but cannot perform the sort using an index.
case "OPERATIONS_SCAN_AND_ORDER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.ScanAndOrder, AttributeClusterRole.Primary)
+ mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationScanAndOrder, AttributeClusterRolePrimary)
}
// Average execution time in milliseconds per read, write, or command operation during a selected time period.
case "OP_EXECUTION_TIME_READS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts, float64(*dp.Value), AttributeExecutionType.Reads)
+ mb.RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts, float64(*dp.Value), AttributeExecutionTypeReads)
}
case "OP_EXECUTION_TIME_WRITES":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts, float64(*dp.Value), AttributeExecutionType.Writes)
+ mb.RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts, float64(*dp.Value), AttributeExecutionTypeWrites)
}
case "OP_EXECUTION_TIME_COMMANDS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts, float64(*dp.Value), AttributeExecutionType.Commands)
+ mb.RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts, float64(*dp.Value), AttributeExecutionTypeCommands)
}
// Number of times the host restarted within the previous hour.
@@ -414,278 +414,278 @@ func getRecordFunc(metricName string) metricRecordFunc {
// Average rate per second to scan index items during queries and query-plan evaluations found in the value of totalKeysExamined from the explain command.
case "QUERY_EXECUTOR_SCANNED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint(ts, float64(*dp.Value), AttributeScannedType.IndexItems)
+ mb.RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint(ts, float64(*dp.Value), AttributeScannedTypeIndexItems)
}
// Average rate of documents scanned per second during queries and query-plan evaluations found in the value of totalDocsExamined from the explain command.
case "QUERY_EXECUTOR_SCANNED_OBJECTS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint(ts, float64(*dp.Value), AttributeScannedType.Objects)
+ mb.RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint(ts, float64(*dp.Value), AttributeScannedTypeObjects)
}
// Ratio of the number of index items scanned to the number of documents returned.
case "QUERY_TARGETING_SCANNED_PER_RETURNED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint(ts, float64(*dp.Value), AttributeScannedType.IndexItems)
+ mb.RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint(ts, float64(*dp.Value), AttributeScannedTypeIndexItems)
}
// Ratio of the number of documents scanned to the number of documents returned.
case "QUERY_TARGETING_SCANNED_OBJECTS_PER_RETURNED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint(ts, float64(*dp.Value), AttributeScannedType.Objects)
+ mb.RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint(ts, float64(*dp.Value), AttributeScannedTypeObjects)
}
// CPU usage of processes on the host. For hosts with more than one CPU core, this value can exceed 100%.
case "SYSTEM_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User)
+ mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "MAX_SYSTEM_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.User)
+ mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "SYSTEM_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel)
+ mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "MAX_SYSTEM_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel)
+ mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "SYSTEM_CPU_NICE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Nice)
+ mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateNice)
}
case "MAX_SYSTEM_CPU_NICE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Nice)
+ mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateNice)
}
case "SYSTEM_CPU_IOWAIT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Iowait)
+ mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateIowait)
}
case "MAX_SYSTEM_CPU_IOWAIT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Iowait)
+ mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateIowait)
}
case "SYSTEM_CPU_IRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Irq)
+ mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateIrq)
}
case "MAX_SYSTEM_CPU_IRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Irq)
+ mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateIrq)
}
case "SYSTEM_CPU_SOFTIRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Softirq)
+ mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateSoftirq)
}
case "MAX_SYSTEM_CPU_SOFTIRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Softirq)
+ mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateSoftirq)
}
case "SYSTEM_CPU_GUEST":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Guest)
+ mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateGuest)
}
case "MAX_SYSTEM_CPU_GUEST":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Guest)
+ mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateGuest)
}
case "SYSTEM_CPU_STEAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Steal)
+ mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateSteal)
}
case "MAX_SYSTEM_CPU_STEAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Steal)
+ mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateSteal)
}
// CPU usage of processes on the host scaled to a range of 0 to 100% by dividing by the number of CPU cores.
case "SYSTEM_NORMALIZED_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User)
+ mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "MAX_SYSTEM_NORMALIZED_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.User)
+ mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "MAX_SYSTEM_NORMALIZED_CPU_NICE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Nice)
+ mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateNice)
}
case "SYSTEM_NORMALIZED_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel)
+ mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "MAX_SYSTEM_NORMALIZED_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel)
+ mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "SYSTEM_NORMALIZED_CPU_NICE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Nice)
+ mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateNice)
}
case "SYSTEM_NORMALIZED_CPU_IOWAIT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Iowait)
+ mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateIowait)
}
case "MAX_SYSTEM_NORMALIZED_CPU_IOWAIT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Iowait)
+ mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateIowait)
}
case "SYSTEM_NORMALIZED_CPU_IRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Irq)
+ mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateIrq)
}
case "MAX_SYSTEM_NORMALIZED_CPU_IRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Irq)
+ mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateIrq)
}
case "SYSTEM_NORMALIZED_CPU_SOFTIRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Softirq)
+ mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateSoftirq)
}
case "MAX_SYSTEM_NORMALIZED_CPU_SOFTIRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Softirq)
+ mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateSoftirq)
}
case "SYSTEM_NORMALIZED_CPU_GUEST":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Guest)
+ mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateGuest)
}
case "MAX_SYSTEM_NORMALIZED_CPU_GUEST":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Guest)
+ mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateGuest)
}
case "SYSTEM_NORMALIZED_CPU_STEAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Steal)
+ mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateSteal)
}
case "MAX_SYSTEM_NORMALIZED_CPU_STEAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Steal)
+ mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateSteal)
}
// Physical memory usage, in bytes, that the host uses.
case "SYSTEM_MEMORY_AVAILABLE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Available)
+ mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusAvailable)
}
case "MAX_SYSTEM_MEMORY_AVAILABLE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Available)
+ mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusAvailable)
}
case "SYSTEM_MEMORY_BUFFERS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Buffers)
+ mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusBuffers)
}
case "MAX_SYSTEM_MEMORY_BUFFERS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Buffers)
+ mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusBuffers)
}
case "SYSTEM_MEMORY_CACHED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Cached)
+ mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusCached)
}
case "MAX_SYSTEM_MEMORY_CACHED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Cached)
+ mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusCached)
}
case "SYSTEM_MEMORY_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Free)
+ mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusFree)
}
case "MAX_SYSTEM_MEMORY_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Free)
+ mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusFree)
}
case "SYSTEM_MEMORY_SHARED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Shared)
+ mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusShared)
}
case "MAX_SYSTEM_MEMORY_SHARED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Shared)
+ mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusShared)
}
case "SYSTEM_MEMORY_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Used)
+ mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusUsed)
}
case "MAX_SYSTEM_MEMORY_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Used)
+ mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusUsed)
}
// Average rate of physical bytes per second that the eth0 network interface received and transmitted.
case "SYSTEM_NETWORK_IN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemNetworkIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirection.Receive)
+ mb.RecordMongodbatlasSystemNetworkIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirectionReceive)
}
case "MAX_SYSTEM_NETWORK_IN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemNetworkIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirection.Receive)
+ mb.RecordMongodbatlasSystemNetworkIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirectionReceive)
}
case "SYSTEM_NETWORK_OUT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemNetworkIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirection.Transmit)
+ mb.RecordMongodbatlasSystemNetworkIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirectionTransmit)
}
case "MAX_SYSTEM_NETWORK_OUT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemNetworkIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirection.Transmit)
+ mb.RecordMongodbatlasSystemNetworkIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirectionTransmit)
}
// Total amount of memory that swap uses.
case "SWAP_USAGE_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemPagingUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Used)
+ mb.RecordMongodbatlasSystemPagingUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateUsed)
}
case "MAX_SWAP_USAGE_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemPagingUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Used)
+ mb.RecordMongodbatlasSystemPagingUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStateUsed)
}
case "SWAP_USAGE_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemPagingUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Free)
+ mb.RecordMongodbatlasSystemPagingUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateFree)
}
case "MAX_SWAP_USAGE_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemPagingUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Free)
+ mb.RecordMongodbatlasSystemPagingUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStateFree)
}
// Total amount of memory written and read from swap.
case "SWAP_IO_IN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemPagingIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirection.Receive)
+ mb.RecordMongodbatlasSystemPagingIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirectionReceive)
}
case "MAX_SWAP_IO_IN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemPagingIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirection.Receive)
+ mb.RecordMongodbatlasSystemPagingIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirectionReceive)
}
case "SWAP_IO_OUT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemPagingIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirection.Transmit)
+ mb.RecordMongodbatlasSystemPagingIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirectionTransmit)
}
case "MAX_SWAP_IO_OUT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemPagingIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirection.Transmit)
+ mb.RecordMongodbatlasSystemPagingIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirectionTransmit)
}
// Memory usage, in bytes, that Atlas Search processes use.
case "FTS_PROCESS_RESIDENT_MEMORY":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Resident)
+ mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateResident)
}
case "FTS_PROCESS_VIRTUAL_MEMORY":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Virtual)
+ mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateVirtual)
}
case "FTS_PROCESS_SHARED_MEMORY":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Shared)
+ mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateShared)
}
case "FTS_MEMORY_MAPPED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Mapped)
+ mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateMapped)
}
// Disk space, in bytes, that Atlas Search indexes use.
@@ -697,19 +697,19 @@ func getRecordFunc(metricName string) metricRecordFunc {
// Percentage of CPU that Atlas Search processes use.
case "FTS_PROCESS_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemFtsCPUUsageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User)
+ mb.RecordMongodbatlasSystemFtsCPUUsageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "FTS_PROCESS_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemFtsCPUUsageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel)
+ mb.RecordMongodbatlasSystemFtsCPUUsageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "FTS_PROCESS_NORMALIZED_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User)
+ mb.RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "FTS_PROCESS_NORMALIZED_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel)
+ mb.RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
// Process Disk Measurements (https://docs.atlas.mongodb.com/reference/api/process-disks-measurements/)
@@ -717,130 +717,130 @@ func getRecordFunc(metricName string) metricRecordFunc {
// Measures throughput of I/O operations for the disk partition used for MongoDB.
case "DISK_PARTITION_IOPS_READ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Read)
+ mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionRead)
}
case "MAX_DISK_PARTITION_IOPS_READ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Read)
+ mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionRead)
}
case "DISK_PARTITION_IOPS_WRITE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Write)
+ mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionWrite)
}
case "MAX_DISK_PARTITION_IOPS_WRITE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Write)
+ mb.RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionWrite)
}
case "DISK_PARTITION_IOPS_TOTAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Total)
+ mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionTotal)
}
case "MAX_DISK_PARTITION_IOPS_TOTAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Total)
+ mb.RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionTotal)
}
// The percentage of time during which requests are being issued to and serviced by the partition.
// This includes requests from any process, not just MongoDB processes.
case "DISK_PARTITION_LATENCY_READ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDiskPartitionLatencyAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Read)
+ mb.RecordMongodbatlasDiskPartitionLatencyAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionRead)
}
case "MAX_DISK_PARTITION_LATENCY_READ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Read)
+ mb.RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionRead)
}
case "DISK_PARTITION_LATENCY_WRITE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDiskPartitionLatencyAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Write)
+ mb.RecordMongodbatlasDiskPartitionLatencyAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionWrite)
}
case "MAX_DISK_PARTITION_LATENCY_WRITE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Write)
+ mb.RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionWrite)
}
// Measures latency per operation type of the disk partition used by MongoDB.
case "DISK_PARTITION_SPACE_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Free)
+ mb.RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatusFree)
}
case "MAX_DISK_PARTITION_SPACE_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Free)
+ mb.RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatusFree)
}
case "DISK_PARTITION_SPACE_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Used)
+ mb.RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatusUsed)
}
case "MAX_DISK_PARTITION_SPACE_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Used)
+ mb.RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatusUsed)
}
case "DISK_PARTITION_SPACE_PERCENT_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Free)
+ mb.RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatusFree)
}
case "MAX_DISK_PARTITION_SPACE_PERCENT_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Free)
+ mb.RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatusFree)
}
case "DISK_PARTITION_SPACE_PERCENT_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Used)
+ mb.RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatusUsed)
}
case "MAX_DISK_PARTITION_SPACE_PERCENT_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Used)
+ mb.RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatusUsed)
}
// Process Database Measurements (https://docs.atlas.mongodb.com/reference/api/process-disks-measurements/)
case "DATABASE_COLLECTION_COUNT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectType.Collection)
+ mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectTypeCollection)
}
case "DATABASE_INDEX_COUNT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectType.Index)
+ mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectTypeIndex)
}
case "DATABASE_EXTENT_COUNT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectType.Extent)
+ mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectTypeExtent)
}
case "DATABASE_OBJECT_COUNT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectType.Object)
+ mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectTypeObject)
}
case "DATABASE_VIEW_COUNT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectType.View)
+ mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectTypeView)
}
case "DATABASE_AVERAGE_OBJECT_SIZE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectType.Object)
+ mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectTypeObject)
}
case "DATABASE_STORAGE_SIZE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectType.Storage)
+ mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectTypeStorage)
}
case "DATABASE_INDEX_SIZE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectType.Index)
+ mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectTypeIndex)
}
case "DATABASE_DATA_SIZE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
- mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectType.Data)
+ mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectTypeData)
}
default:
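
The call-site changes above are mechanical: each Record*DataPoint now receives the generated typed constant instead of a raw attribute string. A minimal sketch of the resulting pattern, assuming it lives in the mongodbatlasreceiver internal/metadata package alongside the generated code — the recordSwapUsage helper is hypothetical and not part of this change, and MapAttributeMemoryState is assumed to follow the same generated map pattern shown for the mongodb receiver further below:

    // Assumed imports (this snippet would sit in the same package as the generated code):
    //   "go.mongodb.org/atlas/mongodbatlas"
    //   "go.opentelemetry.io/collector/pdata/pcommon"
    func recordSwapUsage(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp, state string) {
        // Convert the raw state string reported by the Atlas API into the typed constant;
        // unknown strings are skipped instead of being recorded as empty attribute values.
        if s, ok := MapAttributeMemoryState[state]; ok {
            mb.RecordMongodbatlasSystemPagingUsageAverageDataPoint(ts, float64(*dp.Value), s)
        }
    }
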
diff --git a/receiver/mongodbatlasreceiver/metadata.yaml b/receiver/mongodbatlasreceiver/metadata.yaml
index 8a03e605aed9..280d41252caa 100644
--- a/receiver/mongodbatlasreceiver/metadata.yaml
+++ b/receiver/mongodbatlasreceiver/metadata.yaml
@@ -454,7 +454,7 @@ metrics:
description: Swap usage
extended_documentation: Aggregate of MongoDB Metrics MAX_SWAP_USAGE_FREE, MAX_SWAP_USAGE_USED
unit: KiBy
- attributes: [direction]
+ attributes: [memory_state]
gauge:
value_type: double
mongodbatlas.system.paging.usage.average:
@@ -462,7 +462,7 @@ metrics:
description: Swap usage
extended_documentation: Aggregate of MongoDB Metrics SWAP_USAGE_FREE, SWAP_USAGE_USED
unit: KiBy
- attributes: [direction]
+ attributes: [memory_state]
gauge:
value_type: double
mongodbatlas.system.paging.io.max:
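
The generated file below shows the new pattern emitted for each enum attribute: an int-based Attribute* type whose constants start after a blank iota (so the zero value means "unset"), a String() method that renders the wire value, and a MapAttribute* helper for converting raw strings back into constants. A round-trip sketch under stated assumptions — the example function is hypothetical, and the metadata package is internal, so the import path is only usable from code inside the mongodbreceiver module itself:

    import (
        "fmt"

        "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbreceiver/internal/metadata"
    )

    func example() {
        // MapAttributeOperation maps the string reported by serverStatus onto the typed
        // constant; a missing key reports ok == false rather than a zero-value attribute.
        if op, ok := metadata.MapAttributeOperation["insert"]; ok {
            fmt.Println(op.String()) // prints "insert"
        }
    }
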
diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics_v2.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics_v2.go
index e411c73232a5..944d0a16e4f2 100644
--- a/receiver/mongodbreceiver/internal/metadata/generated_metrics_v2.go
+++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics_v2.go
@@ -71,6 +71,130 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeConnectionType specifies the value of the connection_type attribute.
+type AttributeConnectionType int
+
+const (
+ _ AttributeConnectionType = iota
+ AttributeConnectionTypeActive
+ AttributeConnectionTypeAvailable
+ AttributeConnectionTypeCurrent
+)
+
+// String returns the string representation of the AttributeConnectionType.
+func (av AttributeConnectionType) String() string {
+ switch av {
+ case AttributeConnectionTypeActive:
+ return "active"
+ case AttributeConnectionTypeAvailable:
+ return "available"
+ case AttributeConnectionTypeCurrent:
+ return "current"
+ }
+ return ""
+}
+
+// MapAttributeConnectionType is a helper map of string to AttributeConnectionType attribute value.
+var MapAttributeConnectionType = map[string]AttributeConnectionType{
+ "active": AttributeConnectionTypeActive,
+ "available": AttributeConnectionTypeAvailable,
+ "current": AttributeConnectionTypeCurrent,
+}
+
+// AttributeMemoryType specifies the value of the memory_type attribute.
+type AttributeMemoryType int
+
+const (
+ _ AttributeMemoryType = iota
+ AttributeMemoryTypeResident
+ AttributeMemoryTypeVirtual
+)
+
+// String returns the string representation of the AttributeMemoryType.
+func (av AttributeMemoryType) String() string {
+ switch av {
+ case AttributeMemoryTypeResident:
+ return "resident"
+ case AttributeMemoryTypeVirtual:
+ return "virtual"
+ }
+ return ""
+}
+
+// MapAttributeMemoryType is a helper map of string to AttributeMemoryType attribute value.
+var MapAttributeMemoryType = map[string]AttributeMemoryType{
+ "resident": AttributeMemoryTypeResident,
+ "virtual": AttributeMemoryTypeVirtual,
+}
+
+// AttributeOperation specifies the value of the operation attribute.
+type AttributeOperation int
+
+const (
+ _ AttributeOperation = iota
+ AttributeOperationInsert
+ AttributeOperationQuery
+ AttributeOperationUpdate
+ AttributeOperationDelete
+ AttributeOperationGetmore
+ AttributeOperationCommand
+)
+
+// String returns the string representation of the AttributeOperation.
+func (av AttributeOperation) String() string {
+ switch av {
+ case AttributeOperationInsert:
+ return "insert"
+ case AttributeOperationQuery:
+ return "query"
+ case AttributeOperationUpdate:
+ return "update"
+ case AttributeOperationDelete:
+ return "delete"
+ case AttributeOperationGetmore:
+ return "getmore"
+ case AttributeOperationCommand:
+ return "command"
+ }
+ return ""
+}
+
+// MapAttributeOperation is a helper map of string to AttributeOperation attribute value.
+var MapAttributeOperation = map[string]AttributeOperation{
+ "insert": AttributeOperationInsert,
+ "query": AttributeOperationQuery,
+ "update": AttributeOperationUpdate,
+ "delete": AttributeOperationDelete,
+ "getmore": AttributeOperationGetmore,
+ "command": AttributeOperationCommand,
+}
+
+// AttributeType specifies the value of the type attribute.
+type AttributeType int
+
+const (
+ _ AttributeType = iota
+ AttributeTypeHit
+ AttributeTypeMiss
+)
+
+// String returns the string representation of the AttributeType.
+func (av AttributeType) String() string {
+ switch av {
+ case AttributeTypeHit:
+ return "hit"
+ case AttributeTypeMiss:
+ return "miss"
+ }
+ return ""
+}
+
+// MapAttributeType is a helper map of string to AttributeType attribute value.
+var MapAttributeType = map[string]AttributeType{
+ "hit": AttributeTypeHit,
+ "miss": AttributeTypeMiss,
+}
+
type metricMongodbCacheOperations struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -816,8 +940,8 @@ func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics {
}
// RecordMongodbCacheOperationsDataPoint adds a data point to mongodb.cache.operations metric.
-func (mb *MetricsBuilder) RecordMongodbCacheOperationsDataPoint(ts pcommon.Timestamp, val int64, typeAttributeValue string) {
- mb.metricMongodbCacheOperations.recordDataPoint(mb.startTime, ts, val, typeAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbCacheOperationsDataPoint(ts pcommon.Timestamp, val int64, typeAttributeValue AttributeType) {
+ mb.metricMongodbCacheOperations.recordDataPoint(mb.startTime, ts, val, typeAttributeValue.String())
}
// RecordMongodbCollectionCountDataPoint adds a data point to mongodb.collection.count metric.
@@ -826,8 +950,8 @@ func (mb *MetricsBuilder) RecordMongodbCollectionCountDataPoint(ts pcommon.Times
}
// RecordMongodbConnectionCountDataPoint adds a data point to mongodb.connection.count metric.
-func (mb *MetricsBuilder) RecordMongodbConnectionCountDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, connectionTypeAttributeValue string) {
- mb.metricMongodbConnectionCount.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, connectionTypeAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbConnectionCountDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, connectionTypeAttributeValue AttributeConnectionType) {
+ mb.metricMongodbConnectionCount.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, connectionTypeAttributeValue.String())
}
// RecordMongodbDataSizeDataPoint adds a data point to mongodb.data.size metric.
@@ -856,8 +980,8 @@ func (mb *MetricsBuilder) RecordMongodbIndexSizeDataPoint(ts pcommon.Timestamp,
}
// RecordMongodbMemoryUsageDataPoint adds a data point to mongodb.memory.usage metric.
-func (mb *MetricsBuilder) RecordMongodbMemoryUsageDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, memoryTypeAttributeValue string) {
- mb.metricMongodbMemoryUsage.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, memoryTypeAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbMemoryUsageDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, memoryTypeAttributeValue AttributeMemoryType) {
+ mb.metricMongodbMemoryUsage.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, memoryTypeAttributeValue.String())
}
// RecordMongodbObjectCountDataPoint adds a data point to mongodb.object.count metric.
@@ -866,8 +990,8 @@ func (mb *MetricsBuilder) RecordMongodbObjectCountDataPoint(ts pcommon.Timestamp
}
// RecordMongodbOperationCountDataPoint adds a data point to mongodb.operation.count metric.
-func (mb *MetricsBuilder) RecordMongodbOperationCountDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue string) {
- mb.metricMongodbOperationCount.recordDataPoint(mb.startTime, ts, val, operationAttributeValue)
+func (mb *MetricsBuilder) RecordMongodbOperationCountDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation) {
+ mb.metricMongodbOperationCount.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String())
}
// RecordMongodbStorageSizeDataPoint adds a data point to mongodb.storage.size metric.
@@ -906,49 +1030,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeConnectionType are the possible values that the attribute "connection_type" can have.
-var AttributeConnectionType = struct {
- Active string
- Available string
- Current string
-}{
- "active",
- "available",
- "current",
-}
-
-// AttributeMemoryType are the possible values that the attribute "memory_type" can have.
-var AttributeMemoryType = struct {
- Resident string
- Virtual string
-}{
- "resident",
- "virtual",
-}
-
-// AttributeOperation are the possible values that the attribute "operation" can have.
-var AttributeOperation = struct {
- Insert string
- Query string
- Update string
- Delete string
- Getmore string
- Command string
-}{
- "insert",
- "query",
- "update",
- "delete",
- "getmore",
- "command",
-}
-
-// AttributeType are the possible values that the attribute "type" can have.
-var AttributeType = struct {
- Hit string
- Miss string
-}{
- "hit",
- "miss",
-}
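
The practical effect of the change above for scraper code is that attribute values are now passed as typed constants rather than raw strings, with the generated `MapAttribute*` maps available for translating external strings back into typed values. A minimal sketch of the new call pattern, assuming an already-constructed `*metadata.MetricsBuilder` (the builder wiring is not part of this diff):

```go
package example

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbreceiver/internal/metadata"
)

// recordCacheCounters is illustrative only; the hit/miss split mirrors the
// scraper change shown further below in this patch.
func recordCacheCounters(mb *metadata.MetricsBuilder, hits, misses int64) {
	now := pcommon.NewTimestampFromTime(time.Now())

	// Typed constants replace the former string literals "hit" and "miss".
	mb.RecordMongodbCacheOperationsDataPoint(now, hits, metadata.AttributeTypeHit)
	mb.RecordMongodbCacheOperationsDataPoint(now, misses, metadata.AttributeTypeMiss)
}

// connectionTypeFromKey shows the string-to-constant direction: the zero
// value of each Attribute type is reserved, so a failed lookup is explicit.
func connectionTypeFromKey(key string) (metadata.AttributeConnectionType, bool) {
	ct, ok := metadata.MapAttributeConnectionType[key]
	return ct, ok
}
```
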
diff --git a/receiver/mongodbreceiver/metrics.go b/receiver/mongodbreceiver/metrics.go
index 519bac193666..06f1f96f0221 100644
--- a/receiver/mongodbreceiver/metrics.go
+++ b/receiver/mongodbreceiver/metrics.go
@@ -136,13 +136,8 @@ func (s *mongodbScraper) recordExtentCount(now pcommon.Timestamp, doc bson.M, db
// ServerStatus
func (s *mongodbScraper) recordConnections(now pcommon.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) {
- types := []string{
- metadata.AttributeConnectionType.Active,
- metadata.AttributeConnectionType.Available,
- metadata.AttributeConnectionType.Current,
- }
- for _, ct := range types {
- connKey := []string{"connections", ct}
+ for ctVal, ct := range metadata.MapAttributeConnectionType {
+ connKey := []string{"connections", ctVal}
conn, err := dig(doc, connKey)
if err != nil {
errors.AddPartial(1, err)
@@ -159,12 +154,8 @@ func (s *mongodbScraper) recordConnections(now pcommon.Timestamp, doc bson.M, db
}
func (s *mongodbScraper) recordMemoryUsage(now pcommon.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) {
- types := []string{
- metadata.AttributeMemoryType.Resident,
- metadata.AttributeMemoryType.Virtual,
- }
- for _, mt := range types {
- memKey := []string{"mem", mt}
+ for mtVal, mt := range metadata.MapAttributeMemoryType {
+ memKey := []string{"mem", mtVal}
mem, err := dig(doc, memKey)
if err != nil {
errors.AddPartial(1, err)
@@ -185,18 +176,11 @@ func (s *mongodbScraper) recordMemoryUsage(now pcommon.Timestamp, doc bson.M, db
// Admin Stats
func (s *mongodbScraper) recordOperations(now pcommon.Timestamp, doc bson.M, errors scrapererror.ScrapeErrors) {
// Collect Operations
- for _, operation := range []string{
- metadata.AttributeOperation.Insert,
- metadata.AttributeOperation.Query,
- metadata.AttributeOperation.Update,
- metadata.AttributeOperation.Delete,
- metadata.AttributeOperation.Getmore,
- metadata.AttributeOperation.Command,
- } {
- count, err := dig(doc, []string{"opcounters", operation})
+ for operationVal, operation := range metadata.MapAttributeOperation {
+ count, err := dig(doc, []string{"opcounters", operationVal})
if err != nil {
errors.AddPartial(1, err)
- s.logger.Error("failed to find operation", zap.Error(err), zap.String("operation", operation))
+ s.logger.Error("failed to find operation", zap.Error(err), zap.String("operation", operationVal))
continue
}
countVal, err := parseInt(count)
@@ -224,7 +208,7 @@ func (s *mongodbScraper) recordCacheOperations(now pcommon.Timestamp, doc bson.M
if err != nil {
errors.AddPartial(1, err)
} else {
- s.mb.RecordMongodbCacheOperationsDataPoint(now, cacheMissesValue, metadata.AttributeType.Miss)
+ s.mb.RecordMongodbCacheOperationsDataPoint(now, cacheMissesValue, metadata.AttributeTypeMiss)
}
tcr, err := dig(doc, []string{"wiredTiger", "cache", "pages requested from the cache"})
@@ -242,7 +226,7 @@ func (s *mongodbScraper) recordCacheOperations(now pcommon.Timestamp, doc bson.M
if canCalculateCacheHits && totalCacheReqs > cacheMissesValue {
cacheHits := totalCacheReqs - cacheMissesValue
- s.mb.RecordMongodbCacheOperationsDataPoint(now, cacheHits, metadata.AttributeType.Hit)
+ s.mb.RecordMongodbCacheOperationsDataPoint(now, cacheHits, metadata.AttributeTypeHit)
}
}
diff --git a/receiver/mysqlreceiver/internal/metadata/generated_metrics_v2.go b/receiver/mysqlreceiver/internal/metadata/generated_metrics_v2.go
index deb3570ae2d3..9cd7aa1b19e1 100644
--- a/receiver/mysqlreceiver/internal/metadata/generated_metrics_v2.go
+++ b/receiver/mysqlreceiver/internal/metadata/generated_metrics_v2.go
@@ -93,6 +93,510 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeBufferPoolData specifies the value of the buffer_pool_data attribute.
+type AttributeBufferPoolData int
+
+const (
+ _ AttributeBufferPoolData = iota
+ AttributeBufferPoolDataDirty
+ AttributeBufferPoolDataClean
+)
+
+// String returns the string representation of the AttributeBufferPoolData.
+func (av AttributeBufferPoolData) String() string {
+ switch av {
+ case AttributeBufferPoolDataDirty:
+ return "dirty"
+ case AttributeBufferPoolDataClean:
+ return "clean"
+ }
+ return ""
+}
+
+// MapAttributeBufferPoolData is a helper map of string to AttributeBufferPoolData attribute value.
+var MapAttributeBufferPoolData = map[string]AttributeBufferPoolData{
+ "dirty": AttributeBufferPoolDataDirty,
+ "clean": AttributeBufferPoolDataClean,
+}
+
+// AttributeBufferPoolOperations specifies the value of the buffer_pool_operations attribute.
+type AttributeBufferPoolOperations int
+
+const (
+ _ AttributeBufferPoolOperations = iota
+ AttributeBufferPoolOperationsReadAheadRnd
+ AttributeBufferPoolOperationsReadAhead
+ AttributeBufferPoolOperationsReadAheadEvicted
+ AttributeBufferPoolOperationsReadRequests
+ AttributeBufferPoolOperationsReads
+ AttributeBufferPoolOperationsWaitFree
+ AttributeBufferPoolOperationsWriteRequests
+)
+
+// String returns the string representation of the AttributeBufferPoolOperations.
+func (av AttributeBufferPoolOperations) String() string {
+ switch av {
+ case AttributeBufferPoolOperationsReadAheadRnd:
+ return "read_ahead_rnd"
+ case AttributeBufferPoolOperationsReadAhead:
+ return "read_ahead"
+ case AttributeBufferPoolOperationsReadAheadEvicted:
+ return "read_ahead_evicted"
+ case AttributeBufferPoolOperationsReadRequests:
+ return "read_requests"
+ case AttributeBufferPoolOperationsReads:
+ return "reads"
+ case AttributeBufferPoolOperationsWaitFree:
+ return "wait_free"
+ case AttributeBufferPoolOperationsWriteRequests:
+ return "write_requests"
+ }
+ return ""
+}
+
+// MapAttributeBufferPoolOperations is a helper map of string to AttributeBufferPoolOperations attribute value.
+var MapAttributeBufferPoolOperations = map[string]AttributeBufferPoolOperations{
+ "read_ahead_rnd": AttributeBufferPoolOperationsReadAheadRnd,
+ "read_ahead": AttributeBufferPoolOperationsReadAhead,
+ "read_ahead_evicted": AttributeBufferPoolOperationsReadAheadEvicted,
+ "read_requests": AttributeBufferPoolOperationsReadRequests,
+ "reads": AttributeBufferPoolOperationsReads,
+ "wait_free": AttributeBufferPoolOperationsWaitFree,
+ "write_requests": AttributeBufferPoolOperationsWriteRequests,
+}
+
+// AttributeBufferPoolPages specifies the value of the buffer_pool_pages attribute.
+type AttributeBufferPoolPages int
+
+const (
+ _ AttributeBufferPoolPages = iota
+ AttributeBufferPoolPagesData
+ AttributeBufferPoolPagesFree
+ AttributeBufferPoolPagesMisc
+)
+
+// String returns the string representation of the AttributeBufferPoolPages.
+func (av AttributeBufferPoolPages) String() string {
+ switch av {
+ case AttributeBufferPoolPagesData:
+ return "data"
+ case AttributeBufferPoolPagesFree:
+ return "free"
+ case AttributeBufferPoolPagesMisc:
+ return "misc"
+ }
+ return ""
+}
+
+// MapAttributeBufferPoolPages is a helper map of string to AttributeBufferPoolPages attribute value.
+var MapAttributeBufferPoolPages = map[string]AttributeBufferPoolPages{
+ "data": AttributeBufferPoolPagesData,
+ "free": AttributeBufferPoolPagesFree,
+ "misc": AttributeBufferPoolPagesMisc,
+}
+
+// AttributeCommand specifies the value of the command attribute.
+type AttributeCommand int
+
+const (
+ _ AttributeCommand = iota
+ AttributeCommandExecute
+ AttributeCommandClose
+ AttributeCommandFetch
+ AttributeCommandPrepare
+ AttributeCommandReset
+ AttributeCommandSendLongData
+)
+
+// String returns the string representation of the AttributeCommand.
+func (av AttributeCommand) String() string {
+ switch av {
+ case AttributeCommandExecute:
+ return "execute"
+ case AttributeCommandClose:
+ return "close"
+ case AttributeCommandFetch:
+ return "fetch"
+ case AttributeCommandPrepare:
+ return "prepare"
+ case AttributeCommandReset:
+ return "reset"
+ case AttributeCommandSendLongData:
+ return "send_long_data"
+ }
+ return ""
+}
+
+// MapAttributeCommand is a helper map of string to AttributeCommand attribute value.
+var MapAttributeCommand = map[string]AttributeCommand{
+ "execute": AttributeCommandExecute,
+ "close": AttributeCommandClose,
+ "fetch": AttributeCommandFetch,
+ "prepare": AttributeCommandPrepare,
+ "reset": AttributeCommandReset,
+ "send_long_data": AttributeCommandSendLongData,
+}
+
+// AttributeDoubleWrites specifies the value of the double_writes attribute.
+type AttributeDoubleWrites int
+
+const (
+ _ AttributeDoubleWrites = iota
+ AttributeDoubleWritesPagesWritten
+ AttributeDoubleWritesWrites
+)
+
+// String returns the string representation of the AttributeDoubleWrites.
+func (av AttributeDoubleWrites) String() string {
+ switch av {
+ case AttributeDoubleWritesPagesWritten:
+ return "pages_written"
+ case AttributeDoubleWritesWrites:
+ return "writes"
+ }
+ return ""
+}
+
+// MapAttributeDoubleWrites is a helper map of string to AttributeDoubleWrites attribute value.
+var MapAttributeDoubleWrites = map[string]AttributeDoubleWrites{
+ "pages_written": AttributeDoubleWritesPagesWritten,
+ "writes": AttributeDoubleWritesWrites,
+}
+
+// AttributeHandler specifies the value of the handler attribute.
+type AttributeHandler int
+
+const (
+ _ AttributeHandler = iota
+ AttributeHandlerCommit
+ AttributeHandlerDelete
+ AttributeHandlerDiscover
+ AttributeHandlerExternalLock
+ AttributeHandlerMrrInit
+ AttributeHandlerPrepare
+ AttributeHandlerReadFirst
+ AttributeHandlerReadKey
+ AttributeHandlerReadLast
+ AttributeHandlerReadNext
+ AttributeHandlerReadPrev
+ AttributeHandlerReadRnd
+ AttributeHandlerReadRndNext
+ AttributeHandlerRollback
+ AttributeHandlerSavepoint
+ AttributeHandlerSavepointRollback
+ AttributeHandlerUpdate
+ AttributeHandlerWrite
+)
+
+// String returns the string representation of the AttributeHandler.
+func (av AttributeHandler) String() string {
+ switch av {
+ case AttributeHandlerCommit:
+ return "commit"
+ case AttributeHandlerDelete:
+ return "delete"
+ case AttributeHandlerDiscover:
+ return "discover"
+ case AttributeHandlerExternalLock:
+ return "external_lock"
+ case AttributeHandlerMrrInit:
+ return "mrr_init"
+ case AttributeHandlerPrepare:
+ return "prepare"
+ case AttributeHandlerReadFirst:
+ return "read_first"
+ case AttributeHandlerReadKey:
+ return "read_key"
+ case AttributeHandlerReadLast:
+ return "read_last"
+ case AttributeHandlerReadNext:
+ return "read_next"
+ case AttributeHandlerReadPrev:
+ return "read_prev"
+ case AttributeHandlerReadRnd:
+ return "read_rnd"
+ case AttributeHandlerReadRndNext:
+ return "read_rnd_next"
+ case AttributeHandlerRollback:
+ return "rollback"
+ case AttributeHandlerSavepoint:
+ return "savepoint"
+ case AttributeHandlerSavepointRollback:
+ return "savepoint_rollback"
+ case AttributeHandlerUpdate:
+ return "update"
+ case AttributeHandlerWrite:
+ return "write"
+ }
+ return ""
+}
+
+// MapAttributeHandler is a helper map of string to AttributeHandler attribute value.
+var MapAttributeHandler = map[string]AttributeHandler{
+ "commit": AttributeHandlerCommit,
+ "delete": AttributeHandlerDelete,
+ "discover": AttributeHandlerDiscover,
+ "external_lock": AttributeHandlerExternalLock,
+ "mrr_init": AttributeHandlerMrrInit,
+ "prepare": AttributeHandlerPrepare,
+ "read_first": AttributeHandlerReadFirst,
+ "read_key": AttributeHandlerReadKey,
+ "read_last": AttributeHandlerReadLast,
+ "read_next": AttributeHandlerReadNext,
+ "read_prev": AttributeHandlerReadPrev,
+ "read_rnd": AttributeHandlerReadRnd,
+ "read_rnd_next": AttributeHandlerReadRndNext,
+ "rollback": AttributeHandlerRollback,
+ "savepoint": AttributeHandlerSavepoint,
+ "savepoint_rollback": AttributeHandlerSavepointRollback,
+ "update": AttributeHandlerUpdate,
+ "write": AttributeHandlerWrite,
+}
+
+// AttributeLocks specifies the value of the locks attribute.
+type AttributeLocks int
+
+const (
+ _ AttributeLocks = iota
+ AttributeLocksImmediate
+ AttributeLocksWaited
+)
+
+// String returns the string representation of the AttributeLocks.
+func (av AttributeLocks) String() string {
+ switch av {
+ case AttributeLocksImmediate:
+ return "immediate"
+ case AttributeLocksWaited:
+ return "waited"
+ }
+ return ""
+}
+
+// MapAttributeLocks is a helper map of string to AttributeLocks attribute value.
+var MapAttributeLocks = map[string]AttributeLocks{
+ "immediate": AttributeLocksImmediate,
+ "waited": AttributeLocksWaited,
+}
+
+// AttributeLogOperations specifies the value of the log_operations attribute.
+type AttributeLogOperations int
+
+const (
+ _ AttributeLogOperations = iota
+ AttributeLogOperationsWaits
+ AttributeLogOperationsWriteRequests
+ AttributeLogOperationsWrites
+)
+
+// String returns the string representation of the AttributeLogOperations.
+func (av AttributeLogOperations) String() string {
+ switch av {
+ case AttributeLogOperationsWaits:
+ return "waits"
+ case AttributeLogOperationsWriteRequests:
+ return "write_requests"
+ case AttributeLogOperationsWrites:
+ return "writes"
+ }
+ return ""
+}
+
+// MapAttributeLogOperations is a helper map of string to AttributeLogOperations attribute value.
+var MapAttributeLogOperations = map[string]AttributeLogOperations{
+ "waits": AttributeLogOperationsWaits,
+ "write_requests": AttributeLogOperationsWriteRequests,
+ "writes": AttributeLogOperationsWrites,
+}
+
+// AttributeOperations specifies the value of the operations attribute.
+type AttributeOperations int
+
+const (
+ _ AttributeOperations = iota
+ AttributeOperationsFsyncs
+ AttributeOperationsReads
+ AttributeOperationsWrites
+)
+
+// String returns the string representation of the AttributeOperations.
+func (av AttributeOperations) String() string {
+ switch av {
+ case AttributeOperationsFsyncs:
+ return "fsyncs"
+ case AttributeOperationsReads:
+ return "reads"
+ case AttributeOperationsWrites:
+ return "writes"
+ }
+ return ""
+}
+
+// MapAttributeOperations is a helper map of string to AttributeOperations attribute value.
+var MapAttributeOperations = map[string]AttributeOperations{
+ "fsyncs": AttributeOperationsFsyncs,
+ "reads": AttributeOperationsReads,
+ "writes": AttributeOperationsWrites,
+}
+
+// AttributePageOperations specifies the value of the page_operations attribute.
+type AttributePageOperations int
+
+const (
+ _ AttributePageOperations = iota
+ AttributePageOperationsCreated
+ AttributePageOperationsRead
+ AttributePageOperationsWritten
+)
+
+// String returns the string representation of the AttributePageOperations.
+func (av AttributePageOperations) String() string {
+ switch av {
+ case AttributePageOperationsCreated:
+ return "created"
+ case AttributePageOperationsRead:
+ return "read"
+ case AttributePageOperationsWritten:
+ return "written"
+ }
+ return ""
+}
+
+// MapAttributePageOperations is a helper map of string to AttributePageOperations attribute value.
+var MapAttributePageOperations = map[string]AttributePageOperations{
+ "created": AttributePageOperationsCreated,
+ "read": AttributePageOperationsRead,
+ "written": AttributePageOperationsWritten,
+}
+
+// AttributeRowLocks specifies the value of the row_locks attribute.
+type AttributeRowLocks int
+
+const (
+ _ AttributeRowLocks = iota
+ AttributeRowLocksWaits
+ AttributeRowLocksTime
+)
+
+// String returns the string representation of the AttributeRowLocks.
+func (av AttributeRowLocks) String() string {
+ switch av {
+ case AttributeRowLocksWaits:
+ return "waits"
+ case AttributeRowLocksTime:
+ return "time"
+ }
+ return ""
+}
+
+// MapAttributeRowLocks is a helper map of string to AttributeRowLocks attribute value.
+var MapAttributeRowLocks = map[string]AttributeRowLocks{
+ "waits": AttributeRowLocksWaits,
+ "time": AttributeRowLocksTime,
+}
+
+// AttributeRowOperations specifies the value of the row_operations attribute.
+type AttributeRowOperations int
+
+const (
+ _ AttributeRowOperations = iota
+ AttributeRowOperationsDeleted
+ AttributeRowOperationsInserted
+ AttributeRowOperationsRead
+ AttributeRowOperationsUpdated
+)
+
+// String returns the string representation of the AttributeRowOperations.
+func (av AttributeRowOperations) String() string {
+ switch av {
+ case AttributeRowOperationsDeleted:
+ return "deleted"
+ case AttributeRowOperationsInserted:
+ return "inserted"
+ case AttributeRowOperationsRead:
+ return "read"
+ case AttributeRowOperationsUpdated:
+ return "updated"
+ }
+ return ""
+}
+
+// MapAttributeRowOperations is a helper map of string to AttributeRowOperations attribute value.
+var MapAttributeRowOperations = map[string]AttributeRowOperations{
+ "deleted": AttributeRowOperationsDeleted,
+ "inserted": AttributeRowOperationsInserted,
+ "read": AttributeRowOperationsRead,
+ "updated": AttributeRowOperationsUpdated,
+}
+
+// AttributeSorts specifies the value of the sorts attribute.
+type AttributeSorts int
+
+const (
+ _ AttributeSorts = iota
+ AttributeSortsMergePasses
+ AttributeSortsRange
+ AttributeSortsRows
+ AttributeSortsScan
+)
+
+// String returns the string representation of the AttributeSorts.
+func (av AttributeSorts) String() string {
+ switch av {
+ case AttributeSortsMergePasses:
+ return "merge_passes"
+ case AttributeSortsRange:
+ return "range"
+ case AttributeSortsRows:
+ return "rows"
+ case AttributeSortsScan:
+ return "scan"
+ }
+ return ""
+}
+
+// MapAttributeSorts is a helper map of string to AttributeSorts attribute value.
+var MapAttributeSorts = map[string]AttributeSorts{
+ "merge_passes": AttributeSortsMergePasses,
+ "range": AttributeSortsRange,
+ "rows": AttributeSortsRows,
+ "scan": AttributeSortsScan,
+}
+
+// AttributeThreads specifies the value of the threads attribute.
+type AttributeThreads int
+
+const (
+ _ AttributeThreads = iota
+ AttributeThreadsCached
+ AttributeThreadsConnected
+ AttributeThreadsCreated
+ AttributeThreadsRunning
+)
+
+// String returns the string representation of the AttributeThreads.
+func (av AttributeThreads) String() string {
+ switch av {
+ case AttributeThreadsCached:
+ return "cached"
+ case AttributeThreadsConnected:
+ return "connected"
+ case AttributeThreadsCreated:
+ return "created"
+ case AttributeThreadsRunning:
+ return "running"
+ }
+ return ""
+}
+
+// MapAttributeThreads is a helper map of string to AttributeThreads attribute value.
+var MapAttributeThreads = map[string]AttributeThreads{
+ "cached": AttributeThreadsCached,
+ "connected": AttributeThreadsConnected,
+ "created": AttributeThreadsCreated,
+ "running": AttributeThreadsRunning,
+}
+
type metricMysqlBufferPoolDataPages struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -1114,8 +1618,8 @@ func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics {
}
// RecordMysqlBufferPoolDataPagesDataPoint adds a data point to mysql.buffer_pool.data_pages metric.
-func (mb *MetricsBuilder) RecordMysqlBufferPoolDataPagesDataPoint(ts pcommon.Timestamp, val int64, bufferPoolDataAttributeValue string) {
- mb.metricMysqlBufferPoolDataPages.recordDataPoint(mb.startTime, ts, val, bufferPoolDataAttributeValue)
+func (mb *MetricsBuilder) RecordMysqlBufferPoolDataPagesDataPoint(ts pcommon.Timestamp, val int64, bufferPoolDataAttributeValue AttributeBufferPoolData) {
+ mb.metricMysqlBufferPoolDataPages.recordDataPoint(mb.startTime, ts, val, bufferPoolDataAttributeValue.String())
}
// RecordMysqlBufferPoolLimitDataPoint adds a data point to mysql.buffer_pool.limit metric.
@@ -1129,11 +1633,11 @@ func (mb *MetricsBuilder) RecordMysqlBufferPoolLimitDataPoint(ts pcommon.Timesta
}
// RecordMysqlBufferPoolOperationsDataPoint adds a data point to mysql.buffer_pool.operations metric.
-func (mb *MetricsBuilder) RecordMysqlBufferPoolOperationsDataPoint(ts pcommon.Timestamp, val string, bufferPoolOperationsAttributeValue string) error {
+func (mb *MetricsBuilder) RecordMysqlBufferPoolOperationsDataPoint(ts pcommon.Timestamp, val string, bufferPoolOperationsAttributeValue AttributeBufferPoolOperations) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for MysqlBufferPoolOperations, value was %s: %w", val, err)
} else {
- mb.metricMysqlBufferPoolOperations.recordDataPoint(mb.startTime, ts, i, bufferPoolOperationsAttributeValue)
+ mb.metricMysqlBufferPoolOperations.recordDataPoint(mb.startTime, ts, i, bufferPoolOperationsAttributeValue.String())
}
return nil
}
@@ -1149,126 +1653,126 @@ func (mb *MetricsBuilder) RecordMysqlBufferPoolPageFlushesDataPoint(ts pcommon.T
}
// RecordMysqlBufferPoolPagesDataPoint adds a data point to mysql.buffer_pool.pages metric.
-func (mb *MetricsBuilder) RecordMysqlBufferPoolPagesDataPoint(ts pcommon.Timestamp, val string, bufferPoolPagesAttributeValue string) error {
+func (mb *MetricsBuilder) RecordMysqlBufferPoolPagesDataPoint(ts pcommon.Timestamp, val string, bufferPoolPagesAttributeValue AttributeBufferPoolPages) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for MysqlBufferPoolPages, value was %s: %w", val, err)
} else {
- mb.metricMysqlBufferPoolPages.recordDataPoint(mb.startTime, ts, i, bufferPoolPagesAttributeValue)
+ mb.metricMysqlBufferPoolPages.recordDataPoint(mb.startTime, ts, i, bufferPoolPagesAttributeValue.String())
}
return nil
}
// RecordMysqlBufferPoolUsageDataPoint adds a data point to mysql.buffer_pool.usage metric.
-func (mb *MetricsBuilder) RecordMysqlBufferPoolUsageDataPoint(ts pcommon.Timestamp, val int64, bufferPoolDataAttributeValue string) {
- mb.metricMysqlBufferPoolUsage.recordDataPoint(mb.startTime, ts, val, bufferPoolDataAttributeValue)
+func (mb *MetricsBuilder) RecordMysqlBufferPoolUsageDataPoint(ts pcommon.Timestamp, val int64, bufferPoolDataAttributeValue AttributeBufferPoolData) {
+ mb.metricMysqlBufferPoolUsage.recordDataPoint(mb.startTime, ts, val, bufferPoolDataAttributeValue.String())
}
// RecordMysqlCommandsDataPoint adds a data point to mysql.commands metric.
-func (mb *MetricsBuilder) RecordMysqlCommandsDataPoint(ts pcommon.Timestamp, val string, commandAttributeValue string) error {
+func (mb *MetricsBuilder) RecordMysqlCommandsDataPoint(ts pcommon.Timestamp, val string, commandAttributeValue AttributeCommand) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for MysqlCommands, value was %s: %w", val, err)
} else {
- mb.metricMysqlCommands.recordDataPoint(mb.startTime, ts, i, commandAttributeValue)
+ mb.metricMysqlCommands.recordDataPoint(mb.startTime, ts, i, commandAttributeValue.String())
}
return nil
}
// RecordMysqlDoubleWritesDataPoint adds a data point to mysql.double_writes metric.
-func (mb *MetricsBuilder) RecordMysqlDoubleWritesDataPoint(ts pcommon.Timestamp, val string, doubleWritesAttributeValue string) error {
+func (mb *MetricsBuilder) RecordMysqlDoubleWritesDataPoint(ts pcommon.Timestamp, val string, doubleWritesAttributeValue AttributeDoubleWrites) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for MysqlDoubleWrites, value was %s: %w", val, err)
} else {
- mb.metricMysqlDoubleWrites.recordDataPoint(mb.startTime, ts, i, doubleWritesAttributeValue)
+ mb.metricMysqlDoubleWrites.recordDataPoint(mb.startTime, ts, i, doubleWritesAttributeValue.String())
}
return nil
}
// RecordMysqlHandlersDataPoint adds a data point to mysql.handlers metric.
-func (mb *MetricsBuilder) RecordMysqlHandlersDataPoint(ts pcommon.Timestamp, val string, handlerAttributeValue string) error {
+func (mb *MetricsBuilder) RecordMysqlHandlersDataPoint(ts pcommon.Timestamp, val string, handlerAttributeValue AttributeHandler) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for MysqlHandlers, value was %s: %w", val, err)
} else {
- mb.metricMysqlHandlers.recordDataPoint(mb.startTime, ts, i, handlerAttributeValue)
+ mb.metricMysqlHandlers.recordDataPoint(mb.startTime, ts, i, handlerAttributeValue.String())
}
return nil
}
// RecordMysqlLocksDataPoint adds a data point to mysql.locks metric.
-func (mb *MetricsBuilder) RecordMysqlLocksDataPoint(ts pcommon.Timestamp, val string, locksAttributeValue string) error {
+func (mb *MetricsBuilder) RecordMysqlLocksDataPoint(ts pcommon.Timestamp, val string, locksAttributeValue AttributeLocks) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for MysqlLocks, value was %s: %w", val, err)
} else {
- mb.metricMysqlLocks.recordDataPoint(mb.startTime, ts, i, locksAttributeValue)
+ mb.metricMysqlLocks.recordDataPoint(mb.startTime, ts, i, locksAttributeValue.String())
}
return nil
}
// RecordMysqlLogOperationsDataPoint adds a data point to mysql.log_operations metric.
-func (mb *MetricsBuilder) RecordMysqlLogOperationsDataPoint(ts pcommon.Timestamp, val string, logOperationsAttributeValue string) error {
+func (mb *MetricsBuilder) RecordMysqlLogOperationsDataPoint(ts pcommon.Timestamp, val string, logOperationsAttributeValue AttributeLogOperations) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for MysqlLogOperations, value was %s: %w", val, err)
} else {
- mb.metricMysqlLogOperations.recordDataPoint(mb.startTime, ts, i, logOperationsAttributeValue)
+ mb.metricMysqlLogOperations.recordDataPoint(mb.startTime, ts, i, logOperationsAttributeValue.String())
}
return nil
}
// RecordMysqlOperationsDataPoint adds a data point to mysql.operations metric.
-func (mb *MetricsBuilder) RecordMysqlOperationsDataPoint(ts pcommon.Timestamp, val string, operationsAttributeValue string) error {
+func (mb *MetricsBuilder) RecordMysqlOperationsDataPoint(ts pcommon.Timestamp, val string, operationsAttributeValue AttributeOperations) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for MysqlOperations, value was %s: %w", val, err)
} else {
- mb.metricMysqlOperations.recordDataPoint(mb.startTime, ts, i, operationsAttributeValue)
+ mb.metricMysqlOperations.recordDataPoint(mb.startTime, ts, i, operationsAttributeValue.String())
}
return nil
}
// RecordMysqlPageOperationsDataPoint adds a data point to mysql.page_operations metric.
-func (mb *MetricsBuilder) RecordMysqlPageOperationsDataPoint(ts pcommon.Timestamp, val string, pageOperationsAttributeValue string) error {
+func (mb *MetricsBuilder) RecordMysqlPageOperationsDataPoint(ts pcommon.Timestamp, val string, pageOperationsAttributeValue AttributePageOperations) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for MysqlPageOperations, value was %s: %w", val, err)
} else {
- mb.metricMysqlPageOperations.recordDataPoint(mb.startTime, ts, i, pageOperationsAttributeValue)
+ mb.metricMysqlPageOperations.recordDataPoint(mb.startTime, ts, i, pageOperationsAttributeValue.String())
}
return nil
}
// RecordMysqlRowLocksDataPoint adds a data point to mysql.row_locks metric.
-func (mb *MetricsBuilder) RecordMysqlRowLocksDataPoint(ts pcommon.Timestamp, val string, rowLocksAttributeValue string) error {
+func (mb *MetricsBuilder) RecordMysqlRowLocksDataPoint(ts pcommon.Timestamp, val string, rowLocksAttributeValue AttributeRowLocks) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for MysqlRowLocks, value was %s: %w", val, err)
} else {
- mb.metricMysqlRowLocks.recordDataPoint(mb.startTime, ts, i, rowLocksAttributeValue)
+ mb.metricMysqlRowLocks.recordDataPoint(mb.startTime, ts, i, rowLocksAttributeValue.String())
}
return nil
}
// RecordMysqlRowOperationsDataPoint adds a data point to mysql.row_operations metric.
-func (mb *MetricsBuilder) RecordMysqlRowOperationsDataPoint(ts pcommon.Timestamp, val string, rowOperationsAttributeValue string) error {
+func (mb *MetricsBuilder) RecordMysqlRowOperationsDataPoint(ts pcommon.Timestamp, val string, rowOperationsAttributeValue AttributeRowOperations) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for MysqlRowOperations, value was %s: %w", val, err)
} else {
- mb.metricMysqlRowOperations.recordDataPoint(mb.startTime, ts, i, rowOperationsAttributeValue)
+ mb.metricMysqlRowOperations.recordDataPoint(mb.startTime, ts, i, rowOperationsAttributeValue.String())
}
return nil
}
// RecordMysqlSortsDataPoint adds a data point to mysql.sorts metric.
-func (mb *MetricsBuilder) RecordMysqlSortsDataPoint(ts pcommon.Timestamp, val string, sortsAttributeValue string) error {
+func (mb *MetricsBuilder) RecordMysqlSortsDataPoint(ts pcommon.Timestamp, val string, sortsAttributeValue AttributeSorts) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for MysqlSorts, value was %s: %w", val, err)
} else {
- mb.metricMysqlSorts.recordDataPoint(mb.startTime, ts, i, sortsAttributeValue)
+ mb.metricMysqlSorts.recordDataPoint(mb.startTime, ts, i, sortsAttributeValue.String())
}
return nil
}
// RecordMysqlThreadsDataPoint adds a data point to mysql.threads metric.
-func (mb *MetricsBuilder) RecordMysqlThreadsDataPoint(ts pcommon.Timestamp, val string, threadsAttributeValue string) error {
+func (mb *MetricsBuilder) RecordMysqlThreadsDataPoint(ts pcommon.Timestamp, val string, threadsAttributeValue AttributeThreads) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for MysqlThreads, value was %s: %w", val, err)
} else {
- mb.metricMysqlThreads.recordDataPoint(mb.startTime, ts, i, threadsAttributeValue)
+ mb.metricMysqlThreads.recordDataPoint(mb.startTime, ts, i, threadsAttributeValue.String())
}
return nil
}
@@ -1331,199 +1835,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeBufferPoolData are the possible values that the attribute "buffer_pool_data" can have.
-var AttributeBufferPoolData = struct {
- Dirty string
- Clean string
-}{
- "dirty",
- "clean",
-}
-
-// AttributeBufferPoolOperations are the possible values that the attribute "buffer_pool_operations" can have.
-var AttributeBufferPoolOperations = struct {
- ReadAheadRnd string
- ReadAhead string
- ReadAheadEvicted string
- ReadRequests string
- Reads string
- WaitFree string
- WriteRequests string
-}{
- "read_ahead_rnd",
- "read_ahead",
- "read_ahead_evicted",
- "read_requests",
- "reads",
- "wait_free",
- "write_requests",
-}
-
-// AttributeBufferPoolPages are the possible values that the attribute "buffer_pool_pages" can have.
-var AttributeBufferPoolPages = struct {
- Data string
- Free string
- Misc string
-}{
- "data",
- "free",
- "misc",
-}
-
-// AttributeCommand are the possible values that the attribute "command" can have.
-var AttributeCommand = struct {
- Execute string
- Close string
- Fetch string
- Prepare string
- Reset string
- SendLongData string
-}{
- "execute",
- "close",
- "fetch",
- "prepare",
- "reset",
- "send_long_data",
-}
-
-// AttributeDoubleWrites are the possible values that the attribute "double_writes" can have.
-var AttributeDoubleWrites = struct {
- PagesWritten string
- Writes string
-}{
- "pages_written",
- "writes",
-}
-
-// AttributeHandler are the possible values that the attribute "handler" can have.
-var AttributeHandler = struct {
- Commit string
- Delete string
- Discover string
- ExternalLock string
- MrrInit string
- Prepare string
- ReadFirst string
- ReadKey string
- ReadLast string
- ReadNext string
- ReadPrev string
- ReadRnd string
- ReadRndNext string
- Rollback string
- Savepoint string
- SavepointRollback string
- Update string
- Write string
-}{
- "commit",
- "delete",
- "discover",
- "external_lock",
- "mrr_init",
- "prepare",
- "read_first",
- "read_key",
- "read_last",
- "read_next",
- "read_prev",
- "read_rnd",
- "read_rnd_next",
- "rollback",
- "savepoint",
- "savepoint_rollback",
- "update",
- "write",
-}
-
-// AttributeLocks are the possible values that the attribute "locks" can have.
-var AttributeLocks = struct {
- Immediate string
- Waited string
-}{
- "immediate",
- "waited",
-}
-
-// AttributeLogOperations are the possible values that the attribute "log_operations" can have.
-var AttributeLogOperations = struct {
- Waits string
- WriteRequests string
- Writes string
-}{
- "waits",
- "write_requests",
- "writes",
-}
-
-// AttributeOperations are the possible values that the attribute "operations" can have.
-var AttributeOperations = struct {
- Fsyncs string
- Reads string
- Writes string
-}{
- "fsyncs",
- "reads",
- "writes",
-}
-
-// AttributePageOperations are the possible values that the attribute "page_operations" can have.
-var AttributePageOperations = struct {
- Created string
- Read string
- Written string
-}{
- "created",
- "read",
- "written",
-}
-
-// AttributeRowLocks are the possible values that the attribute "row_locks" can have.
-var AttributeRowLocks = struct {
- Waits string
- Time string
-}{
- "waits",
- "time",
-}
-
-// AttributeRowOperations are the possible values that the attribute "row_operations" can have.
-var AttributeRowOperations = struct {
- Deleted string
- Inserted string
- Read string
- Updated string
-}{
- "deleted",
- "inserted",
- "read",
- "updated",
-}
-
-// AttributeSorts are the possible values that the attribute "sorts" can have.
-var AttributeSorts = struct {
- MergePasses string
- Range string
- Rows string
- Scan string
-}{
- "merge_passes",
- "range",
- "rows",
- "scan",
-}
-
-// AttributeThreads are the possible values that the attribute "threads" can have.
-var AttributeThreads = struct {
- Cached string
- Connected string
- Created string
- Running string
-}{
- "cached",
- "connected",
- "created",
- "running",
-}
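
For metrics whose raw values arrive as strings, the error-returning `Record*DataPoint` functions are kept; only the attribute parameter changes to the typed constant. A hedged sketch of how a caller might drive a few related counters — the `globalStats` map and the helper below are assumptions of this example, not part of the generated API:

```go
package example

import (
	"go.opentelemetry.io/collector/pdata/pcommon"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mysqlreceiver/internal/metadata"
)

// recordStatementCommands records a subset of Com_stmt_* counters using the
// typed AttributeCommand constants. The SHOW GLOBAL STATUS variable names do
// not match the attribute strings one-to-one, so the mapping is spelled out.
func recordStatementCommands(mb *metadata.MetricsBuilder, now pcommon.Timestamp, globalStats map[string]string) []error {
	var errs []error
	for name, cmd := range map[string]metadata.AttributeCommand{
		"Com_stmt_execute": metadata.AttributeCommandExecute,
		"Com_stmt_close":   metadata.AttributeCommandClose,
		"Com_stmt_prepare": metadata.AttributeCommandPrepare,
	} {
		v, ok := globalStats[name]
		if !ok {
			continue
		}
		// RecordMysqlCommandsDataPoint still parses the string value and
		// returns an error when it is not a valid integer.
		if err := mb.RecordMysqlCommandsDataPoint(now, v, cmd); err != nil {
			errs = append(errs, err)
		}
	}
	return errs
}
```
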
diff --git a/receiver/mysqlreceiver/scraper.go b/receiver/mysqlreceiver/scraper.go
index 353e49ed076a..9aa1f6381ed4 100644
--- a/receiver/mysqlreceiver/scraper.go
+++ b/receiver/mysqlreceiver/scraper.go
@@ -105,11 +105,14 @@ func (m *mySQLScraper) scrape(context.Context) (pmetric.Metrics, error) {
// buffer_pool.pages
case "Innodb_buffer_pool_pages_data":
- addPartialIfError(errors, m.mb.RecordMysqlBufferPoolPagesDataPoint(now, v, "data"))
+ addPartialIfError(errors, m.mb.RecordMysqlBufferPoolPagesDataPoint(now, v,
+ metadata.AttributeBufferPoolPagesData))
case "Innodb_buffer_pool_pages_free":
- addPartialIfError(errors, m.mb.RecordMysqlBufferPoolPagesDataPoint(now, v, "free"))
+ addPartialIfError(errors, m.mb.RecordMysqlBufferPoolPagesDataPoint(now, v,
+ metadata.AttributeBufferPoolPagesFree))
case "Innodb_buffer_pool_pages_misc":
- addPartialIfError(errors, m.mb.RecordMysqlBufferPoolPagesDataPoint(now, v, "misc"))
+ addPartialIfError(errors, m.mb.RecordMysqlBufferPoolPagesDataPoint(now, v,
+ metadata.AttributeBufferPoolPagesMisc))
// buffer_pool.page_flushes
case "Innodb_buffer_pool_pages_flushed":
@@ -117,143 +120,154 @@ func (m *mySQLScraper) scrape(context.Context) (pmetric.Metrics, error) {
// buffer_pool.operations
case "Innodb_buffer_pool_read_ahead_rnd":
- addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v, "read_ahead_rnd"))
+ addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v,
+ metadata.AttributeBufferPoolOperationsReadAheadRnd))
case "Innodb_buffer_pool_read_ahead":
- addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v, "read_ahead"))
+ addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v,
+ metadata.AttributeBufferPoolOperationsReadAhead))
case "Innodb_buffer_pool_read_ahead_evicted":
- addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v, "read_ahead_evicted"))
+ addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v,
+ metadata.AttributeBufferPoolOperationsReadAheadEvicted))
case "Innodb_buffer_pool_read_requests":
- addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v, "read_requests"))
+ addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v,
+ metadata.AttributeBufferPoolOperationsReadRequests))
case "Innodb_buffer_pool_reads":
- addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v, "reads"))
+ addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v,
+ metadata.AttributeBufferPoolOperationsReads))
case "Innodb_buffer_pool_wait_free":
- addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v, "wait_free"))
+ addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v,
+ metadata.AttributeBufferPoolOperationsWaitFree))
case "Innodb_buffer_pool_write_requests":
- addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v, "write_requests"))
+ addPartialIfError(errors, m.mb.RecordMysqlBufferPoolOperationsDataPoint(now, v,
+ metadata.AttributeBufferPoolOperationsWriteRequests))
// commands
case "Com_stmt_execute":
- addPartialIfError(errors, m.mb.RecordMysqlCommandsDataPoint(now, v, "execute"))
+ addPartialIfError(errors, m.mb.RecordMysqlCommandsDataPoint(now, v, metadata.AttributeCommandExecute))
case "Com_stmt_close":
- addPartialIfError(errors, m.mb.RecordMysqlCommandsDataPoint(now, v, "close"))
+ addPartialIfError(errors, m.mb.RecordMysqlCommandsDataPoint(now, v, metadata.AttributeCommandClose))
case "Com_stmt_fetch":
- addPartialIfError(errors, m.mb.RecordMysqlCommandsDataPoint(now, v, "fetch"))
+ addPartialIfError(errors, m.mb.RecordMysqlCommandsDataPoint(now, v, metadata.AttributeCommandFetch))
case "Com_stmt_prepare":
- addPartialIfError(errors, m.mb.RecordMysqlCommandsDataPoint(now, v, "prepare"))
+ addPartialIfError(errors, m.mb.RecordMysqlCommandsDataPoint(now, v, metadata.AttributeCommandPrepare))
case "Com_stmt_reset":
- addPartialIfError(errors, m.mb.RecordMysqlCommandsDataPoint(now, v, "reset"))
+ addPartialIfError(errors, m.mb.RecordMysqlCommandsDataPoint(now, v, metadata.AttributeCommandReset))
case "Com_stmt_send_long_data":
- addPartialIfError(errors, m.mb.RecordMysqlCommandsDataPoint(now, v, "send_long_data"))
+ addPartialIfError(errors, m.mb.RecordMysqlCommandsDataPoint(now, v, metadata.AttributeCommandSendLongData))
// handlers
case "Handler_commit":
- addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "commit"))
+ addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, metadata.AttributeHandlerCommit))
case "Handler_delete":
- addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "delete"))
+ addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, metadata.AttributeHandlerDelete))
case "Handler_discover":
- addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "discover"))
+ addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, metadata.AttributeHandlerDiscover))
case "Handler_external_lock":
- addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "external_lock"))
+ addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, metadata.AttributeHandlerExternalLock))
case "Handler_mrr_init":
- addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "mrr_init"))
+ addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, metadata.AttributeHandlerMrrInit))
case "Handler_prepare":
- addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "prepare"))
+ addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, metadata.AttributeHandlerPrepare))
case "Handler_read_first":
- addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "read_first"))
+ addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, metadata.AttributeHandlerReadFirst))
case "Handler_read_key":
- addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "read_key"))
+ addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, metadata.AttributeHandlerReadKey))
case "Handler_read_last":
- addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "read_last"))
+ addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, metadata.AttributeHandlerReadLast))
case "Handler_read_next":
- addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "read_next"))
+ addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, metadata.AttributeHandlerReadNext))
case "Handler_read_prev":
- addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "read_prev"))
+ addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, metadata.AttributeHandlerReadPrev))
case "Handler_read_rnd":
- addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "read_rnd"))
+ addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, metadata.AttributeHandlerReadRnd))
case "Handler_read_rnd_next":
- addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "read_rnd_next"))
+ addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, metadata.AttributeHandlerReadRndNext))
case "Handler_rollback":
- addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "rollback"))
+ addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, metadata.AttributeHandlerRollback))
case "Handler_savepoint":
- addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "savepoint"))
+ addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, metadata.AttributeHandlerSavepoint))
case "Handler_savepoint_rollback":
- addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "savepoint_rollback"))
+ addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, metadata.AttributeHandlerSavepointRollback))
case "Handler_update":
- addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "update"))
+ addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, metadata.AttributeHandlerUpdate))
case "Handler_write":
- addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, "write"))
+ addPartialIfError(errors, m.mb.RecordMysqlHandlersDataPoint(now, v, metadata.AttributeHandlerWrite))
// double_writes
case "Innodb_dblwr_pages_written":
- addPartialIfError(errors, m.mb.RecordMysqlDoubleWritesDataPoint(now, v, "pages_written"))
+ addPartialIfError(errors, m.mb.RecordMysqlDoubleWritesDataPoint(now, v, metadata.AttributeDoubleWritesPagesWritten))
case "Innodb_dblwr_writes":
- addPartialIfError(errors, m.mb.RecordMysqlDoubleWritesDataPoint(now, v, "writes"))
+ addPartialIfError(errors, m.mb.RecordMysqlDoubleWritesDataPoint(now, v, metadata.AttributeDoubleWritesWrites))
// log_operations
case "Innodb_log_waits":
- addPartialIfError(errors, m.mb.RecordMysqlLogOperationsDataPoint(now, v, "waits"))
+ addPartialIfError(errors, m.mb.RecordMysqlLogOperationsDataPoint(now, v, metadata.AttributeLogOperationsWaits))
case "Innodb_log_write_requests":
- addPartialIfError(errors, m.mb.RecordMysqlLogOperationsDataPoint(now, v, "write_requests"))
+ addPartialIfError(errors, m.mb.RecordMysqlLogOperationsDataPoint(now, v, metadata.AttributeLogOperationsWriteRequests))
case "Innodb_log_writes":
- addPartialIfError(errors, m.mb.RecordMysqlLogOperationsDataPoint(now, v, "writes"))
+ addPartialIfError(errors, m.mb.RecordMysqlLogOperationsDataPoint(now, v, metadata.AttributeLogOperationsWrites))
// operations
case "Innodb_data_fsyncs":
- addPartialIfError(errors, m.mb.RecordMysqlOperationsDataPoint(now, v, "fsyncs"))
+ addPartialIfError(errors, m.mb.RecordMysqlOperationsDataPoint(now, v, metadata.AttributeOperationsFsyncs))
case "Innodb_data_reads":
- addPartialIfError(errors, m.mb.RecordMysqlOperationsDataPoint(now, v, "reads"))
+ addPartialIfError(errors, m.mb.RecordMysqlOperationsDataPoint(now, v, metadata.AttributeOperationsReads))
case "Innodb_data_writes":
- addPartialIfError(errors, m.mb.RecordMysqlOperationsDataPoint(now, v, "writes"))
+ addPartialIfError(errors, m.mb.RecordMysqlOperationsDataPoint(now, v, metadata.AttributeOperationsWrites))
// page_operations
case "Innodb_pages_created":
- addPartialIfError(errors, m.mb.RecordMysqlPageOperationsDataPoint(now, v, "created"))
+ addPartialIfError(errors, m.mb.RecordMysqlPageOperationsDataPoint(now, v, metadata.AttributePageOperationsCreated))
case "Innodb_pages_read":
- addPartialIfError(errors, m.mb.RecordMysqlPageOperationsDataPoint(now, v, "read"))
+ addPartialIfError(errors, m.mb.RecordMysqlPageOperationsDataPoint(now, v,
+ metadata.AttributePageOperationsRead))
case "Innodb_pages_written":
- addPartialIfError(errors, m.mb.RecordMysqlPageOperationsDataPoint(now, v, "written"))
+ addPartialIfError(errors, m.mb.RecordMysqlPageOperationsDataPoint(now, v,
+ metadata.AttributePageOperationsWritten))
// row_locks
case "Innodb_row_lock_waits":
- addPartialIfError(errors, m.mb.RecordMysqlRowLocksDataPoint(now, v, "waits"))
+ addPartialIfError(errors, m.mb.RecordMysqlRowLocksDataPoint(now, v, metadata.AttributeRowLocksWaits))
case "Innodb_row_lock_time":
- addPartialIfError(errors, m.mb.RecordMysqlRowLocksDataPoint(now, v, "time"))
+ addPartialIfError(errors, m.mb.RecordMysqlRowLocksDataPoint(now, v, metadata.AttributeRowLocksTime))
// row_operations
case "Innodb_rows_deleted":
- addPartialIfError(errors, m.mb.RecordMysqlRowOperationsDataPoint(now, v, "deleted"))
+ addPartialIfError(errors, m.mb.RecordMysqlRowOperationsDataPoint(now, v, metadata.AttributeRowOperationsDeleted))
case "Innodb_rows_inserted":
- addPartialIfError(errors, m.mb.RecordMysqlRowOperationsDataPoint(now, v, "inserted"))
+ addPartialIfError(errors, m.mb.RecordMysqlRowOperationsDataPoint(now, v, metadata.AttributeRowOperationsInserted))
case "Innodb_rows_read":
- addPartialIfError(errors, m.mb.RecordMysqlRowOperationsDataPoint(now, v, "read"))
+ addPartialIfError(errors, m.mb.RecordMysqlRowOperationsDataPoint(now, v,
+ metadata.AttributeRowOperationsRead))
case "Innodb_rows_updated":
- addPartialIfError(errors, m.mb.RecordMysqlRowOperationsDataPoint(now, v, "updated"))
+ addPartialIfError(errors, m.mb.RecordMysqlRowOperationsDataPoint(now, v,
+ metadata.AttributeRowOperationsUpdated))
// locks
case "Table_locks_immediate":
- addPartialIfError(errors, m.mb.RecordMysqlLocksDataPoint(now, v, "immediate"))
+ addPartialIfError(errors, m.mb.RecordMysqlLocksDataPoint(now, v, metadata.AttributeLocksImmediate))
case "Table_locks_waited":
- addPartialIfError(errors, m.mb.RecordMysqlLocksDataPoint(now, v, "waited"))
+ addPartialIfError(errors, m.mb.RecordMysqlLocksDataPoint(now, v, metadata.AttributeLocksWaited))
// sorts
case "Sort_merge_passes":
- addPartialIfError(errors, m.mb.RecordMysqlSortsDataPoint(now, v, "merge_passes"))
+ addPartialIfError(errors, m.mb.RecordMysqlSortsDataPoint(now, v, metadata.AttributeSortsMergePasses))
case "Sort_range":
- addPartialIfError(errors, m.mb.RecordMysqlSortsDataPoint(now, v, "range"))
+ addPartialIfError(errors, m.mb.RecordMysqlSortsDataPoint(now, v, metadata.AttributeSortsRange))
case "Sort_rows":
- addPartialIfError(errors, m.mb.RecordMysqlSortsDataPoint(now, v, "rows"))
+ addPartialIfError(errors, m.mb.RecordMysqlSortsDataPoint(now, v, metadata.AttributeSortsRows))
case "Sort_scan":
- addPartialIfError(errors, m.mb.RecordMysqlSortsDataPoint(now, v, "scan"))
+ addPartialIfError(errors, m.mb.RecordMysqlSortsDataPoint(now, v, metadata.AttributeSortsScan))
// threads
case "Threads_cached":
- addPartialIfError(errors, m.mb.RecordMysqlThreadsDataPoint(now, v, "cached"))
+ addPartialIfError(errors, m.mb.RecordMysqlThreadsDataPoint(now, v, metadata.AttributeThreadsCached))
case "Threads_connected":
- addPartialIfError(errors, m.mb.RecordMysqlThreadsDataPoint(now, v, "connected"))
+ addPartialIfError(errors, m.mb.RecordMysqlThreadsDataPoint(now, v, metadata.AttributeThreadsConnected))
case "Threads_created":
- addPartialIfError(errors, m.mb.RecordMysqlThreadsDataPoint(now, v, "created"))
+ addPartialIfError(errors, m.mb.RecordMysqlThreadsDataPoint(now, v, metadata.AttributeThreadsCreated))
case "Threads_running":
- addPartialIfError(errors, m.mb.RecordMysqlThreadsDataPoint(now, v, "running"))
+ addPartialIfError(errors, m.mb.RecordMysqlThreadsDataPoint(now, v, metadata.AttributeThreadsRunning))
}
}
@@ -272,14 +286,14 @@ func (m *mySQLScraper) recordDataPages(now pcommon.Timestamp, globalStats map[st
errors.AddPartial(2, err) // we need dirty to calculate free, so 2 data points lost here
return
}
- m.mb.RecordMysqlBufferPoolDataPagesDataPoint(now, dirty, "dirty")
+ m.mb.RecordMysqlBufferPoolDataPagesDataPoint(now, dirty, metadata.AttributeBufferPoolDataDirty)
data, err := parseInt(globalStats["Innodb_buffer_pool_pages_data"])
if err != nil {
errors.AddPartial(1, err)
return
}
- m.mb.RecordMysqlBufferPoolDataPagesDataPoint(now, data-dirty, "clean")
+ m.mb.RecordMysqlBufferPoolDataPagesDataPoint(now, data-dirty, metadata.AttributeBufferPoolDataClean)
}
func (m *mySQLScraper) recordDataUsage(now pcommon.Timestamp, globalStats map[string]string, errors scrapererror.ScrapeErrors) {
@@ -288,14 +302,14 @@ func (m *mySQLScraper) recordDataUsage(now pcommon.Timestamp, globalStats map[st
errors.AddPartial(2, err) // we need dirty to calculate free, so 2 data points lost here
return
}
- m.mb.RecordMysqlBufferPoolUsageDataPoint(now, dirty, "dirty")
+ m.mb.RecordMysqlBufferPoolUsageDataPoint(now, dirty, metadata.AttributeBufferPoolDataDirty)
data, err := parseInt(globalStats["Innodb_buffer_pool_bytes_data"])
if err != nil {
errors.AddPartial(1, err)
return
}
- m.mb.RecordMysqlBufferPoolUsageDataPoint(now, data-dirty, "clean")
+ m.mb.RecordMysqlBufferPoolUsageDataPoint(now, data-dirty, metadata.AttributeBufferPoolDataClean)
}
// parseInt converts string to int64.
diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics_v2.go b/receiver/nginxreceiver/internal/metadata/generated_metrics_v2.go
index a27d7d74b805..cef75e393e0f 100644
--- a/receiver/nginxreceiver/internal/metadata/generated_metrics_v2.go
+++ b/receiver/nginxreceiver/internal/metadata/generated_metrics_v2.go
@@ -39,6 +39,40 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeState specifies the value of the state attribute.
+type AttributeState int
+
+const (
+ _ AttributeState = iota
+ AttributeStateActive
+ AttributeStateReading
+ AttributeStateWriting
+ AttributeStateWaiting
+)
+
+// String returns the string representation of the AttributeState.
+func (av AttributeState) String() string {
+ switch av {
+ case AttributeStateActive:
+ return "active"
+ case AttributeStateReading:
+ return "reading"
+ case AttributeStateWriting:
+ return "writing"
+ case AttributeStateWaiting:
+ return "waiting"
+ }
+ return ""
+}
+
+// MapAttributeState is a helper map of string to AttributeState attribute value.
+var MapAttributeState = map[string]AttributeState{
+ "active": AttributeStateActive,
+ "reading": AttributeStateReading,
+ "writing": AttributeStateWriting,
+ "waiting": AttributeStateWaiting,
+}
+
type metricNginxConnectionsAccepted struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -333,8 +367,8 @@ func (mb *MetricsBuilder) RecordNginxConnectionsAcceptedDataPoint(ts pcommon.Tim
}
// RecordNginxConnectionsCurrentDataPoint adds a data point to nginx.connections_current metric.
-func (mb *MetricsBuilder) RecordNginxConnectionsCurrentDataPoint(ts pcommon.Timestamp, val int64, stateAttributeValue string) {
- mb.metricNginxConnectionsCurrent.recordDataPoint(mb.startTime, ts, val, stateAttributeValue)
+func (mb *MetricsBuilder) RecordNginxConnectionsCurrentDataPoint(ts pcommon.Timestamp, val int64, stateAttributeValue AttributeState) {
+ mb.metricNginxConnectionsCurrent.recordDataPoint(mb.startTime, ts, val, stateAttributeValue.String())
}
// RecordNginxConnectionsHandledDataPoint adds a data point to nginx.connections_handled metric.
@@ -366,16 +400,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeState are the possible values that the attribute "state" can have.
-var AttributeState = struct {
- Active string
- Reading string
- Writing string
- Waiting string
-}{
- "active",
- "reading",
- "writing",
- "waiting",
-}
diff --git a/receiver/nginxreceiver/scraper.go b/receiver/nginxreceiver/scraper.go
index a79dcd70d823..a1752009edd4 100644
--- a/receiver/nginxreceiver/scraper.go
+++ b/receiver/nginxreceiver/scraper.go
@@ -80,10 +80,10 @@ func (r *nginxScraper) scrape(context.Context) (pmetric.Metrics, error) {
r.mb.RecordNginxRequestsDataPoint(now, stats.Requests)
r.mb.RecordNginxConnectionsAcceptedDataPoint(now, stats.Connections.Accepted)
r.mb.RecordNginxConnectionsHandledDataPoint(now, stats.Connections.Handled)
- r.mb.RecordNginxConnectionsCurrentDataPoint(now, stats.Connections.Active, metadata.AttributeState.Active)
- r.mb.RecordNginxConnectionsCurrentDataPoint(now, stats.Connections.Reading, metadata.AttributeState.Reading)
- r.mb.RecordNginxConnectionsCurrentDataPoint(now, stats.Connections.Writing, metadata.AttributeState.Writing)
- r.mb.RecordNginxConnectionsCurrentDataPoint(now, stats.Connections.Waiting, metadata.AttributeState.Waiting)
+ r.mb.RecordNginxConnectionsCurrentDataPoint(now, stats.Connections.Active, metadata.AttributeStateActive)
+ r.mb.RecordNginxConnectionsCurrentDataPoint(now, stats.Connections.Reading, metadata.AttributeStateReading)
+ r.mb.RecordNginxConnectionsCurrentDataPoint(now, stats.Connections.Writing, metadata.AttributeStateWriting)
+ r.mb.RecordNginxConnectionsCurrentDataPoint(now, stats.Connections.Waiting, metadata.AttributeStateWaiting)
return r.mb.Emit(), nil
}
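
Note (illustrative sketch, not part of the patch): besides passing the constants directly as the hunk above does, the generated MapAttributeState helper lets a scraper resolve a raw string, for example a state name read from a status endpoint, into the typed constant before recording. The helper name recordConnectionState and its arguments below are hypothetical; MetricsBuilder, MapAttributeState and RecordNginxConnectionsCurrentDataPoint are the generated APIs shown in this diff, and the snippet assumes the same metadata and pcommon imports as scraper.go.

// recordConnectionState is a hypothetical helper (sketch only): it maps a raw
// string onto the generated AttributeState constant and records the data point,
// silently skipping values that are not part of the enum.
func recordConnectionState(mb *metadata.MetricsBuilder, now pcommon.Timestamp, raw string, val int64) {
	if state, ok := metadata.MapAttributeState[raw]; ok {
		mb.RecordNginxConnectionsCurrentDataPoint(now, val, state)
	}
}
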
diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_metrics_v2.go b/receiver/postgresqlreceiver/internal/metadata/generated_metrics_v2.go
index dfe3623211b1..6454d04ea79f 100644
--- a/receiver/postgresqlreceiver/internal/metadata/generated_metrics_v2.go
+++ b/receiver/postgresqlreceiver/internal/metadata/generated_metrics_v2.go
@@ -51,6 +51,116 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeOperation specifies the value of the operation attribute.
+type AttributeOperation int
+
+const (
+ _ AttributeOperation = iota
+ AttributeOperationIns
+ AttributeOperationUpd
+ AttributeOperationDel
+ AttributeOperationHotUpd
+)
+
+// String returns the string representation of the AttributeOperation.
+func (av AttributeOperation) String() string {
+ switch av {
+ case AttributeOperationIns:
+ return "ins"
+ case AttributeOperationUpd:
+ return "upd"
+ case AttributeOperationDel:
+ return "del"
+ case AttributeOperationHotUpd:
+ return "hot_upd"
+ }
+ return ""
+}
+
+// MapAttributeOperation is a helper map of string to AttributeOperation attribute value.
+var MapAttributeOperation = map[string]AttributeOperation{
+ "ins": AttributeOperationIns,
+ "upd": AttributeOperationUpd,
+ "del": AttributeOperationDel,
+ "hot_upd": AttributeOperationHotUpd,
+}
+
+// AttributeSource specifies the value of the source attribute.
+type AttributeSource int
+
+const (
+ _ AttributeSource = iota
+ AttributeSourceHeapRead
+ AttributeSourceHeapHit
+ AttributeSourceIdxRead
+ AttributeSourceIdxHit
+ AttributeSourceToastRead
+ AttributeSourceToastHit
+ AttributeSourceTidxRead
+ AttributeSourceTidxHit
+)
+
+// String returns the string representation of the AttributeSource.
+func (av AttributeSource) String() string {
+ switch av {
+ case AttributeSourceHeapRead:
+ return "heap_read"
+ case AttributeSourceHeapHit:
+ return "heap_hit"
+ case AttributeSourceIdxRead:
+ return "idx_read"
+ case AttributeSourceIdxHit:
+ return "idx_hit"
+ case AttributeSourceToastRead:
+ return "toast_read"
+ case AttributeSourceToastHit:
+ return "toast_hit"
+ case AttributeSourceTidxRead:
+ return "tidx_read"
+ case AttributeSourceTidxHit:
+ return "tidx_hit"
+ }
+ return ""
+}
+
+// MapAttributeSource is a helper map of string to AttributeSource attribute value.
+var MapAttributeSource = map[string]AttributeSource{
+ "heap_read": AttributeSourceHeapRead,
+ "heap_hit": AttributeSourceHeapHit,
+ "idx_read": AttributeSourceIdxRead,
+ "idx_hit": AttributeSourceIdxHit,
+ "toast_read": AttributeSourceToastRead,
+ "toast_hit": AttributeSourceToastHit,
+ "tidx_read": AttributeSourceTidxRead,
+ "tidx_hit": AttributeSourceTidxHit,
+}
+
+// AttributeState specifies the value of the state attribute.
+type AttributeState int
+
+const (
+ _ AttributeState = iota
+ AttributeStateDead
+ AttributeStateLive
+)
+
+// String returns the string representation of the AttributeState.
+func (av AttributeState) String() string {
+ switch av {
+ case AttributeStateDead:
+ return "dead"
+ case AttributeStateLive:
+ return "live"
+ }
+ return ""
+}
+
+// MapAttributeState is a helper map of string to AttributeState attribute value.
+var MapAttributeState = map[string]AttributeState{
+ "dead": AttributeStateDead,
+ "live": AttributeStateLive,
+}
+
type metricPostgresqlBackends struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -527,8 +637,8 @@ func (mb *MetricsBuilder) RecordPostgresqlBackendsDataPoint(ts pcommon.Timestamp
}
// RecordPostgresqlBlocksReadDataPoint adds a data point to postgresql.blocks_read metric.
-func (mb *MetricsBuilder) RecordPostgresqlBlocksReadDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, sourceAttributeValue string) {
- mb.metricPostgresqlBlocksRead.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, tableAttributeValue, sourceAttributeValue)
+func (mb *MetricsBuilder) RecordPostgresqlBlocksReadDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, sourceAttributeValue AttributeSource) {
+ mb.metricPostgresqlBlocksRead.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, tableAttributeValue, sourceAttributeValue.String())
}
// RecordPostgresqlCommitsDataPoint adds a data point to postgresql.commits metric.
@@ -542,8 +652,8 @@ func (mb *MetricsBuilder) RecordPostgresqlDbSizeDataPoint(ts pcommon.Timestamp,
}
// RecordPostgresqlOperationsDataPoint adds a data point to postgresql.operations metric.
-func (mb *MetricsBuilder) RecordPostgresqlOperationsDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, operationAttributeValue string) {
- mb.metricPostgresqlOperations.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, tableAttributeValue, operationAttributeValue)
+func (mb *MetricsBuilder) RecordPostgresqlOperationsDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, operationAttributeValue AttributeOperation) {
+ mb.metricPostgresqlOperations.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, tableAttributeValue, operationAttributeValue.String())
}
// RecordPostgresqlRollbacksDataPoint adds a data point to postgresql.rollbacks metric.
@@ -552,8 +662,8 @@ func (mb *MetricsBuilder) RecordPostgresqlRollbacksDataPoint(ts pcommon.Timestam
}
// RecordPostgresqlRowsDataPoint adds a data point to postgresql.rows metric.
-func (mb *MetricsBuilder) RecordPostgresqlRowsDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, stateAttributeValue string) {
- mb.metricPostgresqlRows.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, tableAttributeValue, stateAttributeValue)
+func (mb *MetricsBuilder) RecordPostgresqlRowsDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, stateAttributeValue AttributeState) {
+ mb.metricPostgresqlRows.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, tableAttributeValue, stateAttributeValue.String())
}
// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
@@ -587,46 +697,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeOperation are the possible values that the attribute "operation" can have.
-var AttributeOperation = struct {
- Ins string
- Upd string
- Del string
- HotUpd string
-}{
- "ins",
- "upd",
- "del",
- "hot_upd",
-}
-
-// AttributeSource are the possible values that the attribute "source" can have.
-var AttributeSource = struct {
- HeapRead string
- HeapHit string
- IdxRead string
- IdxHit string
- ToastRead string
- ToastHit string
- TidxRead string
- TidxHit string
-}{
- "heap_read",
- "heap_hit",
- "idx_read",
- "idx_hit",
- "toast_read",
- "toast_hit",
- "tidx_read",
- "tidx_hit",
-}
-
-// AttributeState are the possible values that the attribute "state" can have.
-var AttributeState = struct {
- Dead string
- Live string
-}{
- "dead",
- "live",
-}
diff --git a/receiver/postgresqlreceiver/scraper.go b/receiver/postgresqlreceiver/scraper.go
index b79e97009ac5..4840e2aadf41 100644
--- a/receiver/postgresqlreceiver/scraper.go
+++ b/receiver/postgresqlreceiver/scraper.go
@@ -123,13 +123,18 @@ func (p *postgreSQLScraper) collectBlockReads(
return
}
for _, table := range blocksReadByTableMetrics {
- for k, v := range table.stats {
- i, err := p.parseInt(k, v)
+ for sourceKey, source := range metadata.MapAttributeSource {
+ value, ok := table.stats[sourceKey]
+ if !ok {
+ // Data isn't present, error was already logged at a lower level
+ continue
+ }
+ i, err := p.parseInt(sourceKey, value)
if err != nil {
errors.AddPartial(0, err)
continue
}
- p.mb.RecordPostgresqlBlocksReadDataPoint(now, i, table.database, table.table, k)
+ p.mb.RecordPostgresqlBlocksReadDataPoint(now, i, table.database, table.table, source)
}
}
}
@@ -151,32 +156,32 @@ func (p *postgreSQLScraper) collectDatabaseTableMetrics(
return
}
for _, table := range databaseTableMetrics {
- for _, key := range []string{"live", "dead"} {
- value, ok := table.stats[key]
+ for stateKey, state := range metadata.MapAttributeState {
+ value, ok := table.stats[stateKey]
if !ok {
// Data isn't present, error was already logged at a lower level
continue
}
- i, err := p.parseInt(key, value)
+ i, err := p.parseInt(stateKey, value)
if err != nil {
errors.AddPartial(0, err)
continue
}
- p.mb.RecordPostgresqlRowsDataPoint(now, i, table.database, table.table, key)
+ p.mb.RecordPostgresqlRowsDataPoint(now, i, table.database, table.table, state)
}
- for _, key := range []string{"ins", "upd", "del", "hot_upd"} {
- value, ok := table.stats[key]
+ for opKey, op := range metadata.MapAttributeOperation {
+ value, ok := table.stats[opKey]
if !ok {
// Data isn't present, error was already logged at a lower level
continue
}
- i, err := p.parseInt(key, value)
+ i, err := p.parseInt(opKey, value)
if err != nil {
errors.AddPartial(0, err)
continue
}
- p.mb.RecordPostgresqlOperationsDataPoint(now, i, table.database, table.table, key)
+ p.mb.RecordPostgresqlOperationsDataPoint(now, i, table.database, table.table, op)
}
}
}
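
Note (illustrative sketch, not part of the patch): ranging over MapAttributeOperation, as the hunk above does, visits the keys in nondeterministic map order. An equivalent lookup can be driven by the typed constants themselves, deriving the stats key via String(); the snippet assumes the p, table, now and errors variables from the surrounding loop in collectDatabaseTableMetrics.

// Sketch only: the same lookup as above, but iterating a fixed slice of the
// generated constants so the recording order is deterministic.
for _, op := range []metadata.AttributeOperation{
	metadata.AttributeOperationIns,
	metadata.AttributeOperationUpd,
	metadata.AttributeOperationDel,
	metadata.AttributeOperationHotUpd,
} {
	value, ok := table.stats[op.String()]
	if !ok {
		continue // data isn't present; error was already logged at a lower level
	}
	i, err := p.parseInt(op.String(), value)
	if err != nil {
		errors.AddPartial(0, err)
		continue
	}
	p.mb.RecordPostgresqlOperationsDataPoint(now, i, table.database, table.table, op)
}
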
diff --git a/receiver/rabbitmqreceiver/internal/metadata/generated_metrics_v2.go b/receiver/rabbitmqreceiver/internal/metadata/generated_metrics_v2.go
index 6ba10d322da0..9fc0dd8c56bb 100644
--- a/receiver/rabbitmqreceiver/internal/metadata/generated_metrics_v2.go
+++ b/receiver/rabbitmqreceiver/internal/metadata/generated_metrics_v2.go
@@ -47,6 +47,32 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeMessageState specifies the value of the message.state attribute.
+type AttributeMessageState int
+
+const (
+ _ AttributeMessageState = iota
+ AttributeMessageStateReady
+ AttributeMessageStateUnacknowledged
+)
+
+// String returns the string representation of the AttributeMessageState.
+func (av AttributeMessageState) String() string {
+ switch av {
+ case AttributeMessageStateReady:
+ return "ready"
+ case AttributeMessageStateUnacknowledged:
+ return "unacknowledged"
+ }
+ return ""
+}
+
+// MapAttributeMessageState is a helper map of string to AttributeMessageState attribute value.
+var MapAttributeMessageState = map[string]AttributeMessageState{
+ "ready": AttributeMessageStateReady,
+ "unacknowledged": AttributeMessageStateUnacknowledged,
+}
+
type metricRabbitmqConsumerCount struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -477,8 +503,8 @@ func (mb *MetricsBuilder) RecordRabbitmqMessageAcknowledgedDataPoint(ts pcommon.
}
// RecordRabbitmqMessageCurrentDataPoint adds a data point to rabbitmq.message.current metric.
-func (mb *MetricsBuilder) RecordRabbitmqMessageCurrentDataPoint(ts pcommon.Timestamp, val int64, messageStateAttributeValue string) {
- mb.metricRabbitmqMessageCurrent.recordDataPoint(mb.startTime, ts, val, messageStateAttributeValue)
+func (mb *MetricsBuilder) RecordRabbitmqMessageCurrentDataPoint(ts pcommon.Timestamp, val int64, messageStateAttributeValue AttributeMessageState) {
+ mb.metricRabbitmqMessageCurrent.recordDataPoint(mb.startTime, ts, val, messageStateAttributeValue.String())
}
// RecordRabbitmqMessageDeliveredDataPoint adds a data point to rabbitmq.message.delivered metric.
@@ -515,12 +541,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeMessageState are the possible values that the attribute "message.state" can have.
-var AttributeMessageState = struct {
- Ready string
- Unacknowledged string
-}{
- "ready",
- "unacknowledged",
-}
diff --git a/receiver/rabbitmqreceiver/scraper.go b/receiver/rabbitmqreceiver/scraper.go
index 01207ae2939e..f687c85a51e6 100644
--- a/receiver/rabbitmqreceiver/scraper.go
+++ b/receiver/rabbitmqreceiver/scraper.go
@@ -98,8 +98,8 @@ func (r *rabbitmqScraper) scrape(ctx context.Context) (pmetric.Metrics, error) {
// collectQueue collects metrics
func (r *rabbitmqScraper) collectQueue(queue *models.Queue, now pcommon.Timestamp) {
r.mb.RecordRabbitmqConsumerCountDataPoint(now, queue.Consumers)
- r.mb.RecordRabbitmqMessageCurrentDataPoint(now, queue.UnacknowledgedMessages, metadata.AttributeMessageState.Unacknowledged)
- r.mb.RecordRabbitmqMessageCurrentDataPoint(now, queue.ReadyMessages, metadata.AttributeMessageState.Ready)
+ r.mb.RecordRabbitmqMessageCurrentDataPoint(now, queue.UnacknowledgedMessages, metadata.AttributeMessageStateUnacknowledged)
+ r.mb.RecordRabbitmqMessageCurrentDataPoint(now, queue.ReadyMessages, metadata.AttributeMessageStateReady)
for _, messageStatMetric := range messageStatMetrics {
// Get metric value
diff --git a/receiver/riakreceiver/internal/metadata/generated_metrics_v2.go b/receiver/riakreceiver/internal/metadata/generated_metrics_v2.go
index a75043e844ea..88e587131e40 100644
--- a/receiver/riakreceiver/internal/metadata/generated_metrics_v2.go
+++ b/receiver/riakreceiver/internal/metadata/generated_metrics_v2.go
@@ -47,6 +47,62 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeOperation specifies the value of the operation attribute.
+type AttributeOperation int
+
+const (
+ _ AttributeOperation = iota
+ AttributeOperationRead
+ AttributeOperationWrite
+ AttributeOperationDelete
+)
+
+// String returns the string representation of the AttributeOperation.
+func (av AttributeOperation) String() string {
+ switch av {
+ case AttributeOperationRead:
+ return "read"
+ case AttributeOperationWrite:
+ return "write"
+ case AttributeOperationDelete:
+ return "delete"
+ }
+ return ""
+}
+
+// MapAttributeOperation is a helper map of string to AttributeOperation attribute value.
+var MapAttributeOperation = map[string]AttributeOperation{
+ "read": AttributeOperationRead,
+ "write": AttributeOperationWrite,
+ "delete": AttributeOperationDelete,
+}
+
+// AttributeRequest specifies the value of the request attribute.
+type AttributeRequest int
+
+const (
+ _ AttributeRequest = iota
+ AttributeRequestPut
+ AttributeRequestGet
+)
+
+// String returns the string representation of the AttributeRequest.
+func (av AttributeRequest) String() string {
+ switch av {
+ case AttributeRequestPut:
+ return "put"
+ case AttributeRequestGet:
+ return "get"
+ }
+ return ""
+}
+
+// MapAttributeRequest is a helper map of string to AttributeRequest attribute value.
+var MapAttributeRequest = map[string]AttributeRequest{
+ "put": AttributeRequestPut,
+ "get": AttributeRequestGet,
+}
+
type metricRiakMemoryLimit struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -462,13 +518,13 @@ func (mb *MetricsBuilder) RecordRiakMemoryLimitDataPoint(ts pcommon.Timestamp, v
}
// RecordRiakNodeOperationCountDataPoint adds a data point to riak.node.operation.count metric.
-func (mb *MetricsBuilder) RecordRiakNodeOperationCountDataPoint(ts pcommon.Timestamp, val int64, requestAttributeValue string) {
- mb.metricRiakNodeOperationCount.recordDataPoint(mb.startTime, ts, val, requestAttributeValue)
+func (mb *MetricsBuilder) RecordRiakNodeOperationCountDataPoint(ts pcommon.Timestamp, val int64, requestAttributeValue AttributeRequest) {
+ mb.metricRiakNodeOperationCount.recordDataPoint(mb.startTime, ts, val, requestAttributeValue.String())
}
// RecordRiakNodeOperationTimeMeanDataPoint adds a data point to riak.node.operation.time.mean metric.
-func (mb *MetricsBuilder) RecordRiakNodeOperationTimeMeanDataPoint(ts pcommon.Timestamp, val int64, requestAttributeValue string) {
- mb.metricRiakNodeOperationTimeMean.recordDataPoint(mb.startTime, ts, val, requestAttributeValue)
+func (mb *MetricsBuilder) RecordRiakNodeOperationTimeMeanDataPoint(ts pcommon.Timestamp, val int64, requestAttributeValue AttributeRequest) {
+ mb.metricRiakNodeOperationTimeMean.recordDataPoint(mb.startTime, ts, val, requestAttributeValue.String())
}
// RecordRiakNodeReadRepairCountDataPoint adds a data point to riak.node.read_repair.count metric.
@@ -477,13 +533,13 @@ func (mb *MetricsBuilder) RecordRiakNodeReadRepairCountDataPoint(ts pcommon.Time
}
// RecordRiakVnodeIndexOperationCountDataPoint adds a data point to riak.vnode.index.operation.count metric.
-func (mb *MetricsBuilder) RecordRiakVnodeIndexOperationCountDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue string) {
- mb.metricRiakVnodeIndexOperationCount.recordDataPoint(mb.startTime, ts, val, operationAttributeValue)
+func (mb *MetricsBuilder) RecordRiakVnodeIndexOperationCountDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation) {
+ mb.metricRiakVnodeIndexOperationCount.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String())
}
// RecordRiakVnodeOperationCountDataPoint adds a data point to riak.vnode.operation.count metric.
-func (mb *MetricsBuilder) RecordRiakVnodeOperationCountDataPoint(ts pcommon.Timestamp, val int64, requestAttributeValue string) {
- mb.metricRiakVnodeOperationCount.recordDataPoint(mb.startTime, ts, val, requestAttributeValue)
+func (mb *MetricsBuilder) RecordRiakVnodeOperationCountDataPoint(ts pcommon.Timestamp, val int64, requestAttributeValue AttributeRequest) {
+ mb.metricRiakVnodeOperationCount.recordDataPoint(mb.startTime, ts, val, requestAttributeValue.String())
}
// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
@@ -508,23 +564,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeOperation are the possible values that the attribute "operation" can have.
-var AttributeOperation = struct {
- Read string
- Write string
- Delete string
-}{
- "read",
- "write",
- "delete",
-}
-
-// AttributeRequest are the possible values that the attribute "request" can have.
-var AttributeRequest = struct {
- Put string
- Get string
-}{
- "put",
- "get",
-}
diff --git a/receiver/riakreceiver/scraper.go b/receiver/riakreceiver/scraper.go
index 7e08eb5ea535..0ae6c2c30591 100644
--- a/receiver/riakreceiver/scraper.go
+++ b/receiver/riakreceiver/scraper.go
@@ -77,12 +77,12 @@ func (r *riakScraper) collectStats(stat *model.Stats) (pmetric.Metrics, error) {
now := pcommon.NewTimestampFromTime(time.Now())
var errors scrapererror.ScrapeErrors
//scrape node.operation.count metric
- r.mb.RecordRiakNodeOperationCountDataPoint(now, stat.NodeGets, metadata.AttributeRequest.Get)
- r.mb.RecordRiakNodeOperationCountDataPoint(now, stat.NodePuts, metadata.AttributeRequest.Put)
+ r.mb.RecordRiakNodeOperationCountDataPoint(now, stat.NodeGets, metadata.AttributeRequestGet)
+ r.mb.RecordRiakNodeOperationCountDataPoint(now, stat.NodePuts, metadata.AttributeRequestPut)
//scrape node.operation.time.mean metric
- r.mb.RecordRiakNodeOperationTimeMeanDataPoint(now, stat.NodeGetFsmTimeMean, metadata.AttributeRequest.Get)
- r.mb.RecordRiakNodeOperationTimeMeanDataPoint(now, stat.NodePutFsmTimeMean, metadata.AttributeRequest.Put)
+ r.mb.RecordRiakNodeOperationTimeMeanDataPoint(now, stat.NodeGetFsmTimeMean, metadata.AttributeRequestGet)
+ r.mb.RecordRiakNodeOperationTimeMeanDataPoint(now, stat.NodePutFsmTimeMean, metadata.AttributeRequestPut)
//scrape node.read_repair.count metric
r.mb.RecordRiakNodeReadRepairCountDataPoint(now, stat.ReadRepairs)
@@ -91,13 +91,13 @@ func (r *riakScraper) collectStats(stat *model.Stats) (pmetric.Metrics, error) {
r.mb.RecordRiakMemoryLimitDataPoint(now, stat.MemAllocated)
//scrape vnode.operation.count metric
- r.mb.RecordRiakVnodeOperationCountDataPoint(now, stat.VnodeGets, metadata.AttributeRequest.Get)
- r.mb.RecordRiakVnodeOperationCountDataPoint(now, stat.VnodePuts, metadata.AttributeRequest.Put)
+ r.mb.RecordRiakVnodeOperationCountDataPoint(now, stat.VnodeGets, metadata.AttributeRequestGet)
+ r.mb.RecordRiakVnodeOperationCountDataPoint(now, stat.VnodePuts, metadata.AttributeRequestPut)
//scrape vnode.index.operation.count metric
- r.mb.RecordRiakVnodeIndexOperationCountDataPoint(now, stat.VnodeIndexReads, metadata.AttributeOperation.Read)
- r.mb.RecordRiakVnodeIndexOperationCountDataPoint(now, stat.VnodeIndexWrites, metadata.AttributeOperation.Write)
- r.mb.RecordRiakVnodeIndexOperationCountDataPoint(now, stat.VnodeIndexDeletes, metadata.AttributeOperation.Delete)
+ r.mb.RecordRiakVnodeIndexOperationCountDataPoint(now, stat.VnodeIndexReads, metadata.AttributeOperationRead)
+ r.mb.RecordRiakVnodeIndexOperationCountDataPoint(now, stat.VnodeIndexWrites, metadata.AttributeOperationWrite)
+ r.mb.RecordRiakVnodeIndexOperationCountDataPoint(now, stat.VnodeIndexDeletes, metadata.AttributeOperationDelete)
return r.mb.Emit(metadata.WithRiakNodeName(stat.Node)), errors.Combine()
}
diff --git a/receiver/saphanareceiver/internal/metadata/generated_metrics_v2.go b/receiver/saphanareceiver/internal/metadata/generated_metrics_v2.go
index dac9c3aca589..18618852d22e 100644
--- a/receiver/saphanareceiver/internal/metadata/generated_metrics_v2.go
+++ b/receiver/saphanareceiver/internal/metadata/generated_metrics_v2.go
@@ -205,6 +205,518 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeActivePendingRequestState specifies the value of the active_pending_request_state attribute.
+type AttributeActivePendingRequestState int
+
+const (
+ _ AttributeActivePendingRequestState = iota
+ AttributeActivePendingRequestStateActive
+ AttributeActivePendingRequestStatePending
+)
+
+// String returns the string representation of the AttributeActivePendingRequestState.
+func (av AttributeActivePendingRequestState) String() string {
+ switch av {
+ case AttributeActivePendingRequestStateActive:
+ return "active"
+ case AttributeActivePendingRequestStatePending:
+ return "pending"
+ }
+ return ""
+}
+
+// MapAttributeActivePendingRequestState is a helper map of string to AttributeActivePendingRequestState attribute value.
+var MapAttributeActivePendingRequestState = map[string]AttributeActivePendingRequestState{
+ "active": AttributeActivePendingRequestStateActive,
+ "pending": AttributeActivePendingRequestStatePending,
+}
+
+// AttributeColumnMemorySubtype specifies the value of the column_memory_subtype attribute.
+type AttributeColumnMemorySubtype int
+
+const (
+ _ AttributeColumnMemorySubtype = iota
+ AttributeColumnMemorySubtypeData
+ AttributeColumnMemorySubtypeDict
+ AttributeColumnMemorySubtypeIndex
+ AttributeColumnMemorySubtypeMisc
+)
+
+// String returns the string representation of the AttributeColumnMemorySubtype.
+func (av AttributeColumnMemorySubtype) String() string {
+ switch av {
+ case AttributeColumnMemorySubtypeData:
+ return "data"
+ case AttributeColumnMemorySubtypeDict:
+ return "dict"
+ case AttributeColumnMemorySubtypeIndex:
+ return "index"
+ case AttributeColumnMemorySubtypeMisc:
+ return "misc"
+ }
+ return ""
+}
+
+// MapAttributeColumnMemorySubtype is a helper map of string to AttributeColumnMemorySubtype attribute value.
+var MapAttributeColumnMemorySubtype = map[string]AttributeColumnMemorySubtype{
+ "data": AttributeColumnMemorySubtypeData,
+ "dict": AttributeColumnMemorySubtypeDict,
+ "index": AttributeColumnMemorySubtypeIndex,
+ "misc": AttributeColumnMemorySubtypeMisc,
+}
+
+// AttributeColumnMemoryType specifies the value of the column_memory_type attribute.
+type AttributeColumnMemoryType int
+
+const (
+ _ AttributeColumnMemoryType = iota
+ AttributeColumnMemoryTypeMain
+ AttributeColumnMemoryTypeDelta
+)
+
+// String returns the string representation of the AttributeColumnMemoryType.
+func (av AttributeColumnMemoryType) String() string {
+ switch av {
+ case AttributeColumnMemoryTypeMain:
+ return "main"
+ case AttributeColumnMemoryTypeDelta:
+ return "delta"
+ }
+ return ""
+}
+
+// MapAttributeColumnMemoryType is a helper map of string to AttributeColumnMemoryType attribute value.
+var MapAttributeColumnMemoryType = map[string]AttributeColumnMemoryType{
+ "main": AttributeColumnMemoryTypeMain,
+ "delta": AttributeColumnMemoryTypeDelta,
+}
+
+// AttributeConnectionStatus specifies the value of the connection_status attribute.
+type AttributeConnectionStatus int
+
+const (
+ _ AttributeConnectionStatus = iota
+ AttributeConnectionStatusRunning
+ AttributeConnectionStatusIdle
+ AttributeConnectionStatusQueueing
+)
+
+// String returns the string representation of the AttributeConnectionStatus.
+func (av AttributeConnectionStatus) String() string {
+ switch av {
+ case AttributeConnectionStatusRunning:
+ return "running"
+ case AttributeConnectionStatusIdle:
+ return "idle"
+ case AttributeConnectionStatusQueueing:
+ return "queueing"
+ }
+ return ""
+}
+
+// MapAttributeConnectionStatus is a helper map of string to AttributeConnectionStatus attribute value.
+var MapAttributeConnectionStatus = map[string]AttributeConnectionStatus{
+ "running": AttributeConnectionStatusRunning,
+ "idle": AttributeConnectionStatusIdle,
+ "queueing": AttributeConnectionStatusQueueing,
+}
+
+// AttributeCPUType specifies the value of the cpu_type attribute.
+type AttributeCPUType int
+
+const (
+ _ AttributeCPUType = iota
+ AttributeCPUTypeUser
+ AttributeCPUTypeSystem
+ AttributeCPUTypeIoWait
+ AttributeCPUTypeIdle
+)
+
+// String returns the string representation of the AttributeCPUType.
+func (av AttributeCPUType) String() string {
+ switch av {
+ case AttributeCPUTypeUser:
+ return "user"
+ case AttributeCPUTypeSystem:
+ return "system"
+ case AttributeCPUTypeIoWait:
+ return "io_wait"
+ case AttributeCPUTypeIdle:
+ return "idle"
+ }
+ return ""
+}
+
+// MapAttributeCPUType is a helper map of string to AttributeCPUType attribute value.
+var MapAttributeCPUType = map[string]AttributeCPUType{
+ "user": AttributeCPUTypeUser,
+ "system": AttributeCPUTypeSystem,
+ "io_wait": AttributeCPUTypeIoWait,
+ "idle": AttributeCPUTypeIdle,
+}
+
+// AttributeDiskStateUsedFree specifies the value of the disk_state_used_free attribute.
+type AttributeDiskStateUsedFree int
+
+const (
+ _ AttributeDiskStateUsedFree = iota
+ AttributeDiskStateUsedFreeUsed
+ AttributeDiskStateUsedFreeFree
+)
+
+// String returns the string representation of the AttributeDiskStateUsedFree.
+func (av AttributeDiskStateUsedFree) String() string {
+ switch av {
+ case AttributeDiskStateUsedFreeUsed:
+ return "used"
+ case AttributeDiskStateUsedFreeFree:
+ return "free"
+ }
+ return ""
+}
+
+// MapAttributeDiskStateUsedFree is a helper map of string to AttributeDiskStateUsedFree attribute value.
+var MapAttributeDiskStateUsedFree = map[string]AttributeDiskStateUsedFree{
+ "used": AttributeDiskStateUsedFreeUsed,
+ "free": AttributeDiskStateUsedFreeFree,
+}
+
+// AttributeHostSwapState specifies the value of the host_swap_state attribute.
+type AttributeHostSwapState int
+
+const (
+ _ AttributeHostSwapState = iota
+ AttributeHostSwapStateUsed
+ AttributeHostSwapStateFree
+)
+
+// String returns the string representation of the AttributeHostSwapState.
+func (av AttributeHostSwapState) String() string {
+ switch av {
+ case AttributeHostSwapStateUsed:
+ return "used"
+ case AttributeHostSwapStateFree:
+ return "free"
+ }
+ return ""
+}
+
+// MapAttributeHostSwapState is a helper map of string to AttributeHostSwapState attribute value.
+var MapAttributeHostSwapState = map[string]AttributeHostSwapState{
+ "used": AttributeHostSwapStateUsed,
+ "free": AttributeHostSwapStateFree,
+}
+
+// AttributeInternalExternalRequestType specifies the value of the internal_external_request_type attribute.
+type AttributeInternalExternalRequestType int
+
+const (
+ _ AttributeInternalExternalRequestType = iota
+ AttributeInternalExternalRequestTypeInternal
+ AttributeInternalExternalRequestTypeExternal
+)
+
+// String returns the string representation of the AttributeInternalExternalRequestType.
+func (av AttributeInternalExternalRequestType) String() string {
+ switch av {
+ case AttributeInternalExternalRequestTypeInternal:
+ return "internal"
+ case AttributeInternalExternalRequestTypeExternal:
+ return "external"
+ }
+ return ""
+}
+
+// MapAttributeInternalExternalRequestType is a helper map of string to AttributeInternalExternalRequestType attribute value.
+var MapAttributeInternalExternalRequestType = map[string]AttributeInternalExternalRequestType{
+ "internal": AttributeInternalExternalRequestTypeInternal,
+ "external": AttributeInternalExternalRequestTypeExternal,
+}
+
+// AttributeMemoryStateUsedFree specifies the value of the memory_state_used_free attribute.
+type AttributeMemoryStateUsedFree int
+
+const (
+ _ AttributeMemoryStateUsedFree = iota
+ AttributeMemoryStateUsedFreeUsed
+ AttributeMemoryStateUsedFreeFree
+)
+
+// String returns the string representation of the AttributeMemoryStateUsedFree.
+func (av AttributeMemoryStateUsedFree) String() string {
+ switch av {
+ case AttributeMemoryStateUsedFreeUsed:
+ return "used"
+ case AttributeMemoryStateUsedFreeFree:
+ return "free"
+ }
+ return ""
+}
+
+// MapAttributeMemoryStateUsedFree is a helper map of string to AttributeMemoryStateUsedFree attribute value.
+var MapAttributeMemoryStateUsedFree = map[string]AttributeMemoryStateUsedFree{
+ "used": AttributeMemoryStateUsedFreeUsed,
+ "free": AttributeMemoryStateUsedFreeFree,
+}
+
+// AttributeRowMemoryType specifies the value of the row_memory_type attribute.
+type AttributeRowMemoryType int
+
+const (
+ _ AttributeRowMemoryType = iota
+ AttributeRowMemoryTypeFixed
+ AttributeRowMemoryTypeVariable
+)
+
+// String returns the string representation of the AttributeRowMemoryType.
+func (av AttributeRowMemoryType) String() string {
+ switch av {
+ case AttributeRowMemoryTypeFixed:
+ return "fixed"
+ case AttributeRowMemoryTypeVariable:
+ return "variable"
+ }
+ return ""
+}
+
+// MapAttributeRowMemoryType is a helper map of string to AttributeRowMemoryType attribute value.
+var MapAttributeRowMemoryType = map[string]AttributeRowMemoryType{
+ "fixed": AttributeRowMemoryTypeFixed,
+ "variable": AttributeRowMemoryTypeVariable,
+}
+
+// AttributeSchemaMemoryType specifies the value of the schema_memory_type attribute.
+type AttributeSchemaMemoryType int
+
+const (
+ _ AttributeSchemaMemoryType = iota
+ AttributeSchemaMemoryTypeMain
+ AttributeSchemaMemoryTypeDelta
+ AttributeSchemaMemoryTypeHistoryMain
+ AttributeSchemaMemoryTypeHistoryDelta
+)
+
+// String returns the string representation of the AttributeSchemaMemoryType.
+func (av AttributeSchemaMemoryType) String() string {
+ switch av {
+ case AttributeSchemaMemoryTypeMain:
+ return "main"
+ case AttributeSchemaMemoryTypeDelta:
+ return "delta"
+ case AttributeSchemaMemoryTypeHistoryMain:
+ return "history_main"
+ case AttributeSchemaMemoryTypeHistoryDelta:
+ return "history_delta"
+ }
+ return ""
+}
+
+// MapAttributeSchemaMemoryType is a helper map of string to AttributeSchemaMemoryType attribute value.
+var MapAttributeSchemaMemoryType = map[string]AttributeSchemaMemoryType{
+ "main": AttributeSchemaMemoryTypeMain,
+ "delta": AttributeSchemaMemoryTypeDelta,
+ "history_main": AttributeSchemaMemoryTypeHistoryMain,
+ "history_delta": AttributeSchemaMemoryTypeHistoryDelta,
+}
+
+// AttributeSchemaOperationType specifies the value of the schema_operation_type attribute.
+type AttributeSchemaOperationType int
+
+const (
+ _ AttributeSchemaOperationType = iota
+ AttributeSchemaOperationTypeRead
+ AttributeSchemaOperationTypeWrite
+ AttributeSchemaOperationTypeMerge
+)
+
+// String returns the string representation of the AttributeSchemaOperationType.
+func (av AttributeSchemaOperationType) String() string {
+ switch av {
+ case AttributeSchemaOperationTypeRead:
+ return "read"
+ case AttributeSchemaOperationTypeWrite:
+ return "write"
+ case AttributeSchemaOperationTypeMerge:
+ return "merge"
+ }
+ return ""
+}
+
+// MapAttributeSchemaOperationType is a helper map of string to AttributeSchemaOperationType attribute value.
+var MapAttributeSchemaOperationType = map[string]AttributeSchemaOperationType{
+ "read": AttributeSchemaOperationTypeRead,
+ "write": AttributeSchemaOperationTypeWrite,
+ "merge": AttributeSchemaOperationTypeMerge,
+}
+
+// AttributeSchemaRecordType specifies the value of the schema_record_type attribute.
+type AttributeSchemaRecordType int
+
+const (
+ _ AttributeSchemaRecordType = iota
+ AttributeSchemaRecordTypeMain
+ AttributeSchemaRecordTypeDelta
+ AttributeSchemaRecordTypeHistoryMain
+ AttributeSchemaRecordTypeHistoryDelta
+)
+
+// String returns the string representation of the AttributeSchemaRecordType.
+func (av AttributeSchemaRecordType) String() string {
+ switch av {
+ case AttributeSchemaRecordTypeMain:
+ return "main"
+ case AttributeSchemaRecordTypeDelta:
+ return "delta"
+ case AttributeSchemaRecordTypeHistoryMain:
+ return "history_main"
+ case AttributeSchemaRecordTypeHistoryDelta:
+ return "history_delta"
+ }
+ return ""
+}
+
+// MapAttributeSchemaRecordType is a helper map of string to AttributeSchemaRecordType attribute value.
+var MapAttributeSchemaRecordType = map[string]AttributeSchemaRecordType{
+ "main": AttributeSchemaRecordTypeMain,
+ "delta": AttributeSchemaRecordTypeDelta,
+ "history_main": AttributeSchemaRecordTypeHistoryMain,
+ "history_delta": AttributeSchemaRecordTypeHistoryDelta,
+}
+
+// AttributeServiceMemoryUsedType specifies the value of the service_memory_used_type attribute.
+type AttributeServiceMemoryUsedType int
+
+const (
+ _ AttributeServiceMemoryUsedType = iota
+ AttributeServiceMemoryUsedTypeLogical
+ AttributeServiceMemoryUsedTypePhysical
+)
+
+// String returns the string representation of the AttributeServiceMemoryUsedType.
+func (av AttributeServiceMemoryUsedType) String() string {
+ switch av {
+ case AttributeServiceMemoryUsedTypeLogical:
+ return "logical"
+ case AttributeServiceMemoryUsedTypePhysical:
+ return "physical"
+ }
+ return ""
+}
+
+// MapAttributeServiceMemoryUsedType is a helper map of string to AttributeServiceMemoryUsedType attribute value.
+var MapAttributeServiceMemoryUsedType = map[string]AttributeServiceMemoryUsedType{
+ "logical": AttributeServiceMemoryUsedTypeLogical,
+ "physical": AttributeServiceMemoryUsedTypePhysical,
+}
+
+// AttributeServiceStatus specifies the value of the service_status attribute.
+type AttributeServiceStatus int
+
+const (
+ _ AttributeServiceStatus = iota
+ AttributeServiceStatusActive
+ AttributeServiceStatusInactive
+)
+
+// String returns the string representation of the AttributeServiceStatus.
+func (av AttributeServiceStatus) String() string {
+ switch av {
+ case AttributeServiceStatusActive:
+ return "active"
+ case AttributeServiceStatusInactive:
+ return "inactive"
+ }
+ return ""
+}
+
+// MapAttributeServiceStatus is a helper map of string to AttributeServiceStatus attribute value.
+var MapAttributeServiceStatus = map[string]AttributeServiceStatus{
+ "active": AttributeServiceStatusActive,
+ "inactive": AttributeServiceStatusInactive,
+}
+
+// AttributeThreadStatus specifies the value of the thread_status attribute.
+type AttributeThreadStatus int
+
+const (
+ _ AttributeThreadStatus = iota
+ AttributeThreadStatusActive
+ AttributeThreadStatusInactive
+)
+
+// String returns the string representation of the AttributeThreadStatus.
+func (av AttributeThreadStatus) String() string {
+ switch av {
+ case AttributeThreadStatusActive:
+ return "active"
+ case AttributeThreadStatusInactive:
+ return "inactive"
+ }
+ return ""
+}
+
+// MapAttributeThreadStatus is a helper map of string to AttributeThreadStatus attribute value.
+var MapAttributeThreadStatus = map[string]AttributeThreadStatus{
+ "active": AttributeThreadStatusActive,
+ "inactive": AttributeThreadStatusInactive,
+}
+
+// AttributeTransactionType specifies the value of the transaction_type attribute.
+type AttributeTransactionType int
+
+const (
+ _ AttributeTransactionType = iota
+ AttributeTransactionTypeUpdate
+ AttributeTransactionTypeCommit
+ AttributeTransactionTypeRollback
+)
+
+// String returns the string representation of the AttributeTransactionType.
+func (av AttributeTransactionType) String() string {
+ switch av {
+ case AttributeTransactionTypeUpdate:
+ return "update"
+ case AttributeTransactionTypeCommit:
+ return "commit"
+ case AttributeTransactionTypeRollback:
+ return "rollback"
+ }
+ return ""
+}
+
+// MapAttributeTransactionType is a helper map of string to AttributeTransactionType attribute value.
+var MapAttributeTransactionType = map[string]AttributeTransactionType{
+ "update": AttributeTransactionTypeUpdate,
+ "commit": AttributeTransactionTypeCommit,
+ "rollback": AttributeTransactionTypeRollback,
+}
+
+// AttributeVolumeOperationType specifies the value of the volume_operation_type attribute.
+type AttributeVolumeOperationType int
+
+const (
+ _ AttributeVolumeOperationType = iota
+ AttributeVolumeOperationTypeRead
+ AttributeVolumeOperationTypeWrite
+)
+
+// String returns the string representation of the AttributeVolumeOperationType.
+func (av AttributeVolumeOperationType) String() string {
+ switch av {
+ case AttributeVolumeOperationTypeRead:
+ return "read"
+ case AttributeVolumeOperationTypeWrite:
+ return "write"
+ }
+ return ""
+}
+
+// MapAttributeVolumeOperationType is a helper map of string to AttributeVolumeOperationType attribute value.
+var MapAttributeVolumeOperationType = map[string]AttributeVolumeOperationType{
+ "read": AttributeVolumeOperationTypeRead,
+ "write": AttributeVolumeOperationTypeWrite,
+}
+
type metricSaphanaAlertCount struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -2840,11 +3352,11 @@ func (mb *MetricsBuilder) RecordSaphanaBackupLatestDataPoint(ts pcommon.Timestam
}
// RecordSaphanaColumnMemoryUsedDataPoint adds a data point to saphana.column.memory.used metric.
-func (mb *MetricsBuilder) RecordSaphanaColumnMemoryUsedDataPoint(ts pcommon.Timestamp, val string, columnMemoryTypeAttributeValue string, columnMemorySubtypeAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaColumnMemoryUsedDataPoint(ts pcommon.Timestamp, val string, columnMemoryTypeAttributeValue AttributeColumnMemoryType, columnMemorySubtypeAttributeValue AttributeColumnMemorySubtype) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaColumnMemoryUsed, value was %s: %w", val, err)
} else {
- mb.metricSaphanaColumnMemoryUsed.recordDataPoint(mb.startTime, ts, i, columnMemoryTypeAttributeValue, columnMemorySubtypeAttributeValue)
+ mb.metricSaphanaColumnMemoryUsed.recordDataPoint(mb.startTime, ts, i, columnMemoryTypeAttributeValue.String(), columnMemorySubtypeAttributeValue.String())
}
return nil
}
@@ -2860,51 +3372,51 @@ func (mb *MetricsBuilder) RecordSaphanaComponentMemoryUsedDataPoint(ts pcommon.T
}
// RecordSaphanaConnectionCountDataPoint adds a data point to saphana.connection.count metric.
-func (mb *MetricsBuilder) RecordSaphanaConnectionCountDataPoint(ts pcommon.Timestamp, val string, connectionStatusAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaConnectionCountDataPoint(ts pcommon.Timestamp, val string, connectionStatusAttributeValue AttributeConnectionStatus) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaConnectionCount, value was %s: %w", val, err)
} else {
- mb.metricSaphanaConnectionCount.recordDataPoint(mb.startTime, ts, i, connectionStatusAttributeValue)
+ mb.metricSaphanaConnectionCount.recordDataPoint(mb.startTime, ts, i, connectionStatusAttributeValue.String())
}
return nil
}
// RecordSaphanaCPUUsedDataPoint adds a data point to saphana.cpu.used metric.
-func (mb *MetricsBuilder) RecordSaphanaCPUUsedDataPoint(ts pcommon.Timestamp, val string, cpuTypeAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaCPUUsedDataPoint(ts pcommon.Timestamp, val string, cpuTypeAttributeValue AttributeCPUType) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaCPUUsed, value was %s: %w", val, err)
} else {
- mb.metricSaphanaCPUUsed.recordDataPoint(mb.startTime, ts, i, cpuTypeAttributeValue)
+ mb.metricSaphanaCPUUsed.recordDataPoint(mb.startTime, ts, i, cpuTypeAttributeValue.String())
}
return nil
}
// RecordSaphanaDiskSizeCurrentDataPoint adds a data point to saphana.disk.size.current metric.
-func (mb *MetricsBuilder) RecordSaphanaDiskSizeCurrentDataPoint(ts pcommon.Timestamp, val string, pathAttributeValue string, diskUsageTypeAttributeValue string, diskStateUsedFreeAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaDiskSizeCurrentDataPoint(ts pcommon.Timestamp, val string, pathAttributeValue string, diskUsageTypeAttributeValue string, diskStateUsedFreeAttributeValue AttributeDiskStateUsedFree) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaDiskSizeCurrent, value was %s: %w", val, err)
} else {
- mb.metricSaphanaDiskSizeCurrent.recordDataPoint(mb.startTime, ts, i, pathAttributeValue, diskUsageTypeAttributeValue, diskStateUsedFreeAttributeValue)
+ mb.metricSaphanaDiskSizeCurrent.recordDataPoint(mb.startTime, ts, i, pathAttributeValue, diskUsageTypeAttributeValue, diskStateUsedFreeAttributeValue.String())
}
return nil
}
// RecordSaphanaHostMemoryCurrentDataPoint adds a data point to saphana.host.memory.current metric.
-func (mb *MetricsBuilder) RecordSaphanaHostMemoryCurrentDataPoint(ts pcommon.Timestamp, val string, memoryStateUsedFreeAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaHostMemoryCurrentDataPoint(ts pcommon.Timestamp, val string, memoryStateUsedFreeAttributeValue AttributeMemoryStateUsedFree) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaHostMemoryCurrent, value was %s: %w", val, err)
} else {
- mb.metricSaphanaHostMemoryCurrent.recordDataPoint(mb.startTime, ts, i, memoryStateUsedFreeAttributeValue)
+ mb.metricSaphanaHostMemoryCurrent.recordDataPoint(mb.startTime, ts, i, memoryStateUsedFreeAttributeValue.String())
}
return nil
}
// RecordSaphanaHostSwapCurrentDataPoint adds a data point to saphana.host.swap.current metric.
-func (mb *MetricsBuilder) RecordSaphanaHostSwapCurrentDataPoint(ts pcommon.Timestamp, val string, hostSwapStateAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaHostSwapCurrentDataPoint(ts pcommon.Timestamp, val string, hostSwapStateAttributeValue AttributeHostSwapState) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaHostSwapCurrent, value was %s: %w", val, err)
} else {
- mb.metricSaphanaHostSwapCurrent.recordDataPoint(mb.startTime, ts, i, hostSwapStateAttributeValue)
+ mb.metricSaphanaHostSwapCurrent.recordDataPoint(mb.startTime, ts, i, hostSwapStateAttributeValue.String())
}
return nil
}
@@ -2920,11 +3432,11 @@ func (mb *MetricsBuilder) RecordSaphanaInstanceCodeSizeDataPoint(ts pcommon.Time
}
// RecordSaphanaInstanceMemoryCurrentDataPoint adds a data point to saphana.instance.memory.current metric.
-func (mb *MetricsBuilder) RecordSaphanaInstanceMemoryCurrentDataPoint(ts pcommon.Timestamp, val string, memoryStateUsedFreeAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaInstanceMemoryCurrentDataPoint(ts pcommon.Timestamp, val string, memoryStateUsedFreeAttributeValue AttributeMemoryStateUsedFree) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaInstanceMemoryCurrent, value was %s: %w", val, err)
} else {
- mb.metricSaphanaInstanceMemoryCurrent.recordDataPoint(mb.startTime, ts, i, memoryStateUsedFreeAttributeValue)
+ mb.metricSaphanaInstanceMemoryCurrent.recordDataPoint(mb.startTime, ts, i, memoryStateUsedFreeAttributeValue.String())
}
return nil
}
@@ -2990,21 +3502,21 @@ func (mb *MetricsBuilder) RecordSaphanaNetworkRequestAverageTimeDataPoint(ts pco
}
// RecordSaphanaNetworkRequestCountDataPoint adds a data point to saphana.network.request.count metric.
-func (mb *MetricsBuilder) RecordSaphanaNetworkRequestCountDataPoint(ts pcommon.Timestamp, val string, activePendingRequestStateAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaNetworkRequestCountDataPoint(ts pcommon.Timestamp, val string, activePendingRequestStateAttributeValue AttributeActivePendingRequestState) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaNetworkRequestCount, value was %s: %w", val, err)
} else {
- mb.metricSaphanaNetworkRequestCount.recordDataPoint(mb.startTime, ts, i, activePendingRequestStateAttributeValue)
+ mb.metricSaphanaNetworkRequestCount.recordDataPoint(mb.startTime, ts, i, activePendingRequestStateAttributeValue.String())
}
return nil
}
// RecordSaphanaNetworkRequestFinishedCountDataPoint adds a data point to saphana.network.request.finished.count metric.
-func (mb *MetricsBuilder) RecordSaphanaNetworkRequestFinishedCountDataPoint(ts pcommon.Timestamp, val string, internalExternalRequestTypeAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaNetworkRequestFinishedCountDataPoint(ts pcommon.Timestamp, val string, internalExternalRequestTypeAttributeValue AttributeInternalExternalRequestType) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaNetworkRequestFinishedCount, value was %s: %w", val, err)
} else {
- mb.metricSaphanaNetworkRequestFinishedCount.recordDataPoint(mb.startTime, ts, i, internalExternalRequestTypeAttributeValue)
+ mb.metricSaphanaNetworkRequestFinishedCount.recordDataPoint(mb.startTime, ts, i, internalExternalRequestTypeAttributeValue.String())
}
return nil
}
@@ -3040,21 +3552,21 @@ func (mb *MetricsBuilder) RecordSaphanaReplicationBacklogTimeDataPoint(ts pcommo
}
// RecordSaphanaRowStoreMemoryUsedDataPoint adds a data point to saphana.row_store.memory.used metric.
-func (mb *MetricsBuilder) RecordSaphanaRowStoreMemoryUsedDataPoint(ts pcommon.Timestamp, val string, rowMemoryTypeAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaRowStoreMemoryUsedDataPoint(ts pcommon.Timestamp, val string, rowMemoryTypeAttributeValue AttributeRowMemoryType) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaRowStoreMemoryUsed, value was %s: %w", val, err)
} else {
- mb.metricSaphanaRowStoreMemoryUsed.recordDataPoint(mb.startTime, ts, i, rowMemoryTypeAttributeValue)
+ mb.metricSaphanaRowStoreMemoryUsed.recordDataPoint(mb.startTime, ts, i, rowMemoryTypeAttributeValue.String())
}
return nil
}
// RecordSaphanaSchemaMemoryUsedCurrentDataPoint adds a data point to saphana.schema.memory.used.current metric.
-func (mb *MetricsBuilder) RecordSaphanaSchemaMemoryUsedCurrentDataPoint(ts pcommon.Timestamp, val string, schemaAttributeValue string, schemaMemoryTypeAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaSchemaMemoryUsedCurrentDataPoint(ts pcommon.Timestamp, val string, schemaAttributeValue string, schemaMemoryTypeAttributeValue AttributeSchemaMemoryType) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaSchemaMemoryUsedCurrent, value was %s: %w", val, err)
} else {
- mb.metricSaphanaSchemaMemoryUsedCurrent.recordDataPoint(mb.startTime, ts, i, schemaAttributeValue, schemaMemoryTypeAttributeValue)
+ mb.metricSaphanaSchemaMemoryUsedCurrent.recordDataPoint(mb.startTime, ts, i, schemaAttributeValue, schemaMemoryTypeAttributeValue.String())
}
return nil
}
@@ -3070,11 +3582,11 @@ func (mb *MetricsBuilder) RecordSaphanaSchemaMemoryUsedMaxDataPoint(ts pcommon.T
}
// RecordSaphanaSchemaOperationCountDataPoint adds a data point to saphana.schema.operation.count metric.
-func (mb *MetricsBuilder) RecordSaphanaSchemaOperationCountDataPoint(ts pcommon.Timestamp, val string, schemaAttributeValue string, schemaOperationTypeAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaSchemaOperationCountDataPoint(ts pcommon.Timestamp, val string, schemaAttributeValue string, schemaOperationTypeAttributeValue AttributeSchemaOperationType) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaSchemaOperationCount, value was %s: %w", val, err)
} else {
- mb.metricSaphanaSchemaOperationCount.recordDataPoint(mb.startTime, ts, i, schemaAttributeValue, schemaOperationTypeAttributeValue)
+ mb.metricSaphanaSchemaOperationCount.recordDataPoint(mb.startTime, ts, i, schemaAttributeValue, schemaOperationTypeAttributeValue.String())
}
return nil
}
@@ -3090,11 +3602,11 @@ func (mb *MetricsBuilder) RecordSaphanaSchemaRecordCompressedCountDataPoint(ts p
}
// RecordSaphanaSchemaRecordCountDataPoint adds a data point to saphana.schema.record.count metric.
-func (mb *MetricsBuilder) RecordSaphanaSchemaRecordCountDataPoint(ts pcommon.Timestamp, val string, schemaAttributeValue string, schemaRecordTypeAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaSchemaRecordCountDataPoint(ts pcommon.Timestamp, val string, schemaAttributeValue string, schemaRecordTypeAttributeValue AttributeSchemaRecordType) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaSchemaRecordCount, value was %s: %w", val, err)
} else {
- mb.metricSaphanaSchemaRecordCount.recordDataPoint(mb.startTime, ts, i, schemaAttributeValue, schemaRecordTypeAttributeValue)
+ mb.metricSaphanaSchemaRecordCount.recordDataPoint(mb.startTime, ts, i, schemaAttributeValue, schemaRecordTypeAttributeValue.String())
}
return nil
}
@@ -3110,11 +3622,11 @@ func (mb *MetricsBuilder) RecordSaphanaServiceCodeSizeDataPoint(ts pcommon.Times
}
// RecordSaphanaServiceCountDataPoint adds a data point to saphana.service.count metric.
-func (mb *MetricsBuilder) RecordSaphanaServiceCountDataPoint(ts pcommon.Timestamp, val string, serviceStatusAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaServiceCountDataPoint(ts pcommon.Timestamp, val string, serviceStatusAttributeValue AttributeServiceStatus) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaServiceCount, value was %s: %w", val, err)
} else {
- mb.metricSaphanaServiceCount.recordDataPoint(mb.startTime, ts, i, serviceStatusAttributeValue)
+ mb.metricSaphanaServiceCount.recordDataPoint(mb.startTime, ts, i, serviceStatusAttributeValue.String())
}
return nil
}
@@ -3150,11 +3662,11 @@ func (mb *MetricsBuilder) RecordSaphanaServiceMemoryEffectiveLimitDataPoint(ts p
}
// RecordSaphanaServiceMemoryHeapCurrentDataPoint adds a data point to saphana.service.memory.heap.current metric.
-func (mb *MetricsBuilder) RecordSaphanaServiceMemoryHeapCurrentDataPoint(ts pcommon.Timestamp, val string, serviceAttributeValue string, memoryStateUsedFreeAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaServiceMemoryHeapCurrentDataPoint(ts pcommon.Timestamp, val string, serviceAttributeValue string, memoryStateUsedFreeAttributeValue AttributeMemoryStateUsedFree) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaServiceMemoryHeapCurrent, value was %s: %w", val, err)
} else {
- mb.metricSaphanaServiceMemoryHeapCurrent.recordDataPoint(mb.startTime, ts, i, serviceAttributeValue, memoryStateUsedFreeAttributeValue)
+ mb.metricSaphanaServiceMemoryHeapCurrent.recordDataPoint(mb.startTime, ts, i, serviceAttributeValue, memoryStateUsedFreeAttributeValue.String())
}
return nil
}
@@ -3170,21 +3682,21 @@ func (mb *MetricsBuilder) RecordSaphanaServiceMemoryLimitDataPoint(ts pcommon.Ti
}
// RecordSaphanaServiceMemorySharedCurrentDataPoint adds a data point to saphana.service.memory.shared.current metric.
-func (mb *MetricsBuilder) RecordSaphanaServiceMemorySharedCurrentDataPoint(ts pcommon.Timestamp, val string, serviceAttributeValue string, memoryStateUsedFreeAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaServiceMemorySharedCurrentDataPoint(ts pcommon.Timestamp, val string, serviceAttributeValue string, memoryStateUsedFreeAttributeValue AttributeMemoryStateUsedFree) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaServiceMemorySharedCurrent, value was %s: %w", val, err)
} else {
- mb.metricSaphanaServiceMemorySharedCurrent.recordDataPoint(mb.startTime, ts, i, serviceAttributeValue, memoryStateUsedFreeAttributeValue)
+ mb.metricSaphanaServiceMemorySharedCurrent.recordDataPoint(mb.startTime, ts, i, serviceAttributeValue, memoryStateUsedFreeAttributeValue.String())
}
return nil
}
// RecordSaphanaServiceMemoryUsedDataPoint adds a data point to saphana.service.memory.used metric.
-func (mb *MetricsBuilder) RecordSaphanaServiceMemoryUsedDataPoint(ts pcommon.Timestamp, val string, serviceAttributeValue string, serviceMemoryUsedTypeAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaServiceMemoryUsedDataPoint(ts pcommon.Timestamp, val string, serviceAttributeValue string, serviceMemoryUsedTypeAttributeValue AttributeServiceMemoryUsedType) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaServiceMemoryUsed, value was %s: %w", val, err)
} else {
- mb.metricSaphanaServiceMemoryUsed.recordDataPoint(mb.startTime, ts, i, serviceAttributeValue, serviceMemoryUsedTypeAttributeValue)
+ mb.metricSaphanaServiceMemoryUsed.recordDataPoint(mb.startTime, ts, i, serviceAttributeValue, serviceMemoryUsedTypeAttributeValue.String())
}
return nil
}
@@ -3200,11 +3712,11 @@ func (mb *MetricsBuilder) RecordSaphanaServiceStackSizeDataPoint(ts pcommon.Time
}
// RecordSaphanaServiceThreadCountDataPoint adds a data point to saphana.service.thread.count metric.
-func (mb *MetricsBuilder) RecordSaphanaServiceThreadCountDataPoint(ts pcommon.Timestamp, val string, threadStatusAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaServiceThreadCountDataPoint(ts pcommon.Timestamp, val string, threadStatusAttributeValue AttributeThreadStatus) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaServiceThreadCount, value was %s: %w", val, err)
} else {
- mb.metricSaphanaServiceThreadCount.recordDataPoint(mb.startTime, ts, i, threadStatusAttributeValue)
+ mb.metricSaphanaServiceThreadCount.recordDataPoint(mb.startTime, ts, i, threadStatusAttributeValue.String())
}
return nil
}
@@ -3220,11 +3732,11 @@ func (mb *MetricsBuilder) RecordSaphanaTransactionBlockedDataPoint(ts pcommon.Ti
}
// RecordSaphanaTransactionCountDataPoint adds a data point to saphana.transaction.count metric.
-func (mb *MetricsBuilder) RecordSaphanaTransactionCountDataPoint(ts pcommon.Timestamp, val string, transactionTypeAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaTransactionCountDataPoint(ts pcommon.Timestamp, val string, transactionTypeAttributeValue AttributeTransactionType) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaTransactionCount, value was %s: %w", val, err)
} else {
- mb.metricSaphanaTransactionCount.recordDataPoint(mb.startTime, ts, i, transactionTypeAttributeValue)
+ mb.metricSaphanaTransactionCount.recordDataPoint(mb.startTime, ts, i, transactionTypeAttributeValue.String())
}
return nil
}
@@ -3240,31 +3752,31 @@ func (mb *MetricsBuilder) RecordSaphanaUptimeDataPoint(ts pcommon.Timestamp, val
}
// RecordSaphanaVolumeOperationCountDataPoint adds a data point to saphana.volume.operation.count metric.
-func (mb *MetricsBuilder) RecordSaphanaVolumeOperationCountDataPoint(ts pcommon.Timestamp, val string, pathAttributeValue string, diskUsageTypeAttributeValue string, volumeOperationTypeAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaVolumeOperationCountDataPoint(ts pcommon.Timestamp, val string, pathAttributeValue string, diskUsageTypeAttributeValue string, volumeOperationTypeAttributeValue AttributeVolumeOperationType) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaVolumeOperationCount, value was %s: %w", val, err)
} else {
- mb.metricSaphanaVolumeOperationCount.recordDataPoint(mb.startTime, ts, i, pathAttributeValue, diskUsageTypeAttributeValue, volumeOperationTypeAttributeValue)
+ mb.metricSaphanaVolumeOperationCount.recordDataPoint(mb.startTime, ts, i, pathAttributeValue, diskUsageTypeAttributeValue, volumeOperationTypeAttributeValue.String())
}
return nil
}
// RecordSaphanaVolumeOperationSizeDataPoint adds a data point to saphana.volume.operation.size metric.
-func (mb *MetricsBuilder) RecordSaphanaVolumeOperationSizeDataPoint(ts pcommon.Timestamp, val string, pathAttributeValue string, diskUsageTypeAttributeValue string, volumeOperationTypeAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaVolumeOperationSizeDataPoint(ts pcommon.Timestamp, val string, pathAttributeValue string, diskUsageTypeAttributeValue string, volumeOperationTypeAttributeValue AttributeVolumeOperationType) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaVolumeOperationSize, value was %s: %w", val, err)
} else {
- mb.metricSaphanaVolumeOperationSize.recordDataPoint(mb.startTime, ts, i, pathAttributeValue, diskUsageTypeAttributeValue, volumeOperationTypeAttributeValue)
+ mb.metricSaphanaVolumeOperationSize.recordDataPoint(mb.startTime, ts, i, pathAttributeValue, diskUsageTypeAttributeValue, volumeOperationTypeAttributeValue.String())
}
return nil
}
// RecordSaphanaVolumeOperationTimeDataPoint adds a data point to saphana.volume.operation.time metric.
-func (mb *MetricsBuilder) RecordSaphanaVolumeOperationTimeDataPoint(ts pcommon.Timestamp, val string, pathAttributeValue string, diskUsageTypeAttributeValue string, volumeOperationTypeAttributeValue string) error {
+func (mb *MetricsBuilder) RecordSaphanaVolumeOperationTimeDataPoint(ts pcommon.Timestamp, val string, pathAttributeValue string, diskUsageTypeAttributeValue string, volumeOperationTypeAttributeValue AttributeVolumeOperationType) error {
if i, err := strconv.ParseInt(val, 10, 64); err != nil {
return fmt.Errorf("failed to parse int for SaphanaVolumeOperationTime, value was %s: %w", val, err)
} else {
- mb.metricSaphanaVolumeOperationTime.recordDataPoint(mb.startTime, ts, i, pathAttributeValue, diskUsageTypeAttributeValue, volumeOperationTypeAttributeValue)
+ mb.metricSaphanaVolumeOperationTime.recordDataPoint(mb.startTime, ts, i, pathAttributeValue, diskUsageTypeAttributeValue, volumeOperationTypeAttributeValue.String())
}
return nil
}
@@ -3378,187 +3890,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeActivePendingRequestState are the possible values that the attribute "active_pending_request_state" can have.
-var AttributeActivePendingRequestState = struct {
- Active string
- Pending string
-}{
- "active",
- "pending",
-}
-
-// AttributeColumnMemorySubtype are the possible values that the attribute "column_memory_subtype" can have.
-var AttributeColumnMemorySubtype = struct {
- Data string
- Dict string
- Index string
- Misc string
-}{
- "data",
- "dict",
- "index",
- "misc",
-}
-
-// AttributeColumnMemoryType are the possible values that the attribute "column_memory_type" can have.
-var AttributeColumnMemoryType = struct {
- Main string
- Delta string
-}{
- "main",
- "delta",
-}
-
-// AttributeConnectionStatus are the possible values that the attribute "connection_status" can have.
-var AttributeConnectionStatus = struct {
- Running string
- Idle string
- Queueing string
-}{
- "running",
- "idle",
- "queueing",
-}
-
-// AttributeCPUType are the possible values that the attribute "cpu_type" can have.
-var AttributeCPUType = struct {
- User string
- System string
- IoWait string
- Idle string
-}{
- "user",
- "system",
- "io_wait",
- "idle",
-}
-
-// AttributeDiskStateUsedFree are the possible values that the attribute "disk_state_used_free" can have.
-var AttributeDiskStateUsedFree = struct {
- Used string
- Free string
-}{
- "used",
- "free",
-}
-
-// AttributeHostSwapState are the possible values that the attribute "host_swap_state" can have.
-var AttributeHostSwapState = struct {
- Used string
- Free string
-}{
- "used",
- "free",
-}
-
-// AttributeInternalExternalRequestType are the possible values that the attribute "internal_external_request_type" can have.
-var AttributeInternalExternalRequestType = struct {
- Internal string
- External string
-}{
- "internal",
- "external",
-}
-
-// AttributeMemoryStateUsedFree are the possible values that the attribute "memory_state_used_free" can have.
-var AttributeMemoryStateUsedFree = struct {
- Used string
- Free string
-}{
- "used",
- "free",
-}
-
-// AttributeRowMemoryType are the possible values that the attribute "row_memory_type" can have.
-var AttributeRowMemoryType = struct {
- Fixed string
- Variable string
-}{
- "fixed",
- "variable",
-}
-
-// AttributeSchemaMemoryType are the possible values that the attribute "schema_memory_type" can have.
-var AttributeSchemaMemoryType = struct {
- Main string
- Delta string
- HistoryMain string
- HistoryDelta string
-}{
- "main",
- "delta",
- "history_main",
- "history_delta",
-}
-
-// AttributeSchemaOperationType are the possible values that the attribute "schema_operation_type" can have.
-var AttributeSchemaOperationType = struct {
- Read string
- Write string
- Merge string
-}{
- "read",
- "write",
- "merge",
-}
-
-// AttributeSchemaRecordType are the possible values that the attribute "schema_record_type" can have.
-var AttributeSchemaRecordType = struct {
- Main string
- Delta string
- HistoryMain string
- HistoryDelta string
-}{
- "main",
- "delta",
- "history_main",
- "history_delta",
-}
-
-// AttributeServiceMemoryUsedType are the possible values that the attribute "service_memory_used_type" can have.
-var AttributeServiceMemoryUsedType = struct {
- Logical string
- Physical string
-}{
- "logical",
- "physical",
-}
-
-// AttributeServiceStatus are the possible values that the attribute "service_status" can have.
-var AttributeServiceStatus = struct {
- Active string
- Inactive string
-}{
- "active",
- "inactive",
-}
-
-// AttributeThreadStatus are the possible values that the attribute "thread_status" can have.
-var AttributeThreadStatus = struct {
- Active string
- Inactive string
-}{
- "active",
- "inactive",
-}
-
-// AttributeTransactionType are the possible values that the attribute "transaction_type" can have.
-var AttributeTransactionType = struct {
- Update string
- Commit string
- Rollback string
-}{
- "update",
- "commit",
- "rollback",
-}
-
-// AttributeVolumeOperationType are the possible values that the attribute "volume_operation_type" can have.
-var AttributeVolumeOperationType = struct {
- Read string
- Write string
-}{
- "read",
- "write",
-}
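
The regenerated SAP HANA constants that replace the struct values removed above are not reproduced in this hunk; mirroring the generated code added for the sqlserver and zookeeper receivers later in this patch, the service_status attribute would presumably come out roughly as the sketch below (the constant names match the call sites in queries.go that follow, and the string values match the removed AttributeServiceStatus struct). This is a sketch of assumed generator output, not text copied from the diff.

// Sketch of the generated typed constants for "service_status" (assumed shape,
// following the mdatagen template used elsewhere in this patch).
package metadata

// AttributeServiceStatus specifies the value of the service_status attribute.
type AttributeServiceStatus int

const (
	_ AttributeServiceStatus = iota
	AttributeServiceStatusActive
	AttributeServiceStatusInactive
)

// String returns the string representation of the AttributeServiceStatus.
func (av AttributeServiceStatus) String() string {
	switch av {
	case AttributeServiceStatusActive:
		return "active"
	case AttributeServiceStatusInactive:
		return "inactive"
	}
	return ""
}

// MapAttributeServiceStatus is a helper map of string to AttributeServiceStatus attribute value.
var MapAttributeServiceStatus = map[string]AttributeServiceStatus{
	"active":   AttributeServiceStatusActive,
	"inactive": AttributeServiceStatusInactive,
}
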
diff --git a/receiver/saphanareceiver/queries.go b/receiver/saphanareceiver/queries.go
index 3efef470e51f..76b7d4cdc17a 100644
--- a/receiver/saphanareceiver/queries.go
+++ b/receiver/saphanareceiver/queries.go
@@ -73,14 +73,14 @@ var queries = []monitoringQuery{
key: "active_services",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaServiceCountDataPoint(now, val, metadata.AttributeServiceStatus.Active)
+ mb.RecordSaphanaServiceCountDataPoint(now, val, metadata.AttributeServiceStatusActive)
},
},
{
key: "inactive_services",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaServiceCountDataPoint(now, val, metadata.AttributeServiceStatus.Inactive)
+ mb.RecordSaphanaServiceCountDataPoint(now, val, metadata.AttributeServiceStatusInactive)
},
},
},
@@ -96,14 +96,14 @@ var queries = []monitoringQuery{
key: "active_threads",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaServiceThreadCountDataPoint(now, val, metadata.AttributeThreadStatus.Active)
+ mb.RecordSaphanaServiceThreadCountDataPoint(now, val, metadata.AttributeThreadStatusActive)
},
},
{
key: "inactive_threads",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaServiceThreadCountDataPoint(now, val, metadata.AttributeThreadStatus.Inactive)
+ mb.RecordSaphanaServiceThreadCountDataPoint(now, val, metadata.AttributeThreadStatusInactive)
},
},
},
@@ -119,56 +119,56 @@ var queries = []monitoringQuery{
key: "main_data",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryType.Main, metadata.AttributeColumnMemorySubtype.Data)
+ mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryTypeMain, metadata.AttributeColumnMemorySubtypeData)
},
},
{
key: "main_dict",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryType.Main, metadata.AttributeColumnMemorySubtype.Dict)
+ mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryTypeMain, metadata.AttributeColumnMemorySubtypeDict)
},
},
{
key: "main_index",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryType.Main, metadata.AttributeColumnMemorySubtype.Index)
+ mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryTypeMain, metadata.AttributeColumnMemorySubtypeIndex)
},
},
{
key: "main_misc",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryType.Main, metadata.AttributeColumnMemorySubtype.Misc)
+ mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryTypeMain, metadata.AttributeColumnMemorySubtypeMisc)
},
},
{
key: "delta_data",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryType.Delta, metadata.AttributeColumnMemorySubtype.Data)
+ mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryTypeDelta, metadata.AttributeColumnMemorySubtypeData)
},
},
{
key: "delta_dict",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryType.Delta, metadata.AttributeColumnMemorySubtype.Dict)
+ mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryTypeDelta, metadata.AttributeColumnMemorySubtypeDict)
},
},
{
key: "delta_index",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryType.Delta, metadata.AttributeColumnMemorySubtype.Index)
+ mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryTypeDelta, metadata.AttributeColumnMemorySubtypeIndex)
},
},
{
key: "delta_misc",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryType.Delta, metadata.AttributeColumnMemorySubtype.Misc)
+ mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryTypeDelta, metadata.AttributeColumnMemorySubtypeMisc)
},
},
},
@@ -184,14 +184,14 @@ var queries = []monitoringQuery{
key: "fixed",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaRowStoreMemoryUsedDataPoint(now, val, metadata.AttributeRowMemoryType.Fixed)
+ mb.RecordSaphanaRowStoreMemoryUsedDataPoint(now, val, metadata.AttributeRowMemoryTypeFixed)
},
},
{
key: "variable",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaRowStoreMemoryUsedDataPoint(now, val, metadata.AttributeRowMemoryType.Variable)
+ mb.RecordSaphanaRowStoreMemoryUsedDataPoint(now, val, metadata.AttributeRowMemoryTypeVariable)
},
},
},
@@ -224,7 +224,8 @@ var queries = []monitoringQuery{
key: "connections",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaConnectionCountDataPoint(now, val, strings.ToLower(row["connection_status"]))
+ mb.RecordSaphanaConnectionCountDataPoint(now, val,
+ metadata.MapAttributeConnectionStatus[strings.ToLower(row["connection_status"])])
},
},
},
@@ -289,21 +290,21 @@ var queries = []monitoringQuery{
key: "updates",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaTransactionCountDataPoint(now, val, metadata.AttributeTransactionType.Update)
+ mb.RecordSaphanaTransactionCountDataPoint(now, val, metadata.AttributeTransactionTypeUpdate)
},
},
{
key: "commits",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaTransactionCountDataPoint(now, val, metadata.AttributeTransactionType.Commit)
+ mb.RecordSaphanaTransactionCountDataPoint(now, val, metadata.AttributeTransactionTypeCommit)
},
},
{
key: "rollbacks",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaTransactionCountDataPoint(now, val, metadata.AttributeTransactionType.Rollback)
+ mb.RecordSaphanaTransactionCountDataPoint(now, val, metadata.AttributeTransactionTypeRollback)
},
},
},
@@ -336,14 +337,14 @@ var queries = []monitoringQuery{
key: "free_size",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaDiskSizeCurrentDataPoint(now, val, row["path"], row["usage_type"], metadata.AttributeDiskStateUsedFree.Free)
+ mb.RecordSaphanaDiskSizeCurrentDataPoint(now, val, row["path"], row["usage_type"], metadata.AttributeDiskStateUsedFreeFree)
},
},
{
key: "used_size",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaDiskSizeCurrentDataPoint(now, val, row["path"], row["usage_type"], metadata.AttributeDiskStateUsedFree.Used)
+ mb.RecordSaphanaDiskSizeCurrentDataPoint(now, val, row["path"], row["usage_type"], metadata.AttributeDiskStateUsedFreeUsed)
},
},
},
@@ -423,28 +424,28 @@ var queries = []monitoringQuery{
key: "external",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaNetworkRequestFinishedCountDataPoint(now, val, metadata.AttributeInternalExternalRequestType.External)
+ mb.RecordSaphanaNetworkRequestFinishedCountDataPoint(now, val, metadata.AttributeInternalExternalRequestTypeExternal)
},
},
{
key: "internal",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaNetworkRequestFinishedCountDataPoint(now, val, metadata.AttributeInternalExternalRequestType.Internal)
+ mb.RecordSaphanaNetworkRequestFinishedCountDataPoint(now, val, metadata.AttributeInternalExternalRequestTypeInternal)
},
},
{
key: "active",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaNetworkRequestCountDataPoint(now, val, metadata.AttributeActivePendingRequestState.Active)
+ mb.RecordSaphanaNetworkRequestCountDataPoint(now, val, metadata.AttributeActivePendingRequestStateActive)
},
},
{
key: "pending",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaNetworkRequestCountDataPoint(now, val, metadata.AttributeActivePendingRequestState.Pending)
+ mb.RecordSaphanaNetworkRequestCountDataPoint(now, val, metadata.AttributeActivePendingRequestStatePending)
},
},
{
@@ -470,42 +471,42 @@ var queries = []monitoringQuery{
key: "reads",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaVolumeOperationCountDataPoint(now, val, row["path"], row["type"], metadata.AttributeVolumeOperationType.Read)
+ mb.RecordSaphanaVolumeOperationCountDataPoint(now, val, row["path"], row["type"], metadata.AttributeVolumeOperationTypeRead)
},
},
{
key: "writes",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaVolumeOperationCountDataPoint(now, val, row["path"], row["type"], metadata.AttributeVolumeOperationType.Write)
+ mb.RecordSaphanaVolumeOperationCountDataPoint(now, val, row["path"], row["type"], metadata.AttributeVolumeOperationTypeWrite)
},
},
{
key: "read_size",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaVolumeOperationSizeDataPoint(now, val, row["path"], row["type"], metadata.AttributeVolumeOperationType.Read)
+ mb.RecordSaphanaVolumeOperationSizeDataPoint(now, val, row["path"], row["type"], metadata.AttributeVolumeOperationTypeRead)
},
},
{
key: "write_size",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaVolumeOperationSizeDataPoint(now, val, row["path"], row["type"], metadata.AttributeVolumeOperationType.Write)
+ mb.RecordSaphanaVolumeOperationSizeDataPoint(now, val, row["path"], row["type"], metadata.AttributeVolumeOperationTypeWrite)
},
},
{
key: "read_time",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaVolumeOperationTimeDataPoint(now, val, row["path"], row["type"], metadata.AttributeVolumeOperationType.Read)
+ mb.RecordSaphanaVolumeOperationTimeDataPoint(now, val, row["path"], row["type"], metadata.AttributeVolumeOperationTypeRead)
},
},
{
key: "write_time",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaVolumeOperationTimeDataPoint(now, val, row["path"], row["type"], metadata.AttributeVolumeOperationType.Write)
+ mb.RecordSaphanaVolumeOperationTimeDataPoint(now, val, row["path"], row["type"], metadata.AttributeVolumeOperationTypeWrite)
},
},
},
@@ -524,14 +525,14 @@ var queries = []monitoringQuery{
key: "logical_used",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaServiceMemoryUsedDataPoint(now, val, row["service"], metadata.AttributeServiceMemoryUsedType.Logical)
+ mb.RecordSaphanaServiceMemoryUsedDataPoint(now, val, row["service"], metadata.AttributeServiceMemoryUsedTypeLogical)
},
},
{
key: "physical_used",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaServiceMemoryUsedDataPoint(now, val, row["service"], metadata.AttributeServiceMemoryUsedType.Physical)
+ mb.RecordSaphanaServiceMemoryUsedDataPoint(now, val, row["service"], metadata.AttributeServiceMemoryUsedTypePhysical)
},
},
{
@@ -552,28 +553,28 @@ var queries = []monitoringQuery{
key: "heap_free",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaServiceMemoryHeapCurrentDataPoint(now, val, row["service"], metadata.AttributeMemoryStateUsedFree.Free)
+ mb.RecordSaphanaServiceMemoryHeapCurrentDataPoint(now, val, row["service"], metadata.AttributeMemoryStateUsedFreeFree)
},
},
{
key: "heap_used",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaServiceMemoryHeapCurrentDataPoint(now, val, row["service"], metadata.AttributeMemoryStateUsedFree.Used)
+ mb.RecordSaphanaServiceMemoryHeapCurrentDataPoint(now, val, row["service"], metadata.AttributeMemoryStateUsedFreeUsed)
},
},
{
key: "shared_free",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaServiceMemorySharedCurrentDataPoint(now, val, row["service"], metadata.AttributeMemoryStateUsedFree.Free)
+ mb.RecordSaphanaServiceMemorySharedCurrentDataPoint(now, val, row["service"], metadata.AttributeMemoryStateUsedFreeFree)
},
},
{
key: "shared_used",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaServiceMemorySharedCurrentDataPoint(now, val, row["service"], metadata.AttributeMemoryStateUsedFree.Used)
+ mb.RecordSaphanaServiceMemorySharedCurrentDataPoint(now, val, row["service"], metadata.AttributeMemoryStateUsedFreeUsed)
},
},
{
@@ -640,77 +641,77 @@ var queries = []monitoringQuery{
key: "reads",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaSchemaOperationCountDataPoint(now, val, row["schema"], metadata.AttributeSchemaOperationType.Read)
+ mb.RecordSaphanaSchemaOperationCountDataPoint(now, val, row["schema"], metadata.AttributeSchemaOperationTypeRead)
},
},
{
key: "writes",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaSchemaOperationCountDataPoint(now, val, row["schema"], metadata.AttributeSchemaOperationType.Write)
+ mb.RecordSaphanaSchemaOperationCountDataPoint(now, val, row["schema"], metadata.AttributeSchemaOperationTypeWrite)
},
},
{
key: "merges",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaSchemaOperationCountDataPoint(now, val, row["schema"], metadata.AttributeSchemaOperationType.Merge)
+ mb.RecordSaphanaSchemaOperationCountDataPoint(now, val, row["schema"], metadata.AttributeSchemaOperationTypeMerge)
},
},
{
key: "mem_main",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaSchemaMemoryUsedCurrentDataPoint(now, val, row["schema"], metadata.AttributeSchemaMemoryType.Main)
+ mb.RecordSaphanaSchemaMemoryUsedCurrentDataPoint(now, val, row["schema"], metadata.AttributeSchemaMemoryTypeMain)
},
},
{
key: "mem_delta",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaSchemaMemoryUsedCurrentDataPoint(now, val, row["schema"], metadata.AttributeSchemaMemoryType.Delta)
+ mb.RecordSaphanaSchemaMemoryUsedCurrentDataPoint(now, val, row["schema"], metadata.AttributeSchemaMemoryTypeDelta)
},
},
{
key: "mem_history_main",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaSchemaMemoryUsedCurrentDataPoint(now, val, row["schema"], metadata.AttributeSchemaMemoryType.HistoryMain)
+ mb.RecordSaphanaSchemaMemoryUsedCurrentDataPoint(now, val, row["schema"], metadata.AttributeSchemaMemoryTypeHistoryMain)
},
},
{
key: "mem_history_delta",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaSchemaMemoryUsedCurrentDataPoint(now, val, row["schema"], metadata.AttributeSchemaMemoryType.HistoryDelta)
+ mb.RecordSaphanaSchemaMemoryUsedCurrentDataPoint(now, val, row["schema"], metadata.AttributeSchemaMemoryTypeHistoryDelta)
},
},
{
key: "records_main",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaSchemaRecordCountDataPoint(now, val, row["schema"], metadata.AttributeSchemaRecordType.Main)
+ mb.RecordSaphanaSchemaRecordCountDataPoint(now, val, row["schema"], metadata.AttributeSchemaRecordTypeMain)
},
},
{
key: "records_delta",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaSchemaRecordCountDataPoint(now, val, row["schema"], metadata.AttributeSchemaRecordType.Delta)
+ mb.RecordSaphanaSchemaRecordCountDataPoint(now, val, row["schema"], metadata.AttributeSchemaRecordTypeDelta)
},
},
{
key: "records_history_main",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaSchemaRecordCountDataPoint(now, val, row["schema"], metadata.AttributeSchemaRecordType.HistoryMain)
+ mb.RecordSaphanaSchemaRecordCountDataPoint(now, val, row["schema"], metadata.AttributeSchemaRecordTypeHistoryMain)
},
},
{
key: "records_history_delta",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaSchemaRecordCountDataPoint(now, val, row["schema"], metadata.AttributeSchemaRecordType.HistoryDelta)
+ mb.RecordSaphanaSchemaRecordCountDataPoint(now, val, row["schema"], metadata.AttributeSchemaRecordTypeHistoryDelta)
},
},
},
@@ -730,35 +731,35 @@ var queries = []monitoringQuery{
key: "free_physical_memory",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaHostMemoryCurrentDataPoint(now, val, metadata.AttributeMemoryStateUsedFree.Free)
+ mb.RecordSaphanaHostMemoryCurrentDataPoint(now, val, metadata.AttributeMemoryStateUsedFreeFree)
},
},
{
key: "used_physical_memory",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaHostMemoryCurrentDataPoint(now, val, metadata.AttributeMemoryStateUsedFree.Used)
+ mb.RecordSaphanaHostMemoryCurrentDataPoint(now, val, metadata.AttributeMemoryStateUsedFreeUsed)
},
},
{
key: "free_swap_space",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaHostSwapCurrentDataPoint(now, val, metadata.AttributeHostSwapState.Free)
+ mb.RecordSaphanaHostSwapCurrentDataPoint(now, val, metadata.AttributeHostSwapStateFree)
},
},
{
key: "used_swap_space",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaHostSwapCurrentDataPoint(now, val, metadata.AttributeHostSwapState.Used)
+ mb.RecordSaphanaHostSwapCurrentDataPoint(now, val, metadata.AttributeHostSwapStateUsed)
},
},
{
key: "instance_total_used",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaInstanceMemoryCurrentDataPoint(now, val, metadata.AttributeMemoryStateUsedFree.Used)
+ mb.RecordSaphanaInstanceMemoryCurrentDataPoint(now, val, metadata.AttributeMemoryStateUsedFreeUsed)
},
},
{
@@ -772,7 +773,7 @@ var queries = []monitoringQuery{
key: "instance_total_free",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaInstanceMemoryCurrentDataPoint(now, val, metadata.AttributeMemoryStateUsedFree.Free)
+ mb.RecordSaphanaInstanceMemoryCurrentDataPoint(now, val, metadata.AttributeMemoryStateUsedFreeFree)
},
},
{
@@ -793,28 +794,28 @@ var queries = []monitoringQuery{
key: "cpu_user",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaCPUUsedDataPoint(now, val, metadata.AttributeCPUType.User)
+ mb.RecordSaphanaCPUUsedDataPoint(now, val, metadata.AttributeCPUTypeUser)
},
},
{
key: "cpu_system",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaCPUUsedDataPoint(now, val, metadata.AttributeCPUType.System)
+ mb.RecordSaphanaCPUUsedDataPoint(now, val, metadata.AttributeCPUTypeSystem)
},
},
{
key: "cpu_io_wait",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaCPUUsedDataPoint(now, val, metadata.AttributeCPUType.IoWait)
+ mb.RecordSaphanaCPUUsedDataPoint(now, val, metadata.AttributeCPUTypeIoWait)
},
},
{
key: "cpu_idle",
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,
row map[string]string) {
- mb.RecordSaphanaCPUUsedDataPoint(now, val, metadata.AttributeCPUType.Idle)
+ mb.RecordSaphanaCPUUsedDataPoint(now, val, metadata.AttributeCPUTypeIdle)
},
},
},
diff --git a/receiver/sqlserverreceiver/internal/metadata/generated_metrics_v2.go b/receiver/sqlserverreceiver/internal/metadata/generated_metrics_v2.go
index 36c0e836ced2..fef91179a7b8 100644
--- a/receiver/sqlserverreceiver/internal/metadata/generated_metrics_v2.go
+++ b/receiver/sqlserverreceiver/internal/metadata/generated_metrics_v2.go
@@ -103,6 +103,32 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributePageOperations specifies the value of the page.operations attribute.
+type AttributePageOperations int
+
+const (
+ _ AttributePageOperations = iota
+ AttributePageOperationsRead
+ AttributePageOperationsWrite
+)
+
+// String returns the string representation of the AttributePageOperations.
+func (av AttributePageOperations) String() string {
+ switch av {
+ case AttributePageOperationsRead:
+ return "read"
+ case AttributePageOperationsWrite:
+ return "write"
+ }
+ return ""
+}
+
+// MapAttributePageOperations is a helper map of string to AttributePageOperations attribute value.
+var MapAttributePageOperations = map[string]AttributePageOperations{
+ "read": AttributePageOperationsRead,
+ "write": AttributePageOperationsWrite,
+}
+
type metricSqlserverBatchRequestRate struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -1274,8 +1300,8 @@ func (mb *MetricsBuilder) RecordSqlserverPageLifeExpectancyDataPoint(ts pcommon.
}
// RecordSqlserverPageOperationRateDataPoint adds a data point to sqlserver.page.operation.rate metric.
-func (mb *MetricsBuilder) RecordSqlserverPageOperationRateDataPoint(ts pcommon.Timestamp, val float64, pageOperationsAttributeValue string) {
- mb.metricSqlserverPageOperationRate.recordDataPoint(mb.startTime, ts, val, pageOperationsAttributeValue)
+func (mb *MetricsBuilder) RecordSqlserverPageOperationRateDataPoint(ts pcommon.Timestamp, val float64, pageOperationsAttributeValue AttributePageOperations) {
+ mb.metricSqlserverPageOperationRate.recordDataPoint(mb.startTime, ts, val, pageOperationsAttributeValue.String())
}
// RecordSqlserverPageSplitRateDataPoint adds a data point to sqlserver.page.split.rate metric.
@@ -1347,12 +1373,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributePageOperations are the possible values that the attribute "page.operations" can have.
-var AttributePageOperations = struct {
- Read string
- Write string
-}{
- "read",
- "write",
-}
diff --git a/receiver/sqlserverreceiver/internal/metadata/metrics_builder_ext.go b/receiver/sqlserverreceiver/internal/metadata/metrics_builder_ext.go
index 6348e2744262..a37983f58de8 100644
--- a/receiver/sqlserverreceiver/internal/metadata/metrics_builder_ext.go
+++ b/receiver/sqlserverreceiver/internal/metadata/metrics_builder_ext.go
@@ -41,7 +41,7 @@ func (mb *MetricsBuilder) RecordAnyDataPoint(ts pcommon.Timestamp, val float64,
case "sqlserver.page.life_expectancy":
mb.RecordSqlserverPageLifeExpectancyDataPoint(ts, int64(val))
case "sqlserver.page.operation.rate":
- mb.RecordSqlserverPageOperationRateDataPoint(ts, val, attributes["type"])
+ mb.RecordSqlserverPageOperationRateDataPoint(ts, val, MapAttributePageOperations[attributes["type"]])
case "sqlserver.page.split.rate":
mb.RecordSqlserverPageSplitRateDataPoint(ts, val)
case "sqlserver.transaction_log.flush.data.rate":
diff --git a/receiver/zookeeperreceiver/internal/metadata/generated_metrics_v2.go b/receiver/zookeeperreceiver/internal/metadata/generated_metrics_v2.go
index 0bf859755346..b6233891c58a 100644
--- a/receiver/zookeeperreceiver/internal/metadata/generated_metrics_v2.go
+++ b/receiver/zookeeperreceiver/internal/metadata/generated_metrics_v2.go
@@ -83,6 +83,58 @@ func DefaultMetricsSettings() MetricsSettings {
}
}
+// AttributeDirection specifies the value of the direction attribute.
+type AttributeDirection int
+
+const (
+ _ AttributeDirection = iota
+ AttributeDirectionReceived
+ AttributeDirectionSent
+)
+
+// String returns the string representation of the AttributeDirection.
+func (av AttributeDirection) String() string {
+ switch av {
+ case AttributeDirectionReceived:
+ return "received"
+ case AttributeDirectionSent:
+ return "sent"
+ }
+ return ""
+}
+
+// MapAttributeDirection is a helper map of string to AttributeDirection attribute value.
+var MapAttributeDirection = map[string]AttributeDirection{
+ "received": AttributeDirectionReceived,
+ "sent": AttributeDirectionSent,
+}
+
+// AttributeState specifies the value of the state attribute.
+type AttributeState int
+
+const (
+ _ AttributeState = iota
+ AttributeStateSynced
+ AttributeStateUnsynced
+)
+
+// String returns the string representation of the AttributeState.
+func (av AttributeState) String() string {
+ switch av {
+ case AttributeStateSynced:
+ return "synced"
+ case AttributeStateUnsynced:
+ return "unsynced"
+ }
+ return ""
+}
+
+// MapAttributeState is a helper map of string to AttributeState attribute value.
+var MapAttributeState = map[string]AttributeState{
+ "synced": AttributeStateSynced,
+ "unsynced": AttributeStateUnsynced,
+}
+
type metricZookeeperConnectionActive struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -1001,8 +1053,8 @@ func (mb *MetricsBuilder) RecordZookeeperFileDescriptorOpenDataPoint(ts pcommon.
}
// RecordZookeeperFollowerCountDataPoint adds a data point to zookeeper.follower.count metric.
-func (mb *MetricsBuilder) RecordZookeeperFollowerCountDataPoint(ts pcommon.Timestamp, val int64, stateAttributeValue string) {
- mb.metricZookeeperFollowerCount.recordDataPoint(mb.startTime, ts, val, stateAttributeValue)
+func (mb *MetricsBuilder) RecordZookeeperFollowerCountDataPoint(ts pcommon.Timestamp, val int64, stateAttributeValue AttributeState) {
+ mb.metricZookeeperFollowerCount.recordDataPoint(mb.startTime, ts, val, stateAttributeValue.String())
}
// RecordZookeeperFsyncExceededThresholdCountDataPoint adds a data point to zookeeper.fsync.exceeded_threshold.count metric.
@@ -1026,8 +1078,8 @@ func (mb *MetricsBuilder) RecordZookeeperLatencyMinDataPoint(ts pcommon.Timestam
}
// RecordZookeeperPacketCountDataPoint adds a data point to zookeeper.packet.count metric.
-func (mb *MetricsBuilder) RecordZookeeperPacketCountDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue string) {
- mb.metricZookeeperPacketCount.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+func (mb *MetricsBuilder) RecordZookeeperPacketCountDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue AttributeDirection) {
+ mb.metricZookeeperPacketCount.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordZookeeperRequestActiveDataPoint adds a data point to zookeeper.request.active metric.
@@ -1072,21 +1124,3 @@ var Attributes = struct {
// A is an alias for Attributes.
var A = Attributes
-
-// AttributeDirection are the possible values that the attribute "direction" can have.
-var AttributeDirection = struct {
- Received string
- Sent string
-}{
- "received",
- "sent",
-}
-
-// AttributeState are the possible values that the attribute "state" can have.
-var AttributeState = struct {
- Synced string
- Unsynced string
-}{
- "synced",
- "unsynced",
-}
diff --git a/receiver/zookeeperreceiver/metrics.go b/receiver/zookeeperreceiver/metrics.go
index e946291b2a00..0aeb045a5ea9 100644
--- a/receiver/zookeeperreceiver/metrics.go
+++ b/receiver/zookeeperreceiver/metrics.go
@@ -71,7 +71,7 @@ func (m *metricCreator) recordDataPointsFunc(metric string) func(ts pcommon.Time
case syncedFollowersMetricKey:
return func(ts pcommon.Timestamp, val int64) {
m.computedMetricStore[syncedFollowersMetricKey] = val
- m.mb.RecordZookeeperFollowerCountDataPoint(ts, val, metadata.AttributeState.Synced)
+ m.mb.RecordZookeeperFollowerCountDataPoint(ts, val, metadata.AttributeStateSynced)
}
case pendingSyncsMetricKey:
return m.mb.RecordZookeeperSyncPendingDataPoint
@@ -101,11 +101,11 @@ func (m *metricCreator) recordDataPointsFunc(metric string) func(ts pcommon.Time
return m.mb.RecordZookeeperFsyncExceededThresholdCountDataPoint
case packetsReceivedMetricKey:
return func(ts pcommon.Timestamp, val int64) {
- m.mb.RecordZookeeperPacketCountDataPoint(ts, val, metadata.AttributeDirection.Received)
+ m.mb.RecordZookeeperPacketCountDataPoint(ts, val, metadata.AttributeDirectionReceived)
}
case packetsSentMetricKey:
return func(ts pcommon.Timestamp, val int64) {
- m.mb.RecordZookeeperPacketCountDataPoint(ts, val, metadata.AttributeDirection.Sent)
+ m.mb.RecordZookeeperPacketCountDataPoint(ts, val, metadata.AttributeDirectionSent)
}
}
@@ -132,7 +132,7 @@ func (m *metricCreator) computeNotSyncedFollowersMetric(ts pcommon.Timestamp) er
}
val := followersTotal - syncedFollowers
- m.mb.RecordZookeeperFollowerCountDataPoint(ts, val, metadata.AttributeState.Unsynced)
+ m.mb.RecordZookeeperFollowerCountDataPoint(ts, val, metadata.AttributeStateUnsynced)
return nil
}
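
For callers that take the attribute value from external input rather than hard-coding a constant, the generated map also allows explicit validation with the comma-ok idiom before recording. A rough sketch, assuming the zookeeperreceiver package and the import paths used in this repository; the helper function itself is hypothetical and not part of the patch:

package zookeeperreceiver

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zookeeperreceiver/internal/metadata"
)

// recordPacketCount validates a raw direction string against the generated map
// and records the data point with the typed constant, returning an error for
// unsupported values instead of silently recording an empty attribute.
func recordPacketCount(mb *metadata.MetricsBuilder, ts pcommon.Timestamp, val int64, rawDirection string) error {
	direction, ok := metadata.MapAttributeDirection[rawDirection]
	if !ok {
		return fmt.Errorf("unsupported packet direction %q", rawDirection)
	}
	mb.RecordZookeeperPacketCountDataPoint(ts, val, direction)
	return nil
}
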