diff --git a/.chloggen/elasticsearch-receiver-add-indices.yaml b/.chloggen/elasticsearch-receiver-add-indices.yaml
new file mode 100644
index 000000000000..34cb10dbf608
--- /dev/null
+++ b/.chloggen/elasticsearch-receiver-add-indices.yaml
@@ -0,0 +1,5 @@
+change_type: enhancement
+component: elasticsearchreceiver
+note: Add scraping of index stats to generate search metrics
+issues: [14635]
+
diff --git a/receiver/elasticsearchreceiver/README.md b/receiver/elasticsearchreceiver/README.md
index 9389371e88ae..8348de83692d 100644
--- a/receiver/elasticsearchreceiver/README.md
+++ b/receiver/elasticsearchreceiver/README.md
@@ -6,7 +6,7 @@
| Supported pipeline types | metrics |
| Distributions | [contrib] |
-This receiver queries the Elasticsearch [node stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) and [cluster health](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) endpoints in order to scrape metrics from a running elasticsearch cluster.
+This receiver queries the Elasticsearch [node stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html), [cluster health](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html), and [index stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html) endpoints in order to scrape metrics from a running elasticsearch cluster.
## Prerequisites
@@ -21,6 +21,7 @@ The following settings are optional:
- `metrics` (default: see `DefaultMetricsSettings` [here](./internal/metadata/generated_metrics.go): Allows enabling and disabling specific metrics from being collected in this receiver.
- `nodes` (default: `["_all"]`): Allows specifying node filters that define which nodes are scraped for node-level metrics. See [the Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/7.9/cluster.html#cluster-nodes) for allowed filters. If this option is left explicitly empty, then no node-level metrics will be scraped.
- `skip_cluster_metrics` (default: `false`): If true, cluster-level metrics will not be scraped.
+- `indices` (default: `["_all"]`): Allows specifying index filters that define which indices are scraped for index-level metrics. See [the Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html#index-stats-api-path-params) for allowed filters. If this option is left explicitly empty, then no index-level metrics will be scraped.
- `endpoint` (default = `http://localhost:9200`): The base URL of the Elasticsearch API for the cluster to monitor.
- `username` (no default): Specifies the username used to authenticate with Elasticsearch using basic auth. Must be specified if password is specified.
- `password` (no default): Specifies the password used to authenticate with Elasticsearch using basic auth. Must be specified if username is specified.
@@ -36,6 +37,7 @@ receivers:
enabled: false
nodes: ["_local"]
skip_cluster_metrics: true
+ indices: [".geoip_databases"]
endpoint: http://localhost:9200
username: otel
password: password
diff --git a/receiver/elasticsearchreceiver/client.go b/receiver/elasticsearchreceiver/client.go
index cfec3c498b6e..8b51ded70d9f 100644
--- a/receiver/elasticsearchreceiver/client.go
+++ b/receiver/elasticsearchreceiver/client.go
@@ -40,7 +40,8 @@ var (
type elasticsearchClient interface {
NodeStats(ctx context.Context, nodes []string) (*model.NodeStats, error)
ClusterHealth(ctx context.Context) (*model.ClusterHealth, error)
- Version(ctx context.Context) (*model.VersionResponse, error)
+ IndexStats(ctx context.Context, indices []string) (*model.IndexStats, error)
+ ClusterMetadata(ctx context.Context) (*model.ClusterMetadataResponse, error)
}
// defaultElasticsearchClient is the main implementation of elasticsearchClient.
@@ -88,6 +89,8 @@ const nodeStatsMetrics = "breaker,indices,process,jvm,thread_pool,transport,http
// nodeStatsIndexMetrics is a comma separated list of index metrics that will be gathered from NodeStats.
const nodeStatsIndexMetrics = "store,docs,indexing,get,search,merge,refresh,flush,warmer,query_cache,fielddata,translog"
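+// indexStatsMetrics is a comma separated list of index metrics that will be gathered from the index stats endpoint.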
+const indexStatsMetrics = "search"
+
func (c defaultElasticsearchClient) NodeStats(ctx context.Context, nodes []string) (*model.NodeStats, error) {
var nodeSpec string
if len(nodes) > 0 {
@@ -119,13 +122,34 @@ func (c defaultElasticsearchClient) ClusterHealth(ctx context.Context) (*model.C
return &clusterHealth, err
}
-func (c defaultElasticsearchClient) Version(ctx context.Context) (*model.VersionResponse, error) {
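+// IndexStats fetches index-level stats (currently only the "search" group) for the given indices; an empty slice defaults to "_all".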
+func (c defaultElasticsearchClient) IndexStats(ctx context.Context, indices []string) (*model.IndexStats, error) {
+ var indexSpec string
+ if len(indices) > 0 {
+ indexSpec = strings.Join(indices, ",")
+ } else {
+ indexSpec = "_all"
+ }
+
+ indexStatsPath := fmt.Sprintf("%s/_stats/%s", indexSpec, indexStatsMetrics)
+
+ body, err := c.doRequest(ctx, indexStatsPath)
+ if err != nil {
+ return nil, err
+ }
+
+ indexStats := model.IndexStats{}
+ err = json.Unmarshal(body, &indexStats)
+
+ return &indexStats, err
+}
+
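+// ClusterMetadata fetches the cluster name and version information from the Elasticsearch root ("/") endpoint.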
+func (c defaultElasticsearchClient) ClusterMetadata(ctx context.Context) (*model.ClusterMetadataResponse, error) {
body, err := c.doRequest(ctx, "")
if err != nil {
return nil, err
}
- versionResponse := model.VersionResponse{}
+ versionResponse := model.ClusterMetadataResponse{}
err = json.Unmarshal(body, &versionResponse)
return &versionResponse, err
}
diff --git a/receiver/elasticsearchreceiver/client_test.go b/receiver/elasticsearchreceiver/client_test.go
index 4c54849e4a45..953c4dd425a1 100644
--- a/receiver/elasticsearchreceiver/client_test.go
+++ b/receiver/elasticsearchreceiver/client_test.go
@@ -236,12 +236,12 @@ func TestClusterHealthNoAuthorization(t *testing.T) {
require.ErrorIs(t, err, errUnauthorized)
}
-func TestVersionNoPassword(t *testing.T) {
- versionJSON, err := os.ReadFile("./testdata/sample_payloads/version.json")
+func TestMetadataNoPassword(t *testing.T) {
+ metadataJSON, err := os.ReadFile("./testdata/sample_payloads/metadata.json")
require.NoError(t, err)
- actualVersion := model.VersionResponse{}
- require.NoError(t, json.Unmarshal(versionJSON, &actualVersion))
+ actualMetadata := model.ClusterMetadataResponse{}
+ require.NoError(t, json.Unmarshal(metadataJSON, &actualMetadata))
elasticsearchMock := mockServer(t, "", "")
defer elasticsearchMock.Close()
@@ -254,18 +254,18 @@ func TestVersionNoPassword(t *testing.T) {
require.NoError(t, err)
ctx := context.Background()
- version, err := client.Version(ctx)
+ metadata, err := client.ClusterMetadata(ctx)
require.NoError(t, err)
- require.Equal(t, &actualVersion, version)
+ require.Equal(t, &actualMetadata, metadata)
}
-func TestVersionAuthentication(t *testing.T) {
- versionJSON, err := os.ReadFile("./testdata/sample_payloads/version.json")
+func TestMetadataAuthentication(t *testing.T) {
+ metadataJSON, err := os.ReadFile("./testdata/sample_payloads/metadata.json")
require.NoError(t, err)
- actualVersion := model.VersionResponse{}
- require.NoError(t, json.Unmarshal(versionJSON, &actualVersion))
+ actualMetadata := model.ClusterMetadataResponse{}
+ require.NoError(t, json.Unmarshal(metadataJSON, &actualMetadata))
username := "user"
password := "pass"
@@ -283,13 +283,13 @@ func TestVersionAuthentication(t *testing.T) {
require.NoError(t, err)
ctx := context.Background()
- version, err := client.Version(ctx)
+ metadata, err := client.ClusterMetadata(ctx)
require.NoError(t, err)
- require.Equal(t, &actualVersion, version)
+ require.Equal(t, &actualMetadata, metadata)
}
-func TestVersionNoAuthentication(t *testing.T) {
+func TestMetadataNoAuthentication(t *testing.T) {
elasticsearchMock := mockServer(t, "user", "pass")
defer elasticsearchMock.Close()
@@ -301,11 +301,11 @@ func TestVersionNoAuthentication(t *testing.T) {
require.NoError(t, err)
ctx := context.Background()
- _, err = client.Version(ctx)
+ _, err = client.ClusterMetadata(ctx)
require.ErrorIs(t, err, errUnauthenticated)
}
-func TestVersionNoAuthorization(t *testing.T) {
+func TestMetadataNoAuthorization(t *testing.T) {
elasticsearchMock := mockServer(t, "user", "pass")
defer elasticsearchMock.Close()
@@ -319,7 +319,7 @@ func TestVersionNoAuthorization(t *testing.T) {
require.NoError(t, err)
ctx := context.Background()
- _, err = client.Version(ctx)
+ _, err = client.ClusterMetadata(ctx)
require.ErrorIs(t, err, errUnauthorized)
}
@@ -366,14 +366,126 @@ func TestDoRequest404(t *testing.T) {
require.Contains(t, err.Error(), "404")
}
+func TestIndexStatsNoPassword(t *testing.T) {
+ indexJSON, err := os.ReadFile("./testdata/sample_payloads/indices.json")
+ require.NoError(t, err)
+
+ actualIndexStats := model.IndexStats{}
+ require.NoError(t, json.Unmarshal(indexJSON, &actualIndexStats))
+
+ elasticsearchMock := mockServer(t, "", "")
+ defer elasticsearchMock.Close()
+
+ client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{
+ HTTPClientSettings: confighttp.HTTPClientSettings{
+ Endpoint: elasticsearchMock.URL,
+ },
+ }, componenttest.NewNopHost())
+ require.NoError(t, err)
+ ctx := context.Background()
+ indexStats, err := client.IndexStats(ctx, []string{"_all"})
+ require.NoError(t, err)
+
+ require.Equal(t, &actualIndexStats, indexStats)
+}
+
+func TestIndexStatsNilIndices(t *testing.T) {
+ indexJSON, err := os.ReadFile("./testdata/sample_payloads/indices.json")
+ require.NoError(t, err)
+
+ actualIndexStats := model.IndexStats{}
+ require.NoError(t, json.Unmarshal(indexJSON, &actualIndexStats))
+
+ elasticsearchMock := mockServer(t, "", "")
+ defer elasticsearchMock.Close()
+
+ client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{
+ HTTPClientSettings: confighttp.HTTPClientSettings{
+ Endpoint: elasticsearchMock.URL,
+ },
+ }, componenttest.NewNopHost())
+ require.NoError(t, err)
+
+ ctx := context.Background()
+ indexStats, err := client.IndexStats(ctx, nil)
+ require.NoError(t, err)
+
+ require.Equal(t, &actualIndexStats, indexStats)
+}
+
+func TestIndexStatsAuthentication(t *testing.T) {
+ indexJSON, err := os.ReadFile("./testdata/sample_payloads/indices.json")
+ require.NoError(t, err)
+
+ actualIndexStats := model.IndexStats{}
+ require.NoError(t, json.Unmarshal(indexJSON, &actualIndexStats))
+
+ username := "user"
+ password := "pass"
+
+ elasticsearchMock := mockServer(t, username, password)
+ defer elasticsearchMock.Close()
+
+ client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{
+ HTTPClientSettings: confighttp.HTTPClientSettings{
+ Endpoint: elasticsearchMock.URL,
+ },
+ Username: username,
+ Password: password,
+ }, componenttest.NewNopHost())
+ require.NoError(t, err)
+
+ ctx := context.Background()
+ indexStats, err := client.IndexStats(ctx, []string{"_all"})
+ require.NoError(t, err)
+
+ require.Equal(t, &actualIndexStats, indexStats)
+}
+
+func TestIndexStatsNoAuthentication(t *testing.T) {
+ elasticsearchMock := mockServer(t, "user", "pass")
+ defer elasticsearchMock.Close()
+
+ client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{
+ HTTPClientSettings: confighttp.HTTPClientSettings{
+ Endpoint: elasticsearchMock.URL,
+ },
+ }, componenttest.NewNopHost())
+ require.NoError(t, err)
+
+ ctx := context.Background()
+ _, err = client.IndexStats(ctx, []string{"_all"})
+ require.ErrorIs(t, err, errUnauthenticated)
+}
+
+func TestIndexStatsBadAuthentication(t *testing.T) {
+ elasticsearchMock := mockServer(t, "user", "pass")
+ defer elasticsearchMock.Close()
+
+ client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{
+ HTTPClientSettings: confighttp.HTTPClientSettings{
+ Endpoint: elasticsearchMock.URL,
+ },
+ Username: "bad_user",
+ Password: "bad_pass",
+ }, componenttest.NewNopHost())
+ require.NoError(t, err)
+
+ ctx := context.Background()
+ _, err = client.IndexStats(ctx, []string{"_all"})
+ require.ErrorIs(t, err, errUnauthorized)
+}
+
// mockServer gives a mock elasticsearch server for testing; if username or password is included, they will be required for the client.
// otherwise, authorization is ignored.
func mockServer(t *testing.T, username, password string) *httptest.Server {
nodes, err := os.ReadFile("./testdata/sample_payloads/nodes_linux.json")
require.NoError(t, err)
+ indices, err := os.ReadFile("./testdata/sample_payloads/indices.json")
+ require.NoError(t, err)
health, err := os.ReadFile("./testdata/sample_payloads/health.json")
require.NoError(t, err)
- version, err := os.ReadFile("./testdata/sample_payloads/version.json")
+ metadata, err := os.ReadFile("./testdata/sample_payloads/metadata.json")
require.NoError(t, err)
elasticsearchMock := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
@@ -395,6 +507,13 @@ func mockServer(t *testing.T, username, password string) *httptest.Server {
return
}
+ if strings.HasPrefix(req.URL.Path, "/_all/_stats") {
+ rw.WriteHeader(200)
+ _, err = rw.Write(indices)
+ require.NoError(t, err)
+ return
+ }
+
if strings.HasPrefix(req.URL.Path, "/_cluster/health") {
rw.WriteHeader(200)
_, err = rw.Write(health)
@@ -402,10 +521,10 @@ func mockServer(t *testing.T, username, password string) *httptest.Server {
return
}
- // version check
+ // metadata check
if req.URL.Path == "/" {
rw.WriteHeader(200)
- _, err = rw.Write(version)
+ _, err = rw.Write(metadata)
require.NoError(t, err)
return
}
diff --git a/receiver/elasticsearchreceiver/config.go b/receiver/elasticsearchreceiver/config.go
index 8bb6bf0d2bff..382ebb4986c8 100644
--- a/receiver/elasticsearchreceiver/config.go
+++ b/receiver/elasticsearchreceiver/config.go
@@ -49,6 +49,11 @@ type Config struct {
Nodes []string `mapstructure:"nodes"`
// SkipClusterMetrics indicates whether cluster level metrics from /_cluster/health should be scraped or not.
SkipClusterMetrics bool `mapstructure:"skip_cluster_metrics"`
+ // Indices defines the indices to scrape.
+ // See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html#index-stats-api-path-params
+ // for which names are viable.
+ // If Indices is empty, no indices will be scraped.
+ Indices []string `mapstructure:"indices"`
// Username is the username used when making REST calls to elasticsearch. Must be specified if Password is. Not required.
Username string `mapstructure:"username"`
// Password is the password used when making REST calls to elasticsearch. Must be specified if Username is. Not required.
diff --git a/receiver/elasticsearchreceiver/config_test.go b/receiver/elasticsearchreceiver/config_test.go
index bdcc86b85ca5..e2510282d83a 100644
--- a/receiver/elasticsearchreceiver/config_test.go
+++ b/receiver/elasticsearchreceiver/config_test.go
@@ -168,6 +168,7 @@ func TestLoadConfig(t *testing.T) {
expected: &Config{
SkipClusterMetrics: true,
Nodes: []string{"_local"},
+ Indices: []string{".geoip_databases"},
ScraperControllerSettings: scraperhelper.ScraperControllerSettings{
ReceiverSettings: config.NewReceiverSettings(config.NewComponentID(typeStr)),
CollectionInterval: 2 * time.Minute,
diff --git a/receiver/elasticsearchreceiver/documentation.md b/receiver/elasticsearchreceiver/documentation.md
index 0364ce6fe18b..eb77d4027c5e 100644
--- a/receiver/elasticsearchreceiver/documentation.md
+++ b/receiver/elasticsearchreceiver/documentation.md
@@ -22,6 +22,8 @@ These are the metrics available for this scraper.
| **elasticsearch.cluster.state_queue** | Number of cluster states in queue. | 1 | Sum(Int) | - cluster_state_queue_state |
| **elasticsearch.cluster.state_update.count** | The number of cluster state update attempts that changed the cluster state since the node started. | 1 | Sum(Int) | - cluster_state_update_state |
| **elasticsearch.cluster.state_update.time** | The cumulative amount of time updating the cluster state since the node started. | ms | Sum(Int) | - cluster_state_update_state <br>- cluster_state_update_type |
+| **elasticsearch.index.operations.completed** | The number of operations completed for an index. | {operations} | Sum(Int) | - operation <br>- index_aggregation_type |
+| **elasticsearch.index.operations.time** | Time spent on operations for an index. | ms | Sum(Int) | - operation <br>- index_aggregation_type |
| **elasticsearch.indexing_pressure.memory.limit** | Configured memory limit, in bytes, for the indexing requests. | By | Gauge(Int) | |
| **elasticsearch.indexing_pressure.memory.total.primary_rejections** | Cumulative number of indexing requests rejected in the primary stage. | 1 | Sum(Int) | |
| **elasticsearch.indexing_pressure.memory.total.replica_rejections** | Number of indexing requests rejected in the replica stage. | 1 | Sum(Int) | |
@@ -44,8 +46,8 @@ These are the metrics available for this scraper.
| **elasticsearch.node.ingest.documents.current** | Total number of documents currently being ingested. | {documents} | Sum(Int) | |
| **elasticsearch.node.ingest.operations.failed** | Total number of failed ingest operations during the lifetime of this node. | {operation} | Sum(Int) | |
| **elasticsearch.node.open_files** | The number of open file descriptors held by the node. | {files} | Sum(Int) | |
-| **elasticsearch.node.operations.completed** | The number of operations completed. | {operations} | Sum(Int) | |
-| **elasticsearch.node.operations.time** | Time spent on operations. | ms | Sum(Int) | |
+| **elasticsearch.node.operations.completed** | The number of operations completed by a node. | {operations} | Sum(Int) | |
+| **elasticsearch.node.operations.time** | Time spent on operations by a node. | ms | Sum(Int) | |
| **elasticsearch.node.pipeline.ingest.documents.current** | Total number of documents currently being ingested by a pipeline. | {documents} | Sum(Int) | |
| **elasticsearch.node.pipeline.ingest.documents.preprocessed** | Number of documents preprocessed by the ingest pipeline. | {documents} | Sum(Int) | |
| **elasticsearch.node.pipeline.ingest.operations.failed** | Total number of failed operations for the ingest pipeline. | {operation} | Sum(Int) | |
@@ -92,6 +94,7 @@ metrics:
| Name | Description | Type |
| ---- | ----------- | ---- |
| elasticsearch.cluster.name | The name of the elasticsearch cluster. | String |
+| elasticsearch.index.name | The name of the elasticsearch index. | String |
| elasticsearch.node.name | The name of the elasticsearch node. | String |
## Metric attributes
@@ -109,6 +112,7 @@ metrics:
| document_state (state) | The state of the document. | active, deleted |
| fs_direction (direction) | The direction of filesystem IO. | read, write |
| health_status (status) | The health status of the cluster. | green, yellow, red |
+| index_aggregation_type (aggregation) | Type of shard aggregation for index statistics | primary_shards, total |
| indexing_memory_state (state) | State of the indexing memory | current, total |
| indexing_pressure_stage (stage) | Stage of the indexing pressure | coordinating, primary, replica |
| ingest_pipeline_name (name) | Name of the ingest pipeline. | |
diff --git a/receiver/elasticsearchreceiver/factory.go b/receiver/elasticsearchreceiver/factory.go
index 927ed3c9bee8..0b5539f7cd09 100644
--- a/receiver/elasticsearchreceiver/factory.go
+++ b/receiver/elasticsearchreceiver/factory.go
@@ -58,6 +58,7 @@ func createDefaultConfig() config.Receiver {
},
Metrics: metadata.DefaultMetricsSettings(),
Nodes: []string{"_all"},
+ Indices: []string{"_all"},
}
}
diff --git a/receiver/elasticsearchreceiver/internal/metadata/generated_metrics.go b/receiver/elasticsearchreceiver/internal/metadata/generated_metrics.go
index 0e2bab57b2e5..475b052efb70 100644
--- a/receiver/elasticsearchreceiver/internal/metadata/generated_metrics.go
+++ b/receiver/elasticsearchreceiver/internal/metadata/generated_metrics.go
@@ -31,6 +31,8 @@ type MetricsSettings struct {
ElasticsearchClusterStateQueue MetricSettings `mapstructure:"elasticsearch.cluster.state_queue"`
ElasticsearchClusterStateUpdateCount MetricSettings `mapstructure:"elasticsearch.cluster.state_update.count"`
ElasticsearchClusterStateUpdateTime MetricSettings `mapstructure:"elasticsearch.cluster.state_update.time"`
+ ElasticsearchIndexOperationsCompleted MetricSettings `mapstructure:"elasticsearch.index.operations.completed"`
+ ElasticsearchIndexOperationsTime MetricSettings `mapstructure:"elasticsearch.index.operations.time"`
ElasticsearchIndexingPressureMemoryLimit MetricSettings `mapstructure:"elasticsearch.indexing_pressure.memory.limit"`
ElasticsearchIndexingPressureMemoryTotalPrimaryRejections MetricSettings `mapstructure:"elasticsearch.indexing_pressure.memory.total.primary_rejections"`
ElasticsearchIndexingPressureMemoryTotalReplicaRejections MetricSettings `mapstructure:"elasticsearch.indexing_pressure.memory.total.replica_rejections"`
@@ -132,6 +134,12 @@ func DefaultMetricsSettings() MetricsSettings {
ElasticsearchClusterStateUpdateTime: MetricSettings{
Enabled: true,
},
+ ElasticsearchIndexOperationsCompleted: MetricSettings{
+ Enabled: true,
+ },
+ ElasticsearchIndexOperationsTime: MetricSettings{
+ Enabled: true,
+ },
ElasticsearchIndexingPressureMemoryLimit: MetricSettings{
Enabled: true,
},
@@ -528,6 +536,32 @@ var MapAttributeHealthStatus = map[string]AttributeHealthStatus{
"red": AttributeHealthStatusRed,
}
+// AttributeIndexAggregationType specifies the a value index_aggregation_type attribute.
+type AttributeIndexAggregationType int
+
+const (
+ _ AttributeIndexAggregationType = iota
+ AttributeIndexAggregationTypePrimaryShards
+ AttributeIndexAggregationTypeTotal
+)
+
+// String returns the string representation of the AttributeIndexAggregationType.
+func (av AttributeIndexAggregationType) String() string {
+ switch av {
+ case AttributeIndexAggregationTypePrimaryShards:
+ return "primary_shards"
+ case AttributeIndexAggregationTypeTotal:
+ return "total"
+ }
+ return ""
+}
+
+// MapAttributeIndexAggregationType is a helper map of string to AttributeIndexAggregationType attribute value.
+var MapAttributeIndexAggregationType = map[string]AttributeIndexAggregationType{
+ "primary_shards": AttributeIndexAggregationTypePrimaryShards,
+ "total": AttributeIndexAggregationTypeTotal,
+}
+
// AttributeIndexingMemoryState specifies the a value indexing_memory_state attribute.
type AttributeIndexingMemoryState int
@@ -1515,6 +1549,114 @@ func newMetricElasticsearchClusterStateUpdateTime(settings MetricSettings) metri
return m
}
+type metricElasticsearchIndexOperationsCompleted struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills elasticsearch.index.operations.completed metric with initial data.
+func (m *metricElasticsearchIndexOperationsCompleted) init() {
+ m.data.SetName("elasticsearch.index.operations.completed")
+ m.data.SetDescription("The number of operations completed for an index.")
+ m.data.SetUnit("{operations}")
+ m.data.SetEmptySum()
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricElasticsearchIndexOperationsCompleted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operationAttributeValue string, indexAggregationTypeAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntValue(val)
+ dp.Attributes().PutStr("operation", operationAttributeValue)
+ dp.Attributes().PutStr("aggregation", indexAggregationTypeAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricElasticsearchIndexOperationsCompleted) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricElasticsearchIndexOperationsCompleted) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricElasticsearchIndexOperationsCompleted(settings MetricSettings) metricElasticsearchIndexOperationsCompleted {
+ m := metricElasticsearchIndexOperationsCompleted{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricElasticsearchIndexOperationsTime struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills elasticsearch.index.operations.time metric with initial data.
+func (m *metricElasticsearchIndexOperationsTime) init() {
+ m.data.SetName("elasticsearch.index.operations.time")
+ m.data.SetDescription("Time spent on operations for an index.")
+ m.data.SetUnit("ms")
+ m.data.SetEmptySum()
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricElasticsearchIndexOperationsTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operationAttributeValue string, indexAggregationTypeAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntValue(val)
+ dp.Attributes().PutStr("operation", operationAttributeValue)
+ dp.Attributes().PutStr("aggregation", indexAggregationTypeAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricElasticsearchIndexOperationsTime) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricElasticsearchIndexOperationsTime) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricElasticsearchIndexOperationsTime(settings MetricSettings) metricElasticsearchIndexOperationsTime {
+ m := metricElasticsearchIndexOperationsTime{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
type metricElasticsearchIndexingPressureMemoryLimit struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -2656,7 +2798,7 @@ type metricElasticsearchNodeOperationsCompleted struct {
// init fills elasticsearch.node.operations.completed metric with initial data.
func (m *metricElasticsearchNodeOperationsCompleted) init() {
m.data.SetName("elasticsearch.node.operations.completed")
- m.data.SetDescription("The number of operations completed.")
+ m.data.SetDescription("The number of operations completed by a node.")
m.data.SetUnit("{operations}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
@@ -2709,7 +2851,7 @@ type metricElasticsearchNodeOperationsTime struct {
// init fills elasticsearch.node.operations.time metric with initial data.
func (m *metricElasticsearchNodeOperationsTime) init() {
m.data.SetName("elasticsearch.node.operations.time")
- m.data.SetDescription("Time spent on operations.")
+ m.data.SetDescription("Time spent on operations by a node.")
m.data.SetUnit("ms")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
@@ -4352,6 +4494,8 @@ type MetricsBuilder struct {
metricElasticsearchClusterStateQueue metricElasticsearchClusterStateQueue
metricElasticsearchClusterStateUpdateCount metricElasticsearchClusterStateUpdateCount
metricElasticsearchClusterStateUpdateTime metricElasticsearchClusterStateUpdateTime
+ metricElasticsearchIndexOperationsCompleted metricElasticsearchIndexOperationsCompleted
+ metricElasticsearchIndexOperationsTime metricElasticsearchIndexOperationsTime
metricElasticsearchIndexingPressureMemoryLimit metricElasticsearchIndexingPressureMemoryLimit
metricElasticsearchIndexingPressureMemoryTotalPrimaryRejections metricElasticsearchIndexingPressureMemoryTotalPrimaryRejections
metricElasticsearchIndexingPressureMemoryTotalReplicaRejections metricElasticsearchIndexingPressureMemoryTotalReplicaRejections
@@ -4438,6 +4582,8 @@ func NewMetricsBuilder(settings MetricsSettings, buildInfo component.BuildInfo,
metricElasticsearchClusterStateQueue: newMetricElasticsearchClusterStateQueue(settings.ElasticsearchClusterStateQueue),
metricElasticsearchClusterStateUpdateCount: newMetricElasticsearchClusterStateUpdateCount(settings.ElasticsearchClusterStateUpdateCount),
metricElasticsearchClusterStateUpdateTime: newMetricElasticsearchClusterStateUpdateTime(settings.ElasticsearchClusterStateUpdateTime),
+ metricElasticsearchIndexOperationsCompleted: newMetricElasticsearchIndexOperationsCompleted(settings.ElasticsearchIndexOperationsCompleted),
+ metricElasticsearchIndexOperationsTime: newMetricElasticsearchIndexOperationsTime(settings.ElasticsearchIndexOperationsTime),
metricElasticsearchIndexingPressureMemoryLimit: newMetricElasticsearchIndexingPressureMemoryLimit(settings.ElasticsearchIndexingPressureMemoryLimit),
metricElasticsearchIndexingPressureMemoryTotalPrimaryRejections: newMetricElasticsearchIndexingPressureMemoryTotalPrimaryRejections(settings.ElasticsearchIndexingPressureMemoryTotalPrimaryRejections),
metricElasticsearchIndexingPressureMemoryTotalReplicaRejections: newMetricElasticsearchIndexingPressureMemoryTotalReplicaRejections(settings.ElasticsearchIndexingPressureMemoryTotalReplicaRejections),
@@ -4520,6 +4666,13 @@ func WithElasticsearchClusterName(val string) ResourceMetricsOption {
}
}
+// WithElasticsearchIndexName sets provided value as "elasticsearch.index.name" attribute for current resource.
+func WithElasticsearchIndexName(val string) ResourceMetricsOption {
+ return func(rm pmetric.ResourceMetrics) {
+ rm.Resource().Attributes().PutStr("elasticsearch.index.name", val)
+ }
+}
+
// WithElasticsearchNodeName sets provided value as "elasticsearch.node.name" attribute for current resource.
func WithElasticsearchNodeName(val string) ResourceMetricsOption {
return func(rm pmetric.ResourceMetrics) {
@@ -4573,6 +4726,8 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) {
mb.metricElasticsearchClusterStateQueue.emit(ils.Metrics())
mb.metricElasticsearchClusterStateUpdateCount.emit(ils.Metrics())
mb.metricElasticsearchClusterStateUpdateTime.emit(ils.Metrics())
+ mb.metricElasticsearchIndexOperationsCompleted.emit(ils.Metrics())
+ mb.metricElasticsearchIndexOperationsTime.emit(ils.Metrics())
mb.metricElasticsearchIndexingPressureMemoryLimit.emit(ils.Metrics())
mb.metricElasticsearchIndexingPressureMemoryTotalPrimaryRejections.emit(ils.Metrics())
mb.metricElasticsearchIndexingPressureMemoryTotalReplicaRejections.emit(ils.Metrics())
@@ -4717,6 +4872,16 @@ func (mb *MetricsBuilder) RecordElasticsearchClusterStateUpdateTimeDataPoint(ts
mb.metricElasticsearchClusterStateUpdateTime.recordDataPoint(mb.startTime, ts, val, clusterStateUpdateStateAttributeValue, clusterStateUpdateTypeAttributeValue.String())
}
+// RecordElasticsearchIndexOperationsCompletedDataPoint adds a data point to elasticsearch.index.operations.completed metric.
+func (mb *MetricsBuilder) RecordElasticsearchIndexOperationsCompletedDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation, indexAggregationTypeAttributeValue AttributeIndexAggregationType) {
+ mb.metricElasticsearchIndexOperationsCompleted.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String(), indexAggregationTypeAttributeValue.String())
+}
+
+// RecordElasticsearchIndexOperationsTimeDataPoint adds a data point to elasticsearch.index.operations.time metric.
+func (mb *MetricsBuilder) RecordElasticsearchIndexOperationsTimeDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation, indexAggregationTypeAttributeValue AttributeIndexAggregationType) {
+ mb.metricElasticsearchIndexOperationsTime.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String(), indexAggregationTypeAttributeValue.String())
+}
+
// RecordElasticsearchIndexingPressureMemoryLimitDataPoint adds a data point to elasticsearch.indexing_pressure.memory.limit metric.
func (mb *MetricsBuilder) RecordElasticsearchIndexingPressureMemoryLimitDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricElasticsearchIndexingPressureMemoryLimit.recordDataPoint(mb.startTime, ts, val)
diff --git a/receiver/elasticsearchreceiver/internal/mocks/elasticsearchClient.go b/receiver/elasticsearchreceiver/internal/mocks/elasticsearchClient.go
index 2b10474a40d0..4852e632018c 100644
--- a/receiver/elasticsearchreceiver/internal/mocks/elasticsearchClient.go
+++ b/receiver/elasticsearchreceiver/internal/mocks/elasticsearchClient.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.9.4. DO NOT EDIT.
+// Code generated by mockery v2.14.0. DO NOT EDIT.
package mocks
@@ -38,22 +38,45 @@ func (_m *MockElasticsearchClient) ClusterHealth(ctx context.Context) (*model.Cl
return r0, r1
}
-// NodeStats provides a mock function with given fields: ctx, nodes
-func (_m *MockElasticsearchClient) NodeStats(ctx context.Context, nodes []string) (*model.NodeStats, error) {
- ret := _m.Called(ctx, nodes)
+// ClusterMetadata provides a mock function with given fields: ctx
+func (_m *MockElasticsearchClient) ClusterMetadata(ctx context.Context) (*model.ClusterMetadataResponse, error) {
+ ret := _m.Called(ctx)
- var r0 *model.NodeStats
- if rf, ok := ret.Get(0).(func(context.Context, []string) *model.NodeStats); ok {
- r0 = rf(ctx, nodes)
+ var r0 *model.ClusterMetadataResponse
+ if rf, ok := ret.Get(0).(func(context.Context) *model.ClusterMetadataResponse); ok {
+ r0 = rf(ctx)
} else {
if ret.Get(0) != nil {
- r0 = ret.Get(0).(*model.NodeStats)
+ r0 = ret.Get(0).(*model.ClusterMetadataResponse)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// IndexStats provides a mock function with given fields: ctx, indices
+func (_m *MockElasticsearchClient) IndexStats(ctx context.Context, indices []string) (*model.IndexStats, error) {
+ ret := _m.Called(ctx, indices)
+
+ var r0 *model.IndexStats
+ if rf, ok := ret.Get(0).(func(context.Context, []string) *model.IndexStats); ok {
+ r0 = rf(ctx, indices)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*model.IndexStats)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, []string) error); ok {
- r1 = rf(ctx, nodes)
+ r1 = rf(ctx, indices)
} else {
r1 = ret.Error(1)
}
@@ -61,25 +84,40 @@ func (_m *MockElasticsearchClient) NodeStats(ctx context.Context, nodes []string
return r0, r1
}
-// Version provides a mock function with given fields: ctx
-func (_m *MockElasticsearchClient) Version(ctx context.Context) (*model.VersionResponse, error) {
- ret := _m.Called(ctx)
+// NodeStats provides a mock function with given fields: ctx, nodes
+func (_m *MockElasticsearchClient) NodeStats(ctx context.Context, nodes []string) (*model.NodeStats, error) {
+ ret := _m.Called(ctx, nodes)
- var r0 *model.VersionResponse
- if rf, ok := ret.Get(0).(func(context.Context) *model.VersionResponse); ok {
- r0 = rf(ctx)
+ var r0 *model.NodeStats
+ if rf, ok := ret.Get(0).(func(context.Context, []string) *model.NodeStats); ok {
+ r0 = rf(ctx, nodes)
} else {
if ret.Get(0) != nil {
- r0 = ret.Get(0).(*model.VersionResponse)
+ r0 = ret.Get(0).(*model.NodeStats)
}
}
var r1 error
- if rf, ok := ret.Get(1).(func(context.Context) error); ok {
- r1 = rf(ctx)
+ if rf, ok := ret.Get(1).(func(context.Context, []string) error); ok {
+ r1 = rf(ctx, nodes)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
+
+type mockConstructorTestingTNewMockElasticsearchClient interface {
+ mock.TestingT
+ Cleanup(func())
+}
+
+// NewMockElasticsearchClient creates a new instance of MockElasticsearchClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewMockElasticsearchClient(t mockConstructorTestingTNewMockElasticsearchClient) *MockElasticsearchClient {
+ mock := &MockElasticsearchClient{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/receiver/elasticsearchreceiver/internal/model/version.go b/receiver/elasticsearchreceiver/internal/model/clustermetadata.go
similarity index 88%
rename from receiver/elasticsearchreceiver/internal/model/version.go
rename to receiver/elasticsearchreceiver/internal/model/clustermetadata.go
index 2f488aa2808f..1f8b02129683 100644
--- a/receiver/elasticsearchreceiver/internal/model/version.go
+++ b/receiver/elasticsearchreceiver/internal/model/clustermetadata.go
@@ -14,8 +14,9 @@
package model // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/elasticsearchreceiver/internal/model"
-type VersionResponse struct {
- Version struct {
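+// ClusterMetadataResponse represents a response from the Elasticsearch root ("/") endpoint; only the cluster name and version number are modeled here.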
+type ClusterMetadataResponse struct {
+ ClusterName string `json:"cluster_name"`
+ Version struct {
Number string `json:"number"`
} `json:"version"`
}
diff --git a/receiver/elasticsearchreceiver/internal/model/indexstats.go b/receiver/elasticsearchreceiver/internal/model/indexstats.go
new file mode 100644
index 000000000000..c4d0bf2a2758
--- /dev/null
+++ b/receiver/elasticsearchreceiver/internal/model/indexstats.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/elasticsearchreceiver/internal/model"
+
+// IndexStats represents a response from elasticsearch's /_stats endpoint.
+// The struct is not exhaustive; it does not provide all values returned by elasticsearch,
+// only the ones relevant to the metrics retrieved by the scraper.
+type IndexStats struct {
+ All IndexStatsIndexInfo `json:"_all"`
+ Indices map[string]*IndexStatsIndexInfo `json:"indices"`
+}
+
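+// IndexStatsIndexInfo holds an index's statistics aggregated over primary shards only and over all (primary and replica) shards.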
+type IndexStatsIndexInfo struct {
+ Primaries NodeStatsNodesInfoIndices `json:"primaries"`
+ Total NodeStatsNodesInfoIndices `json:"total"`
+}
diff --git a/receiver/elasticsearchreceiver/metadata.yaml b/receiver/elasticsearchreceiver/metadata.yaml
index 6d3b0e2ab270..f9aa5ca6d25f 100644
--- a/receiver/elasticsearchreceiver/metadata.yaml
+++ b/receiver/elasticsearchreceiver/metadata.yaml
@@ -7,6 +7,9 @@ resource_attributes:
elasticsearch.node.name:
description: The name of the elasticsearch node.
type: string
+ elasticsearch.index.name:
+ description: The name of the elasticsearch index.
+ type: string
attributes:
cache_name:
@@ -137,6 +140,13 @@ attributes:
enum:
- hit
- miss
+ index_aggregation_type:
+ value: aggregation
+ description: Type of shard aggregation for index statistics
+ enum:
+ - primary_shards
+ - total
+
metrics:
# these metrics are from /_nodes/stats, and are node level metrics
elasticsearch.breaker.memory.estimated:
@@ -295,7 +305,7 @@ metrics:
attributes: []
enabled: true
elasticsearch.node.operations.completed:
- description: The number of operations completed.
+ description: The number of operations completed by a node.
unit: "{operations}"
sum:
monotonic: true
@@ -304,7 +314,7 @@ metrics:
attributes: [operation]
enabled: true
elasticsearch.node.operations.time:
- description: Time spent on operations.
+ description: Time spent on operations by a node.
unit: ms
sum:
monotonic: true
@@ -750,3 +760,22 @@ metrics:
value_type: int
attributes: [ ]
enabled: true
+ # these metrics are from /*/_stats and are index level metrics
+ elasticsearch.index.operations.completed:
+ description: The number of operations completed for an index.
+ unit: "{operations}"
+ sum:
+ monotonic: true
+ aggregation: cumulative
+ value_type: int
+ attributes: [operation, index_aggregation_type]
+ enabled: true
+ elasticsearch.index.operations.time:
+ description: Time spent on operations for an index.
+ unit: ms
+ sum:
+ monotonic: true
+ aggregation: cumulative
+ value_type: int
+ attributes: [operation, index_aggregation_type]
+ enabled: true
diff --git a/receiver/elasticsearchreceiver/scraper.go b/receiver/elasticsearchreceiver/scraper.go
index 948863c3b1ac..ac49924e413c 100644
--- a/receiver/elasticsearchreceiver/scraper.go
+++ b/receiver/elasticsearchreceiver/scraper.go
@@ -28,6 +28,7 @@ import (
"go.opentelemetry.io/collector/receiver/scrapererror"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/elasticsearchreceiver/internal/metadata"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/elasticsearchreceiver/internal/model"
)
const (
@@ -81,6 +82,7 @@ type elasticsearchScraper struct {
cfg *Config
mb *metadata.MetricsBuilder
version *version.Version
+ clusterName string
emitMetricsWithDirectionAttribute bool
emitMetricsWithoutDirectionAttribute bool
}
@@ -108,22 +110,25 @@ func (r *elasticsearchScraper) scrape(ctx context.Context) (pmetric.Metrics, err
now := pcommon.NewTimestampFromTime(time.Now())
- r.getVersion(ctx, errs)
+ r.getClusterMetadata(ctx, errs)
r.scrapeNodeMetrics(ctx, now, errs)
r.scrapeClusterMetrics(ctx, now, errs)
+ r.scrapeIndicesMetrics(ctx, now, errs)
return r.mb.Emit(), errs.Combine()
}
-// scrapeVersion gets and assigns the elasticsearch version number
+// getClusterMetadata gets and assigns the cluster name and the elasticsearch version number
-func (r *elasticsearchScraper) getVersion(ctx context.Context, errs *scrapererror.ScrapeErrors) {
- versionResponse, err := r.client.Version(ctx)
+func (r *elasticsearchScraper) getClusterMetadata(ctx context.Context, errs *scrapererror.ScrapeErrors) {
+ response, err := r.client.ClusterMetadata(ctx)
if err != nil {
errs.AddPartial(2, err)
return
}
- esVersion, err := version.NewVersion(versionResponse.Version.Number)
+ r.clusterName = response.ClusterName
+
+ esVersion, err := version.NewVersion(response.Version.Number)
if err != nil {
errs.AddPartial(2, err)
return
@@ -363,3 +368,41 @@ func (r *elasticsearchScraper) scrapeClusterMetrics(ctx context.Context, now pco
r.mb.EmitForResource(metadata.WithElasticsearchClusterName(clusterHealth.ClusterName))
}
+
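+// scrapeIndicesMetrics gets index-level search metrics for the configured indices; it is a no-op when no indices are configured.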
+func (r *elasticsearchScraper) scrapeIndicesMetrics(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) {
+ if len(r.cfg.Indices) == 0 {
+ return
+ }
+
+ indexStats, err := r.client.IndexStats(ctx, r.cfg.Indices)
+
+ if err != nil {
+ errs.AddPartial(4, err)
+ return
+ }
+
+ // The metrics aggregated across all indices are reported under the "_all" name, so that is the name used to label them.
+ r.scrapeOneIndexMetrics(now, "_all", &indexStats.All)
+
+ for name, stats := range indexStats.Indices {
+ r.scrapeOneIndexMetrics(now, name, stats)
+ }
+}
+
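+// scrapeOneIndexMetrics records search operation metrics for a single index and emits them as a resource labeled with the index and cluster names.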
+func (r *elasticsearchScraper) scrapeOneIndexMetrics(now pcommon.Timestamp, name string, stats *model.IndexStatsIndexInfo) {
+ r.mb.RecordElasticsearchIndexOperationsCompletedDataPoint(
+ now, stats.Total.SearchOperations.FetchTotal, metadata.AttributeOperationFetch, metadata.AttributeIndexAggregationTypeTotal,
+ )
+ r.mb.RecordElasticsearchIndexOperationsCompletedDataPoint(
+ now, stats.Total.SearchOperations.QueryTotal, metadata.AttributeOperationQuery, metadata.AttributeIndexAggregationTypeTotal,
+ )
+
+ r.mb.RecordElasticsearchIndexOperationsTimeDataPoint(
+ now, stats.Total.SearchOperations.FetchTimeInMs, metadata.AttributeOperationFetch, metadata.AttributeIndexAggregationTypeTotal,
+ )
+ r.mb.RecordElasticsearchIndexOperationsTimeDataPoint(
+ now, stats.Total.SearchOperations.QueryTimeInMs, metadata.AttributeOperationQuery, metadata.AttributeIndexAggregationTypeTotal,
+ )
+
+ r.mb.EmitForResource(metadata.WithElasticsearchIndexName(name), metadata.WithElasticsearchClusterName(r.clusterName))
+}
diff --git a/receiver/elasticsearchreceiver/scraper_test.go b/receiver/elasticsearchreceiver/scraper_test.go
index c7af26ec4532..731b4e21cb74 100644
--- a/receiver/elasticsearchreceiver/scraper_test.go
+++ b/receiver/elasticsearchreceiver/scraper_test.go
@@ -48,9 +48,10 @@ func TestScraper(t *testing.T) {
require.NoError(t, err)
mockClient := mocks.MockElasticsearchClient{}
- mockClient.On("Version", mock.Anything).Return(versionNumber(t), nil)
+ mockClient.On("ClusterMetadata", mock.Anything).Return(clusterMetadata(t), nil)
mockClient.On("ClusterHealth", mock.Anything).Return(clusterHealth(t), nil)
mockClient.On("NodeStats", mock.Anything, []string{"_all"}).Return(nodeStats(t), nil)
+ mockClient.On("IndexStats", mock.Anything, []string{"_all"}).Return(indexStats(t), nil)
sc.client = &mockClient
@@ -74,9 +75,10 @@ func TestScraperMetricsWithoutDirection(t *testing.T) {
require.NoError(t, err)
mockClient := mocks.MockElasticsearchClient{}
- mockClient.On("Version", mock.Anything).Return(versionNumber(t), nil)
+ mockClient.On("ClusterMetadata", mock.Anything).Return(clusterMetadata(t), nil)
mockClient.On("ClusterHealth", mock.Anything).Return(clusterHealth(t), nil)
mockClient.On("NodeStats", mock.Anything, []string{"_all"}).Return(nodeStats(t), nil)
+ mockClient.On("IndexStats", mock.Anything, []string{"_all"}).Return(indexStats(t), nil)
sc.client = &mockClient
@@ -101,9 +103,10 @@ func TestScraperSkipClusterMetrics(t *testing.T) {
require.NoError(t, err)
mockClient := mocks.MockElasticsearchClient{}
- mockClient.On("Version", mock.Anything).Return(versionNumber(t), nil)
+ mockClient.On("ClusterMetadata", mock.Anything).Return(clusterMetadata(t), nil)
mockClient.On("ClusterHealth", mock.Anything).Return(clusterHealth(t), nil)
mockClient.On("NodeStats", mock.Anything, []string{"_all"}).Return(nodeStats(t), nil)
+ mockClient.On("IndexStats", mock.Anything, []string{"_all"}).Return(indexStats(t), nil)
sc.client = &mockClient
@@ -128,9 +131,10 @@ func TestScraperNoNodesMetrics(t *testing.T) {
require.NoError(t, err)
mockClient := mocks.MockElasticsearchClient{}
- mockClient.On("Version", mock.Anything).Return(versionNumber(t), nil)
+ mockClient.On("ClusterMetadata", mock.Anything).Return(clusterMetadata(t), nil)
mockClient.On("ClusterHealth", mock.Anything).Return(clusterHealth(t), nil)
mockClient.On("NodeStats", mock.Anything, []string{}).Return(nodeStats(t), nil)
+ mockClient.On("IndexStats", mock.Anything, []string{"_all"}).Return(indexStats(t), nil)
sc.client = &mockClient
@@ -179,9 +183,10 @@ func TestScrapingError(t *testing.T) {
err404 := errors.New("expected status 200 but got 404")
mockClient := mocks.MockElasticsearchClient{}
- mockClient.On("Version", mock.Anything).Return(versionNumber(t), nil)
+ mockClient.On("ClusterMetadata", mock.Anything).Return(clusterMetadata(t), nil)
mockClient.On("NodeStats", mock.Anything, []string{"_all"}).Return(nil, err404)
mockClient.On("ClusterHealth", mock.Anything).Return(clusterHealth(t), nil)
+ mockClient.On("IndexStats", mock.Anything, []string{"_all"}).Return(indexStats(t), nil)
sc := newElasticSearchScraper(componenttest.NewNopReceiverCreateSettings(), createDefaultConfig().(*Config))
err := sc.start(context.Background(), componenttest.NewNopHost())
@@ -203,9 +208,10 @@ func TestScrapingError(t *testing.T) {
err404 := errors.New("expected status 200 but got 404")
mockClient := mocks.MockElasticsearchClient{}
- mockClient.On("Version", mock.Anything).Return(versionNumber(t), nil)
+ mockClient.On("ClusterMetadata", mock.Anything).Return(clusterMetadata(t), nil)
mockClient.On("NodeStats", mock.Anything, []string{"_all"}).Return(nodeStats(t), nil)
mockClient.On("ClusterHealth", mock.Anything).Return(nil, err404)
+ mockClient.On("IndexStats", mock.Anything, []string{"_all"}).Return(indexStats(t), nil)
sc := newElasticSearchScraper(componenttest.NewNopReceiverCreateSettings(), createDefaultConfig().(*Config))
err := sc.start(context.Background(), componenttest.NewNopHost())
@@ -220,7 +226,7 @@ func TestScrapingError(t *testing.T) {
},
},
{
- desc: "Both node stats and cluster health fails",
+ desc: "Node stats, index stats and cluster health fails",
run: func(t *testing.T) {
t.Parallel()
@@ -228,9 +234,10 @@ func TestScrapingError(t *testing.T) {
err500 := errors.New("expected status 200 but got 500")
mockClient := mocks.MockElasticsearchClient{}
- mockClient.On("Version", mock.Anything).Return(versionNumber(t), nil)
+ mockClient.On("ClusterMetadata", mock.Anything).Return(clusterMetadata(t), nil)
mockClient.On("NodeStats", mock.Anything, []string{"_all"}).Return(nil, err500)
mockClient.On("ClusterHealth", mock.Anything).Return(nil, err404)
+ mockClient.On("IndexStats", mock.Anything, []string{"_all"}).Return(nil, err500)
sc := newElasticSearchScraper(componenttest.NewNopReceiverCreateSettings(), createDefaultConfig().(*Config))
err := sc.start(context.Background(), componenttest.NewNopHost())
@@ -246,16 +253,17 @@ func TestScrapingError(t *testing.T) {
},
},
{
- desc: "Version is invalid, node stats and cluster health succeed",
+ desc: "ClusterMetadata is invalid, node stats and cluster health succeed",
run: func(t *testing.T) {
t.Parallel()
err404 := errors.New("expected status 200 but got 404")
mockClient := mocks.MockElasticsearchClient{}
- mockClient.On("Version", mock.Anything).Return(nil, err404)
+ mockClient.On("ClusterMetadata", mock.Anything).Return(nil, err404)
mockClient.On("NodeStats", mock.Anything, []string{"_all"}).Return(nodeStats(t), nil)
mockClient.On("ClusterHealth", mock.Anything).Return(clusterHealth(t), nil)
+ mockClient.On("IndexStats", mock.Anything, []string{"_all"}).Return(indexStats(t), nil)
sc := newElasticSearchScraper(componenttest.NewNopReceiverCreateSettings(), createDefaultConfig().(*Config))
err := sc.start(context.Background(), componenttest.NewNopHost())
@@ -269,7 +277,7 @@ func TestScrapingError(t *testing.T) {
},
},
{
- desc: "Version, node stats and cluster health fails",
+ desc: "ClusterMetadata, node stats, index stats and cluster health fails",
run: func(t *testing.T) {
t.Parallel()
@@ -277,9 +285,10 @@ func TestScrapingError(t *testing.T) {
err500 := errors.New("expected status 200 but got 500")
mockClient := mocks.MockElasticsearchClient{}
- mockClient.On("Version", mock.Anything).Return(nil, err404)
+ mockClient.On("ClusterMetadata", mock.Anything).Return(nil, err404)
mockClient.On("NodeStats", mock.Anything, []string{"_all"}).Return(nil, err500)
mockClient.On("ClusterHealth", mock.Anything).Return(nil, err404)
+ mockClient.On("IndexStats", mock.Anything, []string{"_all"}).Return(nil, err500)
sc := newElasticSearchScraper(componenttest.NewNopReceiverCreateSettings(), createDefaultConfig().(*Config))
err := sc.start(context.Background(), componenttest.NewNopHost())
@@ -303,9 +312,10 @@ func TestScrapingError(t *testing.T) {
ch.Status = "pink"
mockClient := mocks.MockElasticsearchClient{}
- mockClient.On("Version", mock.Anything).Return(versionNumber(t), nil)
+ mockClient.On("ClusterMetadata", mock.Anything).Return(clusterMetadata(t), nil)
mockClient.On("NodeStats", mock.Anything, []string{"_all"}).Return(nodeStats(t), nil)
mockClient.On("ClusterHealth", mock.Anything).Return(ch, nil)
+ mockClient.On("IndexStats", mock.Anything, []string{"_all"}).Return(indexStats(t), nil)
sc := newElasticSearchScraper(componenttest.NewNopReceiverCreateSettings(), createDefaultConfig().(*Config))
err := sc.start(context.Background(), componenttest.NewNopHost())
@@ -344,11 +354,20 @@ func nodeStats(t *testing.T) *model.NodeStats {
return &nodeStats
}
-func versionNumber(t *testing.T) *model.VersionResponse {
- versionJSON, err := os.ReadFile("./testdata/sample_payloads/version.json")
+func indexStats(t *testing.T) *model.IndexStats {
+ indexJSON, err := os.ReadFile("./testdata/sample_payloads/indices.json")
require.NoError(t, err)
- versionResponse := model.VersionResponse{}
- require.NoError(t, json.Unmarshal(versionJSON, &versionResponse))
- return &versionResponse
+ indexStats := model.IndexStats{}
+ require.NoError(t, json.Unmarshal(indexJSON, &indexStats))
+ return &indexStats
+}
+
+func clusterMetadata(t *testing.T) *model.ClusterMetadataResponse {
+ metadataJSON, err := os.ReadFile("./testdata/sample_payloads/metadata.json")
+ require.NoError(t, err)
+
+ metadataResponse := model.ClusterMetadataResponse{}
+ require.NoError(t, json.Unmarshal(metadataJSON, &metadataResponse))
+ return &metadataResponse
}
diff --git a/receiver/elasticsearchreceiver/testdata/config.yaml b/receiver/elasticsearchreceiver/testdata/config.yaml
index eed921a24c2b..3c71cc01f508 100644
--- a/receiver/elasticsearchreceiver/testdata/config.yaml
+++ b/receiver/elasticsearchreceiver/testdata/config.yaml
@@ -5,6 +5,7 @@ elasticsearch:
enabled: false
nodes: [ "_local" ]
skip_cluster_metrics: true
+ indices: [ ".geoip_databases" ]
endpoint: http://example.com:9200
username: otel
password: password
diff --git a/receiver/elasticsearchreceiver/testdata/expected_metrics/clusterSkip.json b/receiver/elasticsearchreceiver/testdata/expected_metrics/clusterSkip.json
index a29bb3b8d54a..b95e9720ce3a 100644
--- a/receiver/elasticsearchreceiver/testdata/expected_metrics/clusterSkip.json
+++ b/receiver/elasticsearchreceiver/testdata/expected_metrics/clusterSkip.json
@@ -1143,7 +1143,7 @@
"unit": "{files}"
},
{
- "description": "The number of operations completed.",
+ "description": "The number of operations completed by a node.",
"name": "elasticsearch.node.operations.completed",
"sum": {
"aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
@@ -1297,7 +1297,7 @@
"unit": "{operations}"
},
{
- "description": "Time spent on operations.",
+ "description": "Time spent on operations by a node.",
"name": "elasticsearch.node.operations.time",
"sum": {
"aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
@@ -2183,6 +2183,258 @@
}
}
]
+ },
+ {
+ "resource": {
+ "attributes": [
+ {
+ "key": "elasticsearch.index.name",
+ "value": {
+ "stringValue": ".geoip_databases"
+ }
+ },
+ {
+ "key": "elasticsearch.cluster.name",
+ "value": {
+ "stringValue": "docker-cluster"
+ }
+ }
+ ]
+ },
+ "scopeMetrics": [
+ {
+ "metrics": [
+ {
+ "description": "The number of operations completed for an index.",
+ "name": "elasticsearch.index.operations.completed",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "{operations}"
+ },
+ {
+ "description": "Time spent on operations for an index.",
+ "name": "elasticsearch.index.operations.time",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "82",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "52",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "ms"
+ }
+ ],
+ "scope": {
+ "name": "otelcol/elasticsearchreceiver",
+ "version": "latest"
+ }
+ }
+ ]
+ },
+ {
+ "resource": {
+ "attributes": [
+ {
+ "key": "elasticsearch.index.name",
+ "value": {
+ "stringValue": "_all"
+ }
+ },
+ {
+ "key": "elasticsearch.cluster.name",
+ "value": {
+ "stringValue": "docker-cluster"
+ }
+ }
+ ]
+ },
+ "scopeMetrics": [
+ {
+ "metrics": [
+ {
+ "description": "The number of operations completed for an index.",
+ "name": "elasticsearch.index.operations.completed",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "{operations}"
+ },
+ {
+ "description": "Time spent on operations for an index.",
+ "name": "elasticsearch.index.operations.time",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "82",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "52",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "ms"
+ }
+ ],
+ "scope": {
+ "name": "otelcol/elasticsearchreceiver",
+ "version": "latest"
+ }
+ }
+ ]
}
]
}
diff --git a/receiver/elasticsearchreceiver/testdata/expected_metrics/full.json b/receiver/elasticsearchreceiver/testdata/expected_metrics/full.json
index e2286d4bb4bc..c868c7e4e727 100644
--- a/receiver/elasticsearchreceiver/testdata/expected_metrics/full.json
+++ b/receiver/elasticsearchreceiver/testdata/expected_metrics/full.json
@@ -1143,7 +1143,7 @@
"unit": "{files}"
},
{
- "description": "The number of operations completed.",
+ "description": "The number of operations completed by a node.",
"name": "elasticsearch.node.operations.completed",
"sum": {
"aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
@@ -1297,7 +1297,7 @@
"unit": "{operations}"
},
{
- "description": "Time spent on operations.",
+ "description": "Time spent on operations by a node.",
"name": "elasticsearch.node.operations.time",
"sum": {
"aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
@@ -2376,6 +2376,258 @@
}
}
]
+ },
+ {
+ "resource": {
+ "attributes": [
+ {
+ "key": "elasticsearch.index.name",
+ "value": {
+ "stringValue": ".geoip_databases"
+ }
+ },
+ {
+ "key": "elasticsearch.cluster.name",
+ "value": {
+ "stringValue": "docker-cluster"
+ }
+ }
+ ]
+ },
+ "scopeMetrics": [
+ {
+ "metrics": [
+ {
+ "description": "The number of operations completed for an index.",
+ "name": "elasticsearch.index.operations.completed",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "{operations}"
+ },
+ {
+ "description": "Time spent on operations for an index.",
+ "name": "elasticsearch.index.operations.time",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "82",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "52",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "ms"
+ }
+ ],
+ "scope": {
+ "name": "otelcol/elasticsearchreceiver",
+ "version": "latest"
+ }
+ }
+ ]
+ },
+ {
+ "resource": {
+ "attributes": [
+ {
+ "key": "elasticsearch.index.name",
+ "value": {
+ "stringValue": "_all"
+ }
+ },
+ {
+ "key": "elasticsearch.cluster.name",
+ "value": {
+ "stringValue": "docker-cluster"
+ }
+ }
+ ]
+ },
+ "scopeMetrics": [
+ {
+ "metrics": [
+ {
+ "description": "The number of operations completed for an index.",
+ "name": "elasticsearch.index.operations.completed",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "{operations}"
+ },
+ {
+ "description": "Time spent on operations for an index.",
+ "name": "elasticsearch.index.operations.time",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "82",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "52",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "ms"
+ }
+ ],
+ "scope": {
+ "name": "otelcol/elasticsearchreceiver",
+ "version": "latest"
+ }
+ }
+ ]
}
]
}
diff --git a/receiver/elasticsearchreceiver/testdata/expected_metrics/fullWithoutDirection.json b/receiver/elasticsearchreceiver/testdata/expected_metrics/fullWithoutDirection.json
index 8437b191e0bb..513adad9f3d3 100644
--- a/receiver/elasticsearchreceiver/testdata/expected_metrics/fullWithoutDirection.json
+++ b/receiver/elasticsearchreceiver/testdata/expected_metrics/fullWithoutDirection.json
@@ -1138,7 +1138,7 @@
"unit": "{files}"
},
{
- "description": "The number of operations completed.",
+ "description": "The number of operations completed by a node.",
"name": "elasticsearch.node.operations.completed",
"sum": {
"aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
@@ -1292,7 +1292,7 @@
"unit": "{operations}"
},
{
- "description": "Time spent on operations.",
+ "description": "Time spent on operations by a node.",
"name": "elasticsearch.node.operations.time",
"sum": {
"aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
@@ -2371,6 +2371,258 @@
}
}
]
+ },
+ {
+ "resource": {
+ "attributes": [
+ {
+ "key": "elasticsearch.index.name",
+ "value": {
+ "stringValue": ".geoip_databases"
+ }
+ },
+ {
+ "key": "elasticsearch.cluster.name",
+ "value": {
+ "stringValue": "docker-cluster"
+ }
+ }
+ ]
+ },
+ "scopeMetrics": [
+ {
+ "metrics": [
+ {
+ "description": "The number of operations completed for an index.",
+ "name": "elasticsearch.index.operations.completed",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "{operations}"
+ },
+ {
+ "description": "Time spent on operations for an index.",
+ "name": "elasticsearch.index.operations.time",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "82",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "52",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "ms"
+ }
+ ],
+ "scope": {
+ "name": "otelcol/elasticsearchreceiver",
+ "version": "latest"
+ }
+ }
+ ]
+ },
+ {
+ "resource": {
+ "attributes": [
+ {
+ "key": "elasticsearch.index.name",
+ "value": {
+ "stringValue": "_all"
+ }
+ },
+ {
+ "key": "elasticsearch.cluster.name",
+ "value": {
+ "stringValue": "docker-cluster"
+ }
+ }
+ ]
+ },
+ "scopeMetrics": [
+ {
+ "metrics": [
+ {
+ "description": "The number of operations completed for an index.",
+ "name": "elasticsearch.index.operations.completed",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "{operations}"
+ },
+ {
+ "description": "Time spent on operations for an index.",
+ "name": "elasticsearch.index.operations.time",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "82",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "52",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "ms"
+ }
+ ],
+ "scope": {
+ "name": "otelcol/elasticsearchreceiver",
+ "version": "latest"
+ }
+ }
+ ]
}
]
}
diff --git a/receiver/elasticsearchreceiver/testdata/expected_metrics/noNodes.json b/receiver/elasticsearchreceiver/testdata/expected_metrics/noNodes.json
index 00f78f5aa95e..33cd670b3d7a 100644
--- a/receiver/elasticsearchreceiver/testdata/expected_metrics/noNodes.json
+++ b/receiver/elasticsearchreceiver/testdata/expected_metrics/noNodes.json
@@ -192,6 +192,258 @@
}
}
]
+ },
+ {
+ "resource": {
+ "attributes": [
+ {
+ "key": "elasticsearch.index.name",
+ "value": {
+ "stringValue": ".geoip_databases"
+ }
+ },
+ {
+ "key": "elasticsearch.cluster.name",
+ "value": {
+ "stringValue": "docker-cluster"
+ }
+ }
+ ]
+ },
+ "scopeMetrics": [
+ {
+ "metrics": [
+ {
+ "description": "The number of operations completed for an index.",
+ "name": "elasticsearch.index.operations.completed",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "{operations}"
+ },
+ {
+ "description": "Time spent on operations for an index.",
+ "name": "elasticsearch.index.operations.time",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "82",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "52",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "ms"
+ }
+ ],
+ "scope": {
+ "name": "otelcol/elasticsearchreceiver",
+ "version": "latest"
+ }
+ }
+ ]
+ },
+ {
+ "resource": {
+ "attributes": [
+ {
+ "key": "elasticsearch.index.name",
+ "value": {
+ "stringValue": "_all"
+ }
+ },
+ {
+ "key": "elasticsearch.cluster.name",
+ "value": {
+ "stringValue": "docker-cluster"
+ }
+ }
+ ]
+ },
+ "scopeMetrics": [
+ {
+ "metrics": [
+ {
+ "description": "The number of operations completed for an index.",
+ "name": "elasticsearch.index.operations.completed",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "{operations}"
+ },
+ {
+ "description": "Time spent on operations for an index.",
+ "name": "elasticsearch.index.operations.time",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "82",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "52",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "ms"
+ }
+ ],
+ "scope": {
+ "name": "otelcol/elasticsearchreceiver",
+ "version": "latest"
+ }
+ }
+ ]
}
]
}
diff --git a/receiver/elasticsearchreceiver/testdata/integration/expected.7_16_3.json b/receiver/elasticsearchreceiver/testdata/integration/expected.7_16_3.json
index f226be1ec524..b2a037a6502f 100644
--- a/receiver/elasticsearchreceiver/testdata/integration/expected.7_16_3.json
+++ b/receiver/elasticsearchreceiver/testdata/integration/expected.7_16_3.json
@@ -1146,7 +1146,7 @@
"unit": "{files}"
},
{
- "description": "The number of operations completed.",
+ "description": "The number of operations completed by a node.",
"name": "elasticsearch.node.operations.completed",
"sum": {
"aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
@@ -1300,7 +1300,7 @@
"unit": "{operations}"
},
{
- "description": "Time spent on operations.",
+ "description": "Time spent on operations by a node.",
"name": "elasticsearch.node.operations.time",
"sum": {
"aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
@@ -5286,6 +5286,246 @@
}
}
]
+ },
+ {
+ "resource": {
+ "attributes": [
+ {
+ "key": "elasticsearch.index.name",
+ "value": {
+ "stringValue": ".geoip_databases"
+ }
+ }
+ ]
+ },
+ "scopeMetrics": [
+ {
+ "metrics": [
+ {
+ "description": "The number of operations completed for an index.",
+ "name": "elasticsearch.index.operations.completed",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "{operations}"
+ },
+ {
+ "description": "Time spent on operations for an index.",
+ "name": "elasticsearch.index.operations.time",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "82",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "52",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "ms"
+ }
+ ],
+ "scope": {
+ "name": "otelcol/elasticsearchreceiver",
+ "version": "latest"
+ }
+ }
+ ]
+ },
+ {
+ "resource": {
+ "attributes": [
+ {
+ "key": "elasticsearch.index.name",
+ "value": {
+ "stringValue": "_all"
+ }
+ }
+ ]
+ },
+ "scopeMetrics": [
+ {
+ "metrics": [
+ {
+ "description": "The number of operations completed for an index.",
+ "name": "elasticsearch.index.operations.completed",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "{operations}"
+ },
+ {
+ "description": "Time spent on operations for an index.",
+ "name": "elasticsearch.index.operations.time",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "82",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "52",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "ms"
+ }
+ ],
+ "scope": {
+ "name": "otelcol/elasticsearchreceiver",
+ "version": "latest"
+ }
+ }
+ ]
}
]
}
diff --git a/receiver/elasticsearchreceiver/testdata/integration/expected.7_9_3.json b/receiver/elasticsearchreceiver/testdata/integration/expected.7_9_3.json
index eab7cf400a90..752c810b937f 100644
--- a/receiver/elasticsearchreceiver/testdata/integration/expected.7_9_3.json
+++ b/receiver/elasticsearchreceiver/testdata/integration/expected.7_9_3.json
@@ -802,7 +802,7 @@
"unit": "{files}"
},
{
- "description": "The number of operations completed.",
+ "description": "The number of operations completed by a node.",
"name": "elasticsearch.node.operations.completed",
"sum": {
"aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
@@ -956,7 +956,7 @@
"unit": "{operations}"
},
{
- "description": "Time spent on operations.",
+ "description": "Time spent on operations by a node.",
"name": "elasticsearch.node.operations.time",
"sum": {
"aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
@@ -4067,6 +4067,246 @@
}
}
]
+ },
+ {
+ "resource": {
+ "attributes": [
+ {
+ "key": "elasticsearch.index.name",
+ "value": {
+ "stringValue": ".geoip_databases"
+ }
+ }
+ ]
+ },
+ "scopeMetrics": [
+ {
+ "metrics": [
+ {
+ "description": "The number of operations completed for an index.",
+ "name": "elasticsearch.index.operations.completed",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "{operations}"
+ },
+ {
+ "description": "Time spent on operations for an index.",
+ "name": "elasticsearch.index.operations.time",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "82",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "52",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "ms"
+ }
+ ],
+ "scope": {
+ "name": "otelcol/elasticsearchreceiver",
+ "version": "latest"
+ }
+ }
+ ]
+ },
+ {
+ "resource": {
+ "attributes": [
+ {
+ "key": "elasticsearch.index.name",
+ "value": {
+ "stringValue": "_all"
+ }
+ }
+ ]
+ },
+ "scopeMetrics": [
+ {
+ "metrics": [
+ {
+ "description": "The number of operations completed for an index.",
+ "name": "elasticsearch.index.operations.completed",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "43",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "{operations}"
+ },
+ {
+ "description": "Time spent on operations for an index.",
+ "name": "elasticsearch.index.operations.time",
+ "sum": {
+ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE",
+ "isMonotonic": true,
+ "dataPoints": [
+ {
+ "asInt": "82",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "fetch"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ },
+ {
+ "asInt": "52",
+ "attributes": [
+ {
+ "key": "operation",
+ "value": {
+ "stringValue": "query"
+ }
+ },
+ {
+ "key": "aggregation",
+ "value": {
+ "stringValue": "total"
+ }
+ }
+ ],
+ "startTimeUnixNano": "1661811689941624000",
+ "timeUnixNano": "1661811689943245000"
+ }
+ ]
+ },
+ "unit": "ms"
+ }
+ ],
+ "scope": {
+ "name": "otelcol/elasticsearchreceiver",
+ "version": "latest"
+ }
+ }
+ ]
}
]
}
diff --git a/receiver/elasticsearchreceiver/testdata/sample_payloads/indices.json b/receiver/elasticsearchreceiver/testdata/sample_payloads/indices.json
new file mode 100644
index 000000000000..e913fb4b8971
--- /dev/null
+++ b/receiver/elasticsearchreceiver/testdata/sample_payloads/indices.json
@@ -0,0 +1,526 @@
+{
+ "_shards" : {
+ "total" : 1,
+ "successful" : 1,
+ "failed" : 0
+ },
+ "_all" : {
+ "primaries" : {
+ "docs" : {
+ "count" : 40,
+ "deleted" : 0
+ },
+ "shard_stats" : {
+ "total_count" : 1
+ },
+ "store" : {
+ "size_in_bytes" : 40230884,
+ "total_data_set_size_in_bytes" : 40230884,
+ "reserved_in_bytes" : 0
+ },
+ "indexing" : {
+ "index_total" : 40,
+ "index_time_in_millis" : 938,
+ "index_current" : 0,
+ "index_failed" : 0,
+ "delete_total" : 0,
+ "delete_time_in_millis" : 0,
+ "delete_current" : 0,
+ "noop_update_total" : 0,
+ "is_throttled" : false,
+ "throttle_time_in_millis" : 0
+ },
+ "get" : {
+ "total" : 0,
+ "time_in_millis" : 0,
+ "exists_total" : 0,
+ "exists_time_in_millis" : 0,
+ "missing_total" : 0,
+ "missing_time_in_millis" : 0,
+ "current" : 0
+ },
+ "search" : {
+ "open_contexts" : 0,
+ "query_total" : 43,
+ "query_time_in_millis" : 52,
+ "query_current" : 0,
+ "fetch_total" : 43,
+ "fetch_time_in_millis" : 82,
+ "fetch_current" : 0,
+ "scroll_total" : 3,
+ "scroll_time_in_millis" : 30,
+ "scroll_current" : 0,
+ "suggest_total" : 0,
+ "suggest_time_in_millis" : 0,
+ "suggest_current" : 0
+ },
+ "merges" : {
+ "current" : 0,
+ "current_docs" : 0,
+ "current_size_in_bytes" : 0,
+ "total" : 0,
+ "total_time_in_millis" : 0,
+ "total_docs" : 0,
+ "total_size_in_bytes" : 0,
+ "total_stopped_time_in_millis" : 0,
+ "total_throttled_time_in_millis" : 0,
+ "total_auto_throttle_in_bytes" : 20971520
+ },
+ "refresh" : {
+ "total" : 10,
+ "total_time_in_millis" : 169,
+ "external_total" : 7,
+ "external_total_time_in_millis" : 161,
+ "listeners" : 0
+ },
+ "flush" : {
+ "total" : 4,
+ "periodic" : 0,
+ "total_time_in_millis" : 192
+ },
+ "warmer" : {
+ "current" : 0,
+ "total" : 6,
+ "total_time_in_millis" : 0
+ },
+ "query_cache" : {
+ "memory_size_in_bytes" : 0,
+ "total_count" : 0,
+ "hit_count" : 0,
+ "miss_count" : 0,
+ "cache_size" : 0,
+ "cache_count" : 0,
+ "evictions" : 0
+ },
+ "fielddata" : {
+ "memory_size_in_bytes" : 0,
+ "evictions" : 0
+ },
+ "completion" : {
+ "size_in_bytes" : 0
+ },
+ "segments" : {
+ "count" : 5,
+ "memory_in_bytes" : 5460,
+ "terms_memory_in_bytes" : 2560,
+ "stored_fields_memory_in_bytes" : 2520,
+ "term_vectors_memory_in_bytes" : 0,
+ "norms_memory_in_bytes" : 0,
+ "points_memory_in_bytes" : 0,
+ "doc_values_memory_in_bytes" : 380,
+ "index_writer_memory_in_bytes" : 0,
+ "version_map_memory_in_bytes" : 0,
+ "fixed_bit_set_memory_in_bytes" : 0,
+ "max_unsafe_auto_id_timestamp" : -1,
+ "file_sizes" : { }
+ },
+ "translog" : {
+ "operations" : 0,
+ "size_in_bytes" : 55,
+ "uncommitted_operations" : 0,
+ "uncommitted_size_in_bytes" : 55,
+ "earliest_last_modified_age" : 1000180
+ },
+ "request_cache" : {
+ "memory_size_in_bytes" : 0,
+ "evictions" : 0,
+ "hit_count" : 0,
+ "miss_count" : 0
+ },
+ "recovery" : {
+ "current_as_source" : 0,
+ "current_as_target" : 0,
+ "throttle_time_in_millis" : 0
+ }
+ },
+ "total" : {
+ "docs" : {
+ "count" : 40,
+ "deleted" : 0
+ },
+ "shard_stats" : {
+ "total_count" : 1
+ },
+ "store" : {
+ "size_in_bytes" : 40230884,
+ "total_data_set_size_in_bytes" : 40230884,
+ "reserved_in_bytes" : 0
+ },
+ "indexing" : {
+ "index_total" : 40,
+ "index_time_in_millis" : 938,
+ "index_current" : 0,
+ "index_failed" : 0,
+ "delete_total" : 0,
+ "delete_time_in_millis" : 0,
+ "delete_current" : 0,
+ "noop_update_total" : 0,
+ "is_throttled" : false,
+ "throttle_time_in_millis" : 0
+ },
+ "get" : {
+ "total" : 0,
+ "time_in_millis" : 0,
+ "exists_total" : 0,
+ "exists_time_in_millis" : 0,
+ "missing_total" : 0,
+ "missing_time_in_millis" : 0,
+ "current" : 0
+ },
+ "search" : {
+ "open_contexts" : 0,
+ "query_total" : 43,
+ "query_time_in_millis" : 52,
+ "query_current" : 0,
+ "fetch_total" : 43,
+ "fetch_time_in_millis" : 82,
+ "fetch_current" : 0,
+ "scroll_total" : 3,
+ "scroll_time_in_millis" : 30,
+ "scroll_current" : 0,
+ "suggest_total" : 0,
+ "suggest_time_in_millis" : 0,
+ "suggest_current" : 0
+ },
+ "merges" : {
+ "current" : 0,
+ "current_docs" : 0,
+ "current_size_in_bytes" : 0,
+ "total" : 0,
+ "total_time_in_millis" : 0,
+ "total_docs" : 0,
+ "total_size_in_bytes" : 0,
+ "total_stopped_time_in_millis" : 0,
+ "total_throttled_time_in_millis" : 0,
+ "total_auto_throttle_in_bytes" : 20971520
+ },
+ "refresh" : {
+ "total" : 10,
+ "total_time_in_millis" : 169,
+ "external_total" : 7,
+ "external_total_time_in_millis" : 161,
+ "listeners" : 0
+ },
+ "flush" : {
+ "total" : 4,
+ "periodic" : 0,
+ "total_time_in_millis" : 192
+ },
+ "warmer" : {
+ "current" : 0,
+ "total" : 6,
+ "total_time_in_millis" : 0
+ },
+ "query_cache" : {
+ "memory_size_in_bytes" : 0,
+ "total_count" : 0,
+ "hit_count" : 0,
+ "miss_count" : 0,
+ "cache_size" : 0,
+ "cache_count" : 0,
+ "evictions" : 0
+ },
+ "fielddata" : {
+ "memory_size_in_bytes" : 0,
+ "evictions" : 0
+ },
+ "completion" : {
+ "size_in_bytes" : 0
+ },
+ "segments" : {
+ "count" : 5,
+ "memory_in_bytes" : 5460,
+ "terms_memory_in_bytes" : 2560,
+ "stored_fields_memory_in_bytes" : 2520,
+ "term_vectors_memory_in_bytes" : 0,
+ "norms_memory_in_bytes" : 0,
+ "points_memory_in_bytes" : 0,
+ "doc_values_memory_in_bytes" : 380,
+ "index_writer_memory_in_bytes" : 0,
+ "version_map_memory_in_bytes" : 0,
+ "fixed_bit_set_memory_in_bytes" : 0,
+ "max_unsafe_auto_id_timestamp" : -1,
+ "file_sizes" : { }
+ },
+ "translog" : {
+ "operations" : 0,
+ "size_in_bytes" : 55,
+ "uncommitted_operations" : 0,
+ "uncommitted_size_in_bytes" : 55,
+ "earliest_last_modified_age" : 1000180
+ },
+ "request_cache" : {
+ "memory_size_in_bytes" : 0,
+ "evictions" : 0,
+ "hit_count" : 0,
+ "miss_count" : 0
+ },
+ "recovery" : {
+ "current_as_source" : 0,
+ "current_as_target" : 0,
+ "throttle_time_in_millis" : 0
+ }
+ }
+ },
+ "indices" : {
+ ".geoip_databases" : {
+ "uuid" : "QaHDkqGGR4qEC8nkT5vblg",
+ "primaries" : {
+ "docs" : {
+ "count" : 40,
+ "deleted" : 0
+ },
+ "shard_stats" : {
+ "total_count" : 1
+ },
+ "store" : {
+ "size_in_bytes" : 40230884,
+ "total_data_set_size_in_bytes" : 40230884,
+ "reserved_in_bytes" : 0
+ },
+ "indexing" : {
+ "index_total" : 40,
+ "index_time_in_millis" : 938,
+ "index_current" : 0,
+ "index_failed" : 0,
+ "delete_total" : 0,
+ "delete_time_in_millis" : 0,
+ "delete_current" : 0,
+ "noop_update_total" : 0,
+ "is_throttled" : false,
+ "throttle_time_in_millis" : 0
+ },
+ "get" : {
+ "total" : 0,
+ "time_in_millis" : 0,
+ "exists_total" : 0,
+ "exists_time_in_millis" : 0,
+ "missing_total" : 0,
+ "missing_time_in_millis" : 0,
+ "current" : 0
+ },
+ "search" : {
+ "open_contexts" : 0,
+ "query_total" : 43,
+ "query_time_in_millis" : 52,
+ "query_current" : 0,
+ "fetch_total" : 43,
+ "fetch_time_in_millis" : 82,
+ "fetch_current" : 0,
+ "scroll_total" : 3,
+ "scroll_time_in_millis" : 30,
+ "scroll_current" : 0,
+ "suggest_total" : 0,
+ "suggest_time_in_millis" : 0,
+ "suggest_current" : 0
+ },
+ "merges" : {
+ "current" : 0,
+ "current_docs" : 0,
+ "current_size_in_bytes" : 0,
+ "total" : 0,
+ "total_time_in_millis" : 0,
+ "total_docs" : 0,
+ "total_size_in_bytes" : 0,
+ "total_stopped_time_in_millis" : 0,
+ "total_throttled_time_in_millis" : 0,
+ "total_auto_throttle_in_bytes" : 20971520
+ },
+ "refresh" : {
+ "total" : 10,
+ "total_time_in_millis" : 169,
+ "external_total" : 7,
+ "external_total_time_in_millis" : 161,
+ "listeners" : 0
+ },
+ "flush" : {
+ "total" : 4,
+ "periodic" : 0,
+ "total_time_in_millis" : 192
+ },
+ "warmer" : {
+ "current" : 0,
+ "total" : 6,
+ "total_time_in_millis" : 0
+ },
+ "query_cache" : {
+ "memory_size_in_bytes" : 0,
+ "total_count" : 0,
+ "hit_count" : 0,
+ "miss_count" : 0,
+ "cache_size" : 0,
+ "cache_count" : 0,
+ "evictions" : 0
+ },
+ "fielddata" : {
+ "memory_size_in_bytes" : 0,
+ "evictions" : 0
+ },
+ "completion" : {
+ "size_in_bytes" : 0
+ },
+ "segments" : {
+ "count" : 5,
+ "memory_in_bytes" : 5460,
+ "terms_memory_in_bytes" : 2560,
+ "stored_fields_memory_in_bytes" : 2520,
+ "term_vectors_memory_in_bytes" : 0,
+ "norms_memory_in_bytes" : 0,
+ "points_memory_in_bytes" : 0,
+ "doc_values_memory_in_bytes" : 380,
+ "index_writer_memory_in_bytes" : 0,
+ "version_map_memory_in_bytes" : 0,
+ "fixed_bit_set_memory_in_bytes" : 0,
+ "max_unsafe_auto_id_timestamp" : -1,
+ "file_sizes" : { }
+ },
+ "translog" : {
+ "operations" : 0,
+ "size_in_bytes" : 55,
+ "uncommitted_operations" : 0,
+ "uncommitted_size_in_bytes" : 55,
+ "earliest_last_modified_age" : 1000180
+ },
+ "request_cache" : {
+ "memory_size_in_bytes" : 0,
+ "evictions" : 0,
+ "hit_count" : 0,
+ "miss_count" : 0
+ },
+ "recovery" : {
+ "current_as_source" : 0,
+ "current_as_target" : 0,
+ "throttle_time_in_millis" : 0
+ }
+ },
+ "total" : {
+ "docs" : {
+ "count" : 40,
+ "deleted" : 0
+ },
+ "shard_stats" : {
+ "total_count" : 1
+ },
+ "store" : {
+ "size_in_bytes" : 40230884,
+ "total_data_set_size_in_bytes" : 40230884,
+ "reserved_in_bytes" : 0
+ },
+ "indexing" : {
+ "index_total" : 40,
+ "index_time_in_millis" : 938,
+ "index_current" : 0,
+ "index_failed" : 0,
+ "delete_total" : 0,
+ "delete_time_in_millis" : 0,
+ "delete_current" : 0,
+ "noop_update_total" : 0,
+ "is_throttled" : false,
+ "throttle_time_in_millis" : 0
+ },
+ "get" : {
+ "total" : 0,
+ "time_in_millis" : 0,
+ "exists_total" : 0,
+ "exists_time_in_millis" : 0,
+ "missing_total" : 0,
+ "missing_time_in_millis" : 0,
+ "current" : 0
+ },
+ "search" : {
+ "open_contexts" : 0,
+ "query_total" : 43,
+ "query_time_in_millis" : 52,
+ "query_current" : 0,
+ "fetch_total" : 43,
+ "fetch_time_in_millis" : 82,
+ "fetch_current" : 0,
+ "scroll_total" : 3,
+ "scroll_time_in_millis" : 30,
+ "scroll_current" : 0,
+ "suggest_total" : 0,
+ "suggest_time_in_millis" : 0,
+ "suggest_current" : 0
+ },
+ "merges" : {
+ "current" : 0,
+ "current_docs" : 0,
+ "current_size_in_bytes" : 0,
+ "total" : 0,
+ "total_time_in_millis" : 0,
+ "total_docs" : 0,
+ "total_size_in_bytes" : 0,
+ "total_stopped_time_in_millis" : 0,
+ "total_throttled_time_in_millis" : 0,
+ "total_auto_throttle_in_bytes" : 20971520
+ },
+ "refresh" : {
+ "total" : 10,
+ "total_time_in_millis" : 169,
+ "external_total" : 7,
+ "external_total_time_in_millis" : 161,
+ "listeners" : 0
+ },
+ "flush" : {
+ "total" : 4,
+ "periodic" : 0,
+ "total_time_in_millis" : 192
+ },
+ "warmer" : {
+ "current" : 0,
+ "total" : 6,
+ "total_time_in_millis" : 0
+ },
+ "query_cache" : {
+ "memory_size_in_bytes" : 0,
+ "total_count" : 0,
+ "hit_count" : 0,
+ "miss_count" : 0,
+ "cache_size" : 0,
+ "cache_count" : 0,
+ "evictions" : 0
+ },
+ "fielddata" : {
+ "memory_size_in_bytes" : 0,
+ "evictions" : 0
+ },
+ "completion" : {
+ "size_in_bytes" : 0
+ },
+ "segments" : {
+ "count" : 5,
+ "memory_in_bytes" : 5460,
+ "terms_memory_in_bytes" : 2560,
+ "stored_fields_memory_in_bytes" : 2520,
+ "term_vectors_memory_in_bytes" : 0,
+ "norms_memory_in_bytes" : 0,
+ "points_memory_in_bytes" : 0,
+ "doc_values_memory_in_bytes" : 380,
+ "index_writer_memory_in_bytes" : 0,
+ "version_map_memory_in_bytes" : 0,
+ "fixed_bit_set_memory_in_bytes" : 0,
+ "max_unsafe_auto_id_timestamp" : -1,
+ "file_sizes" : { }
+ },
+ "translog" : {
+ "operations" : 0,
+ "size_in_bytes" : 55,
+ "uncommitted_operations" : 0,
+ "uncommitted_size_in_bytes" : 55,
+ "earliest_last_modified_age" : 1000180
+ },
+ "request_cache" : {
+ "memory_size_in_bytes" : 0,
+ "evictions" : 0,
+ "hit_count" : 0,
+ "miss_count" : 0
+ },
+ "recovery" : {
+ "current_as_source" : 0,
+ "current_as_target" : 0,
+ "throttle_time_in_millis" : 0
+ }
+ }
+ }
+ }
+}
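
For reference, the two index metrics asserted in the expected_metrics files above come from the `search` section of this payload: `query_total` and `fetch_total` map to `elasticsearch.index.operations.completed`, while `query_time_in_millis` and `fetch_time_in_millis` map to `elasticsearch.index.operations.time`, each recorded with the `aggregation=total` attribute. Below is a minimal sketch of that mapping against the sample payload; the struct names are illustrative only and are not the receiver's actual model types.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Trimmed-down model of the index stats payload: only the "total" search
// section that feeds the two index metrics is represented here.
type indicesStats struct {
	Indices map[string]indexStats `json:"indices"`
}

type indexStats struct {
	Total struct {
		Search struct {
			QueryTotal        int64 `json:"query_total"`
			QueryTimeInMillis int64 `json:"query_time_in_millis"`
			FetchTotal        int64 `json:"fetch_total"`
			FetchTimeInMillis int64 `json:"fetch_time_in_millis"`
		} `json:"search"`
	} `json:"total"`
}

func main() {
	raw, err := os.ReadFile("testdata/sample_payloads/indices.json")
	if err != nil {
		panic(err)
	}
	var stats indicesStats
	if err := json.Unmarshal(raw, &stats); err != nil {
		panic(err)
	}
	for name, idx := range stats.Indices {
		s := idx.Total.Search
		// elasticsearch.index.operations.completed, aggregation=total
		fmt.Printf("%s completed: query=%d fetch=%d\n", name, s.QueryTotal, s.FetchTotal)
		// elasticsearch.index.operations.time (ms), aggregation=total
		fmt.Printf("%s time: query=%d fetch=%d\n", name, s.QueryTimeInMillis, s.FetchTimeInMillis)
	}
}
```

Run against the sample payload, this prints query=43/fetch=43 completed operations and query=52/fetch=82 milliseconds for `.geoip_databases`, matching the data points in the expected_metrics files.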
diff --git a/receiver/elasticsearchreceiver/testdata/sample_payloads/version.json b/receiver/elasticsearchreceiver/testdata/sample_payloads/metadata.json
similarity index 100%
rename from receiver/elasticsearchreceiver/testdata/sample_payloads/version.json
rename to receiver/elasticsearchreceiver/testdata/sample_payloads/metadata.json