From d72bbd21c83f4a65269b5e973f86459a23c94434 Mon Sep 17 00:00:00 2001
From: Stefan Kurek <stefan.kurek@observiq.com>
Date: Fri, 13 Dec 2024 12:48:10 -0500
Subject: [PATCH] [receiver/mongodbatlas] Adds additional disk & process
 metrics (#36694)

<!--Ex. Fixing a bug - Describe the bug and how this fixes the issue.
Ex. Adding a feature - Explain what this achieves.-->
#### Description
Adds additional disk and process metrics to flesh out available
monitoring metrics from the API. No new API calls were needed, as this
data was already being retrieved.

<!-- Issue number (e.g. #1234) or full URL to issue, if applicable. -->
#### Link to tracking issue
Fixes #36525

<!--Describe what testing was performed and which tests were added.-->
#### Testing
New tests generated and run.

<!--Describe the documentation added.-->
#### Documentation
New documentation generated.

<!--Please delete paragraphs that you did not use before submitting.-->

---------

Co-authored-by: schmikei <keith.schmitty@gmail.com>
---
 .../mongodbatlasreceiver-metric-adds.yaml     |  31 +++
 .../mongodbatlasreceiver/documentation.md     |  56 ++++-
 .../internal/metadata/generated_config.go     |  12 +
 .../metadata/generated_config_test.go         |   6 +
 .../internal/metadata/generated_metrics.go    | 205 ++++++++++++++++++
 .../metadata/generated_metrics_test.go        |  51 +++++
 .../internal/metadata/metric_name_mapping.go  |  39 +++-
 .../internal/metadata/testdata/config.yaml    |  12 +
 .../internal/metric_conversion.go             |  66 +++++-
 receiver/mongodbatlasreceiver/metadata.yaml   |  33 ++-
 10 files changed, 506 insertions(+), 5 deletions(-)
 create mode 100644 .chloggen/mongodbatlasreceiver-metric-adds.yaml

diff --git a/.chloggen/mongodbatlasreceiver-metric-adds.yaml b/.chloggen/mongodbatlasreceiver-metric-adds.yaml
new file mode 100644
index 000000000000..7c4dabadd869
--- /dev/null
+++ b/.chloggen/mongodbatlasreceiver-metric-adds.yaml
@@ -0,0 +1,31 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: mongodbatlasreceiver
+
+# A brief description of the change.  Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: "Adds additional metrics to the MongoDB Atlas receiver"
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [36525]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext: |
+  Adds a number of new default disabled metrics to the MongoDB Atlas receiver. These metrics are:
+  - mongodbatlas.disk.partition.queue.depth
+  - mongodbatlas.disk.partition.throughput
+  - mongodbatlas.process.cache.ratio
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/receiver/mongodbatlasreceiver/documentation.md b/receiver/mongodbatlasreceiver/documentation.md
index ae5120d4b7a3..251f950865ae 100644
--- a/receiver/mongodbatlasreceiver/documentation.md
+++ b/receiver/mongodbatlasreceiver/documentation.md
@@ -424,7 +424,7 @@ Aggregate of MongoDB Metrics DOCUMENT_METRICS_UPDATED, DOCUMENT_METRICS_DELETED,
 
 DB Operation Rates
 
-Aggregate of MongoDB Metrics OPCOUNTER_GETMORE, OPERATIONS_SCAN_AND_ORDER, OPCOUNTER_UPDATE, OPCOUNTER_REPL_UPDATE, OPCOUNTER_CMD, OPCOUNTER_DELETE, OPCOUNTER_REPL_DELETE, OPCOUNTER_REPL_CMD, OPCOUNTER_QUERY, OPCOUNTER_REPL_INSERT, OPCOUNTER_INSERT
+Aggregate of MongoDB Metrics OPCOUNTER_GETMORE, OPERATIONS_SCAN_AND_ORDER, OPCOUNTER_UPDATE, OPCOUNTER_REPL_UPDATE, OPCOUNTER_CMD, OPCOUNTER_DELETE, OPCOUNTER_REPL_DELETE, OPCOUNTER_REPL_CMD, OPCOUNTER_QUERY, OPCOUNTER_REPL_INSERT, OPCOUNTER_INSERT, OPCOUNTER_TTL_DELETED
 
 | Unit | Metric Type | Value Type |
 | ---- | ----------- | ---------- |
@@ -434,7 +434,7 @@ Aggregate of MongoDB Metrics OPCOUNTER_GETMORE, OPERATIONS_SCAN_AND_ORDER, OPCOU
 
 | Name | Description | Values |
 | ---- | ----------- | ------ |
-| operation | Type of database operation | Str: ``cmd``, ``query``, ``update``, ``delete``, ``getmore``, ``insert``, ``scan_and_order`` |
+| operation | Type of database operation | Str: ``cmd``, ``query``, ``update``, ``delete``, ``getmore``, ``insert``, ``scan_and_order``, ``ttl_deleted`` |
 | cluster_role | Whether process is acting as replica or primary | Str: ``primary``, ``replica`` |
 
 ### mongodbatlas.process.db.operations.time
@@ -933,6 +933,58 @@ Aggregate of MongoDB Metrics MAX_SWAP_USAGE_FREE, MAX_SWAP_USAGE_USED
 | ---- | ----------- | ------ |
 | memory_state | Memory usage type | Str: ``resident``, ``virtual``, ``mapped``, ``computed``, ``shared``, ``free``, ``used`` |
 
+## Optional Metrics
+
+The following metrics are not emitted by default. Each of them can be enabled by applying the following configuration:
+
+```yaml
+metrics:
+  <metric_name>:
+    enabled: true
+```
+
+### mongodbatlas.disk.partition.queue.depth
+
+Disk queue depth
+
+Aggregate of MongoDB Metrics DISK_QUEUE_DEPTH
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| 1 | Gauge | Double |
+
+### mongodbatlas.disk.partition.throughput
+
+Disk throughput
+
+Aggregate of MongoDB Metrics DISK_PARTITION_THROUGHPUT_READ, DISK_PARTITION_THROUGHPUT_WRITE
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| By/s | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| disk_direction | Measurement type for disk operation | Str: ``read``, ``write``, ``total`` |
+
+### mongodbatlas.process.cache.ratio
+
+Cache ratios represented as (%)
+
+Aggregate of MongoDB Metrics CACHE_FILL_RATIO, DIRTY_FILL_RATIO
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| % | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| cache_ratio_type | Cache ratio type | Str: ``cache_fill``, ``dirty_fill`` |
+
 ## Resource Attributes
 
 | Name | Description | Values | Enabled |
diff --git a/receiver/mongodbatlasreceiver/internal/metadata/generated_config.go b/receiver/mongodbatlasreceiver/internal/metadata/generated_config.go
index 46cdab5cd211..9670ae4fe76e 100644
--- a/receiver/mongodbatlasreceiver/internal/metadata/generated_config.go
+++ b/receiver/mongodbatlasreceiver/internal/metadata/generated_config.go
@@ -34,8 +34,10 @@ type MetricsConfig struct {
 	MongodbatlasDiskPartitionIopsMax                      MetricConfig `mapstructure:"mongodbatlas.disk.partition.iops.max"`
 	MongodbatlasDiskPartitionLatencyAverage               MetricConfig `mapstructure:"mongodbatlas.disk.partition.latency.average"`
 	MongodbatlasDiskPartitionLatencyMax                   MetricConfig `mapstructure:"mongodbatlas.disk.partition.latency.max"`
+	MongodbatlasDiskPartitionQueueDepth                   MetricConfig `mapstructure:"mongodbatlas.disk.partition.queue.depth"`
 	MongodbatlasDiskPartitionSpaceAverage                 MetricConfig `mapstructure:"mongodbatlas.disk.partition.space.average"`
 	MongodbatlasDiskPartitionSpaceMax                     MetricConfig `mapstructure:"mongodbatlas.disk.partition.space.max"`
+	MongodbatlasDiskPartitionThroughput                   MetricConfig `mapstructure:"mongodbatlas.disk.partition.throughput"`
 	MongodbatlasDiskPartitionUsageAverage                 MetricConfig `mapstructure:"mongodbatlas.disk.partition.usage.average"`
 	MongodbatlasDiskPartitionUsageMax                     MetricConfig `mapstructure:"mongodbatlas.disk.partition.usage.max"`
 	MongodbatlasDiskPartitionUtilizationAverage           MetricConfig `mapstructure:"mongodbatlas.disk.partition.utilization.average"`
@@ -43,6 +45,7 @@ type MetricsConfig struct {
 	MongodbatlasProcessAsserts                            MetricConfig `mapstructure:"mongodbatlas.process.asserts"`
 	MongodbatlasProcessBackgroundFlush                    MetricConfig `mapstructure:"mongodbatlas.process.background_flush"`
 	MongodbatlasProcessCacheIo                            MetricConfig `mapstructure:"mongodbatlas.process.cache.io"`
+	MongodbatlasProcessCacheRatio                         MetricConfig `mapstructure:"mongodbatlas.process.cache.ratio"`
 	MongodbatlasProcessCacheSize                          MetricConfig `mapstructure:"mongodbatlas.process.cache.size"`
 	MongodbatlasProcessConnections                        MetricConfig `mapstructure:"mongodbatlas.process.connections"`
 	MongodbatlasProcessCPUChildrenNormalizedUsageAverage  MetricConfig `mapstructure:"mongodbatlas.process.cpu.children.normalized.usage.average"`
@@ -112,12 +115,18 @@ func DefaultMetricsConfig() MetricsConfig {
 		MongodbatlasDiskPartitionLatencyMax: MetricConfig{
 			Enabled: true,
 		},
+		MongodbatlasDiskPartitionQueueDepth: MetricConfig{
+			Enabled: false,
+		},
 		MongodbatlasDiskPartitionSpaceAverage: MetricConfig{
 			Enabled: true,
 		},
 		MongodbatlasDiskPartitionSpaceMax: MetricConfig{
 			Enabled: true,
 		},
+		MongodbatlasDiskPartitionThroughput: MetricConfig{
+			Enabled: false,
+		},
 		MongodbatlasDiskPartitionUsageAverage: MetricConfig{
 			Enabled: true,
 		},
@@ -139,6 +148,9 @@ func DefaultMetricsConfig() MetricsConfig {
 		MongodbatlasProcessCacheIo: MetricConfig{
 			Enabled: true,
 		},
+		MongodbatlasProcessCacheRatio: MetricConfig{
+			Enabled: false,
+		},
 		MongodbatlasProcessCacheSize: MetricConfig{
 			Enabled: true,
 		},
diff --git a/receiver/mongodbatlasreceiver/internal/metadata/generated_config_test.go b/receiver/mongodbatlasreceiver/internal/metadata/generated_config_test.go
index 8c67cb277f9b..8046575b85b7 100644
--- a/receiver/mongodbatlasreceiver/internal/metadata/generated_config_test.go
+++ b/receiver/mongodbatlasreceiver/internal/metadata/generated_config_test.go
@@ -31,8 +31,10 @@ func TestMetricsBuilderConfig(t *testing.T) {
 					MongodbatlasDiskPartitionIopsMax:                      MetricConfig{Enabled: true},
 					MongodbatlasDiskPartitionLatencyAverage:               MetricConfig{Enabled: true},
 					MongodbatlasDiskPartitionLatencyMax:                   MetricConfig{Enabled: true},
+					MongodbatlasDiskPartitionQueueDepth:                   MetricConfig{Enabled: true},
 					MongodbatlasDiskPartitionSpaceAverage:                 MetricConfig{Enabled: true},
 					MongodbatlasDiskPartitionSpaceMax:                     MetricConfig{Enabled: true},
+					MongodbatlasDiskPartitionThroughput:                   MetricConfig{Enabled: true},
 					MongodbatlasDiskPartitionUsageAverage:                 MetricConfig{Enabled: true},
 					MongodbatlasDiskPartitionUsageMax:                     MetricConfig{Enabled: true},
 					MongodbatlasDiskPartitionUtilizationAverage:           MetricConfig{Enabled: true},
@@ -40,6 +42,7 @@ func TestMetricsBuilderConfig(t *testing.T) {
 					MongodbatlasProcessAsserts:                            MetricConfig{Enabled: true},
 					MongodbatlasProcessBackgroundFlush:                    MetricConfig{Enabled: true},
 					MongodbatlasProcessCacheIo:                            MetricConfig{Enabled: true},
+					MongodbatlasProcessCacheRatio:                         MetricConfig{Enabled: true},
 					MongodbatlasProcessCacheSize:                          MetricConfig{Enabled: true},
 					MongodbatlasProcessConnections:                        MetricConfig{Enabled: true},
 					MongodbatlasProcessCPUChildrenNormalizedUsageAverage:  MetricConfig{Enabled: true},
@@ -115,8 +118,10 @@ func TestMetricsBuilderConfig(t *testing.T) {
 					MongodbatlasDiskPartitionIopsMax:                      MetricConfig{Enabled: false},
 					MongodbatlasDiskPartitionLatencyAverage:               MetricConfig{Enabled: false},
 					MongodbatlasDiskPartitionLatencyMax:                   MetricConfig{Enabled: false},
+					MongodbatlasDiskPartitionQueueDepth:                   MetricConfig{Enabled: false},
 					MongodbatlasDiskPartitionSpaceAverage:                 MetricConfig{Enabled: false},
 					MongodbatlasDiskPartitionSpaceMax:                     MetricConfig{Enabled: false},
+					MongodbatlasDiskPartitionThroughput:                   MetricConfig{Enabled: false},
 					MongodbatlasDiskPartitionUsageAverage:                 MetricConfig{Enabled: false},
 					MongodbatlasDiskPartitionUsageMax:                     MetricConfig{Enabled: false},
 					MongodbatlasDiskPartitionUtilizationAverage:           MetricConfig{Enabled: false},
@@ -124,6 +129,7 @@ func TestMetricsBuilderConfig(t *testing.T) {
 					MongodbatlasProcessAsserts:                            MetricConfig{Enabled: false},
 					MongodbatlasProcessBackgroundFlush:                    MetricConfig{Enabled: false},
 					MongodbatlasProcessCacheIo:                            MetricConfig{Enabled: false},
+					MongodbatlasProcessCacheRatio:                         MetricConfig{Enabled: false},
 					MongodbatlasProcessCacheSize:                          MetricConfig{Enabled: false},
 					MongodbatlasProcessConnections:                        MetricConfig{Enabled: false},
 					MongodbatlasProcessCPUChildrenNormalizedUsageAverage:  MetricConfig{Enabled: false},
diff --git a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics.go
index 1e2d40f96785..356b4f1edd78 100644
--- a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics.go
+++ b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics.go
@@ -102,6 +102,32 @@ var MapAttributeCacheDirection = map[string]AttributeCacheDirection{
 	"written_from": AttributeCacheDirectionWrittenFrom,
 }
 
+// AttributeCacheRatioType specifies the a value cache_ratio_type attribute.
+type AttributeCacheRatioType int
+
+const (
+	_ AttributeCacheRatioType = iota
+	AttributeCacheRatioTypeCacheFill
+	AttributeCacheRatioTypeDirtyFill
+)
+
+// String returns the string representation of the AttributeCacheRatioType.
+func (av AttributeCacheRatioType) String() string {
+	switch av {
+	case AttributeCacheRatioTypeCacheFill:
+		return "cache_fill"
+	case AttributeCacheRatioTypeDirtyFill:
+		return "dirty_fill"
+	}
+	return ""
+}
+
+// MapAttributeCacheRatioType is a helper map of string to AttributeCacheRatioType attribute value.
+var MapAttributeCacheRatioType = map[string]AttributeCacheRatioType{
+	"cache_fill": AttributeCacheRatioTypeCacheFill,
+	"dirty_fill": AttributeCacheRatioTypeDirtyFill,
+}
+
 // AttributeCacheStatus specifies the a value cache_status attribute.
 type AttributeCacheStatus int
 
@@ -582,6 +608,7 @@ const (
 	AttributeOperationGetmore
 	AttributeOperationInsert
 	AttributeOperationScanAndOrder
+	AttributeOperationTTLDeleted
 )
 
 // String returns the string representation of the AttributeOperation.
@@ -601,6 +628,8 @@ func (av AttributeOperation) String() string {
 		return "insert"
 	case AttributeOperationScanAndOrder:
 		return "scan_and_order"
+	case AttributeOperationTTLDeleted:
+		return "ttl_deleted"
 	}
 	return ""
 }
@@ -614,6 +643,7 @@ var MapAttributeOperation = map[string]AttributeOperation{
 	"getmore":        AttributeOperationGetmore,
 	"insert":         AttributeOperationInsert,
 	"scan_and_order": AttributeOperationScanAndOrder,
+	"ttl_deleted":    AttributeOperationTTLDeleted,
 }
 
 // AttributeOplogType specifies the a value oplog_type attribute.
@@ -1038,6 +1068,55 @@ func newMetricMongodbatlasDiskPartitionLatencyMax(cfg MetricConfig) metricMongod
 	return m
 }
 
+type metricMongodbatlasDiskPartitionQueueDepth struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills mongodbatlas.disk.partition.queue.depth metric with initial data.
+func (m *metricMongodbatlasDiskPartitionQueueDepth) init() {
+	m.data.SetName("mongodbatlas.disk.partition.queue.depth")
+	m.data.SetDescription("Disk queue depth")
+	m.data.SetUnit("1")
+	m.data.SetEmptyGauge()
+}
+
+func (m *metricMongodbatlasDiskPartitionQueueDepth) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetDoubleValue(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMongodbatlasDiskPartitionQueueDepth) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMongodbatlasDiskPartitionQueueDepth) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricMongodbatlasDiskPartitionQueueDepth(cfg MetricConfig) metricMongodbatlasDiskPartitionQueueDepth {
+	m := metricMongodbatlasDiskPartitionQueueDepth{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
 type metricMongodbatlasDiskPartitionSpaceAverage struct {
 	data     pmetric.Metric // data buffer for generated metric.
 	config   MetricConfig   // metric config provided by user.
@@ -1140,6 +1219,57 @@ func newMetricMongodbatlasDiskPartitionSpaceMax(cfg MetricConfig) metricMongodba
 	return m
 }
 
+type metricMongodbatlasDiskPartitionThroughput struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills mongodbatlas.disk.partition.throughput metric with initial data.
+func (m *metricMongodbatlasDiskPartitionThroughput) init() {
+	m.data.SetName("mongodbatlas.disk.partition.throughput")
+	m.data.SetDescription("Disk throughput")
+	m.data.SetUnit("By/s")
+	m.data.SetEmptyGauge()
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricMongodbatlasDiskPartitionThroughput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetDoubleValue(val)
+	dp.Attributes().PutStr("disk_direction", diskDirectionAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMongodbatlasDiskPartitionThroughput) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMongodbatlasDiskPartitionThroughput) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricMongodbatlasDiskPartitionThroughput(cfg MetricConfig) metricMongodbatlasDiskPartitionThroughput {
+	m := metricMongodbatlasDiskPartitionThroughput{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
 type metricMongodbatlasDiskPartitionUsageAverage struct {
 	data     pmetric.Metric // data buffer for generated metric.
 	config   MetricConfig   // metric config provided by user.
@@ -1491,6 +1621,57 @@ func newMetricMongodbatlasProcessCacheIo(cfg MetricConfig) metricMongodbatlasPro
 	return m
 }
 
+type metricMongodbatlasProcessCacheRatio struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills mongodbatlas.process.cache.ratio metric with initial data.
+func (m *metricMongodbatlasProcessCacheRatio) init() {
+	m.data.SetName("mongodbatlas.process.cache.ratio")
+	m.data.SetDescription("Cache ratios represented as (%)")
+	m.data.SetUnit("%")
+	m.data.SetEmptyGauge()
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricMongodbatlasProcessCacheRatio) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cacheRatioTypeAttributeValue string) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetDoubleValue(val)
+	dp.Attributes().PutStr("cache_ratio_type", cacheRatioTypeAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMongodbatlasProcessCacheRatio) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMongodbatlasProcessCacheRatio) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricMongodbatlasProcessCacheRatio(cfg MetricConfig) metricMongodbatlasProcessCacheRatio {
+	m := metricMongodbatlasProcessCacheRatio{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
 type metricMongodbatlasProcessCacheSize struct {
 	data     pmetric.Metric // data buffer for generated metric.
 	config   MetricConfig   // metric config provided by user.
@@ -3897,8 +4078,10 @@ type MetricsBuilder struct {
 	metricMongodbatlasDiskPartitionIopsMax                      metricMongodbatlasDiskPartitionIopsMax
 	metricMongodbatlasDiskPartitionLatencyAverage               metricMongodbatlasDiskPartitionLatencyAverage
 	metricMongodbatlasDiskPartitionLatencyMax                   metricMongodbatlasDiskPartitionLatencyMax
+	metricMongodbatlasDiskPartitionQueueDepth                   metricMongodbatlasDiskPartitionQueueDepth
 	metricMongodbatlasDiskPartitionSpaceAverage                 metricMongodbatlasDiskPartitionSpaceAverage
 	metricMongodbatlasDiskPartitionSpaceMax                     metricMongodbatlasDiskPartitionSpaceMax
+	metricMongodbatlasDiskPartitionThroughput                   metricMongodbatlasDiskPartitionThroughput
 	metricMongodbatlasDiskPartitionUsageAverage                 metricMongodbatlasDiskPartitionUsageAverage
 	metricMongodbatlasDiskPartitionUsageMax                     metricMongodbatlasDiskPartitionUsageMax
 	metricMongodbatlasDiskPartitionUtilizationAverage           metricMongodbatlasDiskPartitionUtilizationAverage
@@ -3906,6 +4089,7 @@ type MetricsBuilder struct {
 	metricMongodbatlasProcessAsserts                            metricMongodbatlasProcessAsserts
 	metricMongodbatlasProcessBackgroundFlush                    metricMongodbatlasProcessBackgroundFlush
 	metricMongodbatlasProcessCacheIo                            metricMongodbatlasProcessCacheIo
+	metricMongodbatlasProcessCacheRatio                         metricMongodbatlasProcessCacheRatio
 	metricMongodbatlasProcessCacheSize                          metricMongodbatlasProcessCacheSize
 	metricMongodbatlasProcessConnections                        metricMongodbatlasProcessConnections
 	metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage  metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage
@@ -3985,8 +4169,10 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt
 		metricMongodbatlasDiskPartitionIopsMax:                      newMetricMongodbatlasDiskPartitionIopsMax(mbc.Metrics.MongodbatlasDiskPartitionIopsMax),
 		metricMongodbatlasDiskPartitionLatencyAverage:               newMetricMongodbatlasDiskPartitionLatencyAverage(mbc.Metrics.MongodbatlasDiskPartitionLatencyAverage),
 		metricMongodbatlasDiskPartitionLatencyMax:                   newMetricMongodbatlasDiskPartitionLatencyMax(mbc.Metrics.MongodbatlasDiskPartitionLatencyMax),
+		metricMongodbatlasDiskPartitionQueueDepth:                   newMetricMongodbatlasDiskPartitionQueueDepth(mbc.Metrics.MongodbatlasDiskPartitionQueueDepth),
 		metricMongodbatlasDiskPartitionSpaceAverage:                 newMetricMongodbatlasDiskPartitionSpaceAverage(mbc.Metrics.MongodbatlasDiskPartitionSpaceAverage),
 		metricMongodbatlasDiskPartitionSpaceMax:                     newMetricMongodbatlasDiskPartitionSpaceMax(mbc.Metrics.MongodbatlasDiskPartitionSpaceMax),
+		metricMongodbatlasDiskPartitionThroughput:                   newMetricMongodbatlasDiskPartitionThroughput(mbc.Metrics.MongodbatlasDiskPartitionThroughput),
 		metricMongodbatlasDiskPartitionUsageAverage:                 newMetricMongodbatlasDiskPartitionUsageAverage(mbc.Metrics.MongodbatlasDiskPartitionUsageAverage),
 		metricMongodbatlasDiskPartitionUsageMax:                     newMetricMongodbatlasDiskPartitionUsageMax(mbc.Metrics.MongodbatlasDiskPartitionUsageMax),
 		metricMongodbatlasDiskPartitionUtilizationAverage:           newMetricMongodbatlasDiskPartitionUtilizationAverage(mbc.Metrics.MongodbatlasDiskPartitionUtilizationAverage),
@@ -3994,6 +4180,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt
 		metricMongodbatlasProcessAsserts:                            newMetricMongodbatlasProcessAsserts(mbc.Metrics.MongodbatlasProcessAsserts),
 		metricMongodbatlasProcessBackgroundFlush:                    newMetricMongodbatlasProcessBackgroundFlush(mbc.Metrics.MongodbatlasProcessBackgroundFlush),
 		metricMongodbatlasProcessCacheIo:                            newMetricMongodbatlasProcessCacheIo(mbc.Metrics.MongodbatlasProcessCacheIo),
+		metricMongodbatlasProcessCacheRatio:                         newMetricMongodbatlasProcessCacheRatio(mbc.Metrics.MongodbatlasProcessCacheRatio),
 		metricMongodbatlasProcessCacheSize:                          newMetricMongodbatlasProcessCacheSize(mbc.Metrics.MongodbatlasProcessCacheSize),
 		metricMongodbatlasProcessConnections:                        newMetricMongodbatlasProcessConnections(mbc.Metrics.MongodbatlasProcessConnections),
 		metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage:  newMetricMongodbatlasProcessCPUChildrenNormalizedUsageAverage(mbc.Metrics.MongodbatlasProcessCPUChildrenNormalizedUsageAverage),
@@ -4197,8 +4384,10 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) {
 	mb.metricMongodbatlasDiskPartitionIopsMax.emit(ils.Metrics())
 	mb.metricMongodbatlasDiskPartitionLatencyAverage.emit(ils.Metrics())
 	mb.metricMongodbatlasDiskPartitionLatencyMax.emit(ils.Metrics())
+	mb.metricMongodbatlasDiskPartitionQueueDepth.emit(ils.Metrics())
 	mb.metricMongodbatlasDiskPartitionSpaceAverage.emit(ils.Metrics())
 	mb.metricMongodbatlasDiskPartitionSpaceMax.emit(ils.Metrics())
+	mb.metricMongodbatlasDiskPartitionThroughput.emit(ils.Metrics())
 	mb.metricMongodbatlasDiskPartitionUsageAverage.emit(ils.Metrics())
 	mb.metricMongodbatlasDiskPartitionUsageMax.emit(ils.Metrics())
 	mb.metricMongodbatlasDiskPartitionUtilizationAverage.emit(ils.Metrics())
@@ -4206,6 +4395,7 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) {
 	mb.metricMongodbatlasProcessAsserts.emit(ils.Metrics())
 	mb.metricMongodbatlasProcessBackgroundFlush.emit(ils.Metrics())
 	mb.metricMongodbatlasProcessCacheIo.emit(ils.Metrics())
+	mb.metricMongodbatlasProcessCacheRatio.emit(ils.Metrics())
 	mb.metricMongodbatlasProcessCacheSize.emit(ils.Metrics())
 	mb.metricMongodbatlasProcessConnections.emit(ils.Metrics())
 	mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage.emit(ils.Metrics())
@@ -4314,6 +4504,11 @@ func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts
 	mb.metricMongodbatlasDiskPartitionLatencyMax.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue.String())
 }
 
+// RecordMongodbatlasDiskPartitionQueueDepthDataPoint adds a data point to mongodbatlas.disk.partition.queue.depth metric.
+func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionQueueDepthDataPoint(ts pcommon.Timestamp, val float64) {
+	mb.metricMongodbatlasDiskPartitionQueueDepth.recordDataPoint(mb.startTime, ts, val)
+}
+
 // RecordMongodbatlasDiskPartitionSpaceAverageDataPoint adds a data point to mongodbatlas.disk.partition.space.average metric.
 func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue AttributeDiskStatus) {
 	mb.metricMongodbatlasDiskPartitionSpaceAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String())
@@ -4324,6 +4519,11 @@ func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts pc
 	mb.metricMongodbatlasDiskPartitionSpaceMax.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String())
 }
 
+// RecordMongodbatlasDiskPartitionThroughputDataPoint adds a data point to mongodbatlas.disk.partition.throughput metric.
+func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionThroughputDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue AttributeDiskDirection) {
+	mb.metricMongodbatlasDiskPartitionThroughput.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue.String())
+}
+
 // RecordMongodbatlasDiskPartitionUsageAverageDataPoint adds a data point to mongodbatlas.disk.partition.usage.average metric.
 func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUsageAverageDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue AttributeDiskStatus) {
 	mb.metricMongodbatlasDiskPartitionUsageAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String())
@@ -4359,6 +4559,11 @@ func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheIoDataPoint(ts pcommon.T
 	mb.metricMongodbatlasProcessCacheIo.recordDataPoint(mb.startTime, ts, val, cacheDirectionAttributeValue.String())
 }
 
+// RecordMongodbatlasProcessCacheRatioDataPoint adds a data point to mongodbatlas.process.cache.ratio metric.
+func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheRatioDataPoint(ts pcommon.Timestamp, val float64, cacheRatioTypeAttributeValue AttributeCacheRatioType) {
+	mb.metricMongodbatlasProcessCacheRatio.recordDataPoint(mb.startTime, ts, val, cacheRatioTypeAttributeValue.String())
+}
+
 // RecordMongodbatlasProcessCacheSizeDataPoint adds a data point to mongodbatlas.process.cache.size metric.
 func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheSizeDataPoint(ts pcommon.Timestamp, val float64, cacheStatusAttributeValue AttributeCacheStatus) {
 	mb.metricMongodbatlasProcessCacheSize.recordDataPoint(mb.startTime, ts, val, cacheStatusAttributeValue.String())
diff --git a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_test.go b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_test.go
index 6a8d5c246a0e..81ef8e032756 100644
--- a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_test.go
+++ b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_test.go
@@ -92,6 +92,9 @@ func TestMetricsBuilder(t *testing.T) {
 			allMetricsCount++
 			mb.RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts, 1, AttributeDiskDirectionRead)
 
+			allMetricsCount++
+			mb.RecordMongodbatlasDiskPartitionQueueDepthDataPoint(ts, 1)
+
 			defaultMetricsCount++
 			allMetricsCount++
 			mb.RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts, 1, AttributeDiskStatusFree)
@@ -100,6 +103,9 @@ func TestMetricsBuilder(t *testing.T) {
 			allMetricsCount++
 			mb.RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts, 1, AttributeDiskStatusFree)
 
+			allMetricsCount++
+			mb.RecordMongodbatlasDiskPartitionThroughputDataPoint(ts, 1, AttributeDiskDirectionRead)
+
 			defaultMetricsCount++
 			allMetricsCount++
 			mb.RecordMongodbatlasDiskPartitionUsageAverageDataPoint(ts, 1, AttributeDiskStatusFree)
@@ -128,6 +134,9 @@ func TestMetricsBuilder(t *testing.T) {
 			allMetricsCount++
 			mb.RecordMongodbatlasProcessCacheIoDataPoint(ts, 1, AttributeCacheDirectionReadInto)
 
+			allMetricsCount++
+			mb.RecordMongodbatlasProcessCacheRatioDataPoint(ts, 1, AttributeCacheRatioTypeCacheFill)
+
 			defaultMetricsCount++
 			allMetricsCount++
 			mb.RecordMongodbatlasProcessCacheSizeDataPoint(ts, 1, AttributeCacheStatusDirty)
@@ -442,6 +451,18 @@ func TestMetricsBuilder(t *testing.T) {
 					attrVal, ok := dp.Attributes().Get("disk_direction")
 					assert.True(t, ok)
 					assert.EqualValues(t, "read", attrVal.Str())
+				case "mongodbatlas.disk.partition.queue.depth":
+					assert.False(t, validatedMetrics["mongodbatlas.disk.partition.queue.depth"], "Found a duplicate in the metrics slice: mongodbatlas.disk.partition.queue.depth")
+					validatedMetrics["mongodbatlas.disk.partition.queue.depth"] = true
+					assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+					assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+					assert.Equal(t, "Disk queue depth", ms.At(i).Description())
+					assert.Equal(t, "1", ms.At(i).Unit())
+					dp := ms.At(i).Gauge().DataPoints().At(0)
+					assert.Equal(t, start, dp.StartTimestamp())
+					assert.Equal(t, ts, dp.Timestamp())
+					assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
+					assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
 				case "mongodbatlas.disk.partition.space.average":
 					assert.False(t, validatedMetrics["mongodbatlas.disk.partition.space.average"], "Found a duplicate in the metrics slice: mongodbatlas.disk.partition.space.average")
 					validatedMetrics["mongodbatlas.disk.partition.space.average"] = true
@@ -472,6 +493,21 @@ func TestMetricsBuilder(t *testing.T) {
 					attrVal, ok := dp.Attributes().Get("disk_status")
 					assert.True(t, ok)
 					assert.EqualValues(t, "free", attrVal.Str())
+				case "mongodbatlas.disk.partition.throughput":
+					assert.False(t, validatedMetrics["mongodbatlas.disk.partition.throughput"], "Found a duplicate in the metrics slice: mongodbatlas.disk.partition.throughput")
+					validatedMetrics["mongodbatlas.disk.partition.throughput"] = true
+					assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+					assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+					assert.Equal(t, "Disk throughput", ms.At(i).Description())
+					assert.Equal(t, "By/s", ms.At(i).Unit())
+					dp := ms.At(i).Gauge().DataPoints().At(0)
+					assert.Equal(t, start, dp.StartTimestamp())
+					assert.Equal(t, ts, dp.Timestamp())
+					assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
+					assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+					attrVal, ok := dp.Attributes().Get("disk_direction")
+					assert.True(t, ok)
+					assert.EqualValues(t, "read", attrVal.Str())
 				case "mongodbatlas.disk.partition.usage.average":
 					assert.False(t, validatedMetrics["mongodbatlas.disk.partition.usage.average"], "Found a duplicate in the metrics slice: mongodbatlas.disk.partition.usage.average")
 					validatedMetrics["mongodbatlas.disk.partition.usage.average"] = true
@@ -568,6 +604,21 @@ func TestMetricsBuilder(t *testing.T) {
 					attrVal, ok := dp.Attributes().Get("cache_direction")
 					assert.True(t, ok)
 					assert.EqualValues(t, "read_into", attrVal.Str())
+				case "mongodbatlas.process.cache.ratio":
+					assert.False(t, validatedMetrics["mongodbatlas.process.cache.ratio"], "Found a duplicate in the metrics slice: mongodbatlas.process.cache.ratio")
+					validatedMetrics["mongodbatlas.process.cache.ratio"] = true
+					assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+					assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+					assert.Equal(t, "Cache ratios represented as (%)", ms.At(i).Description())
+					assert.Equal(t, "%", ms.At(i).Unit())
+					dp := ms.At(i).Gauge().DataPoints().At(0)
+					assert.Equal(t, start, dp.StartTimestamp())
+					assert.Equal(t, ts, dp.Timestamp())
+					assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
+					assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+					attrVal, ok := dp.Attributes().Get("cache_ratio_type")
+					assert.True(t, ok)
+					assert.EqualValues(t, "cache_fill", attrVal.Str())
 				case "mongodbatlas.process.cache.size":
 					assert.False(t, validatedMetrics["mongodbatlas.process.cache.size"], "Found a duplicate in the metrics slice: mongodbatlas.process.cache.size")
 					validatedMetrics["mongodbatlas.process.cache.size"] = true
diff --git a/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go b/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go
index e8793b609b3c..6ddb5bd840d8 100644
--- a/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go
+++ b/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go
@@ -152,6 +152,16 @@ func getRecordFunc(metricName string) metricRecordFunc {
 			mb.RecordMongodbatlasProcessCacheSizeDataPoint(ts, float64(*dp.Value), AttributeCacheStatusUsed)
 		}
 
+	case "CACHE_FILL_RATIO":
+		return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
+			mb.RecordMongodbatlasProcessCacheRatioDataPoint(ts, float64(*dp.Value), AttributeCacheRatioTypeCacheFill)
+		}
+
+	case "DIRTY_FILL_RATIO":
+		return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
+			mb.RecordMongodbatlasProcessCacheRatioDataPoint(ts, float64(*dp.Value), AttributeCacheRatioTypeDirtyFill)
+		}
+
 	case "TICKETS_AVAILABLE_READS":
 		return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
 			mb.RecordMongodbatlasProcessTicketsDataPoint(ts, float64(*dp.Value), AttributeTicketTypeAvailableReads)
@@ -337,6 +347,10 @@ func getRecordFunc(metricName string) metricRecordFunc {
 		return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
 			mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationInsert, AttributeClusterRolePrimary)
 		}
+	case "OPCOUNTER_TTL_DELETED":
+		return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
+			mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationTTLDeleted, AttributeClusterRolePrimary)
+		}
 
 	// Rate of database operations on MongoDB secondaries found in the opcountersRepl document that the serverStatus command collects.
 	case "OPCOUNTER_REPL_CMD":
@@ -735,6 +749,29 @@ func getRecordFunc(metricName string) metricRecordFunc {
 			mb.RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionTotal)
 		}
 
+	// Measures throughput of data read and written to the disk partition (not cache) used by MongoDB.
+	case "DISK_PARTITION_THROUGHPUT_READ":
+		return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
+			mb.RecordMongodbatlasDiskPartitionThroughputDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionRead)
+		}
+
+	case "DISK_PARTITION_THROUGHPUT_WRITE":
+		return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
+			mb.RecordMongodbatlasDiskPartitionThroughputDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionWrite)
+		}
+
+	// This is a calculated metric that is the sum of the read and write throughput.
+	case "DISK_PARTITION_THROUGHPUT_TOTAL":
+		return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
+			mb.RecordMongodbatlasDiskPartitionThroughputDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionTotal)
+		}
+
+	// Measures the queue depth of the disk partition used by MongoDB.
+	case "DISK_QUEUE_DEPTH":
+		return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
+			mb.RecordMongodbatlasDiskPartitionQueueDepthDataPoint(ts, float64(*dp.Value))
+		}
+
 	// Measures latency per operation type of the disk partition used by MongoDB.
 	case "DISK_PARTITION_LATENCY_READ":
 		return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
@@ -849,7 +886,7 @@ func getRecordFunc(metricName string) metricRecordFunc {
 	}
 }
 
-func MeasurementsToMetric(mb *MetricsBuilder, meas *mongodbatlas.Measurements, _ bool) error {
+func MeasurementsToMetric(mb *MetricsBuilder, meas *mongodbatlas.Measurements) error {
 	recordFunc := getRecordFunc(meas.Name)
 	if recordFunc == nil {
 		return nil
diff --git a/receiver/mongodbatlasreceiver/internal/metadata/testdata/config.yaml b/receiver/mongodbatlasreceiver/internal/metadata/testdata/config.yaml
index cba17b3284e7..6a88579926fc 100644
--- a/receiver/mongodbatlasreceiver/internal/metadata/testdata/config.yaml
+++ b/receiver/mongodbatlasreceiver/internal/metadata/testdata/config.yaml
@@ -13,10 +13,14 @@ all_set:
       enabled: true
     mongodbatlas.disk.partition.latency.max:
       enabled: true
+    mongodbatlas.disk.partition.queue.depth:
+      enabled: true
     mongodbatlas.disk.partition.space.average:
       enabled: true
     mongodbatlas.disk.partition.space.max:
       enabled: true
+    mongodbatlas.disk.partition.throughput:
+      enabled: true
     mongodbatlas.disk.partition.usage.average:
       enabled: true
     mongodbatlas.disk.partition.usage.max:
@@ -31,6 +35,8 @@ all_set:
       enabled: true
     mongodbatlas.process.cache.io:
       enabled: true
+    mongodbatlas.process.cache.ratio:
+      enabled: true
     mongodbatlas.process.cache.size:
       enabled: true
     mongodbatlas.process.connections:
@@ -166,10 +172,14 @@ none_set:
       enabled: false
     mongodbatlas.disk.partition.latency.max:
       enabled: false
+    mongodbatlas.disk.partition.queue.depth:
+      enabled: false
     mongodbatlas.disk.partition.space.average:
       enabled: false
     mongodbatlas.disk.partition.space.max:
       enabled: false
+    mongodbatlas.disk.partition.throughput:
+      enabled: false
     mongodbatlas.disk.partition.usage.average:
       enabled: false
     mongodbatlas.disk.partition.usage.max:
@@ -184,6 +194,8 @@ none_set:
       enabled: false
     mongodbatlas.process.cache.io:
       enabled: false
+    mongodbatlas.process.cache.ratio:
+      enabled: false
     mongodbatlas.process.cache.size:
       enabled: false
     mongodbatlas.process.connections:
diff --git a/receiver/mongodbatlasreceiver/internal/metric_conversion.go b/receiver/mongodbatlasreceiver/internal/metric_conversion.go
index b1c5639d6660..1b5b156d1c2f 100644
--- a/receiver/mongodbatlasreceiver/internal/metric_conversion.go
+++ b/receiver/mongodbatlasreceiver/internal/metric_conversion.go
@@ -19,15 +19,79 @@ func processMeasurements(
 	var errs error
 
 	for _, meas := range measurements {
-		err := metadata.MeasurementsToMetric(mb, meas, false)
+		err := metadata.MeasurementsToMetric(mb, meas)
 		if err != nil {
 			errs = multierr.Append(errs, err)
 		}
 	}
 
+	err := calculateTotalMetrics(mb, measurements)
+	if err != nil {
+		errs = multierr.Append(errs, err)
+	}
 	if errs != nil {
 		return fmt.Errorf("errors occurred while processing measurements: %w", errs)
 	}
 
 	return nil
 }
+
+// calculateTotalMetrics synthesizes DISK_PARTITION_THROUGHPUT_TOTAL by summing matching READ and WRITE data points.
+func calculateTotalMetrics(
+	mb *metadata.MetricsBuilder,
+	measurements []*mongodbatlas.Measurements,
+) error {
+	var err error
+	dptTotalMeasCombined := false
+	var dptTotalMeas *mongodbatlas.Measurements
+
+	for _, meas := range measurements {
+		switch meas.Name {
+		case "DISK_PARTITION_THROUGHPUT_READ", "DISK_PARTITION_THROUGHPUT_WRITE":
+			if dptTotalMeas == nil {
+				dptTotalMeas = cloneMeasurement(meas)
+				dptTotalMeas.Name = "DISK_PARTITION_THROUGHPUT_TOTAL"
+				continue
+			}
+
+			// Combine data point values with matching timestamps; skip mismatched or all-nil points.
+			for j, totalMeas := range dptTotalMeas.DataPoints {
+				if j >= len(meas.DataPoints) || meas.DataPoints[j] == nil || totalMeas == nil ||
+					totalMeas.Timestamp != meas.DataPoints[j].Timestamp || (totalMeas.Value == nil && meas.DataPoints[j].Value == nil) {
+					continue
+				}
+				if totalMeas.Value == nil {
+					totalMeas.Value = new(float32)
+				}
+				var addValue float32 // a nil value contributes zero to the total
+				if v := meas.DataPoints[j].Value; v != nil {
+					addValue = *v
+				}
+				*totalMeas.Value += addValue
+				dptTotalMeasCombined = true
+			}
+		}
+	}
+
+	if dptTotalMeasCombined {
+		err = metadata.MeasurementsToMetric(mb, dptTotalMeas)
+	}
+	return err
+}
+
+// cloneMeasurement copies meas, duplicating DataPoints and their Value pointers so the clone can be mutated safely.
+func cloneMeasurement(meas *mongodbatlas.Measurements) *mongodbatlas.Measurements {
+	clone := &mongodbatlas.Measurements{Name: meas.Name, Units: meas.Units}
+	clone.DataPoints = make([]*mongodbatlas.DataPoints, len(meas.DataPoints))
+	for i, dp := range meas.DataPoints {
+		if dp != nil {
+			newDP := *dp
+			if dp.Value != nil {
+				v := *dp.Value
+				newDP.Value = &v // deep-copy Value so summing totals never writes through to the source measurement
+			}
+			clone.DataPoints[i] = &newDP
+		}
+	}
+	return clone
+}
diff --git a/receiver/mongodbatlasreceiver/metadata.yaml b/receiver/mongodbatlasreceiver/metadata.yaml
index 74e8b97e1d78..d2c84af480d0 100644
--- a/receiver/mongodbatlasreceiver/metadata.yaml
+++ b/receiver/mongodbatlasreceiver/metadata.yaml
@@ -90,6 +90,12 @@ attributes:
     enum:
       - read_into
       - written_from
+  cache_ratio_type:
+    description: Cache ratio type
+    type: string
+    enum:
+      - cache_fill
+      - dirty_fill
   cache_status:
     description: Cache status
     type: string
@@ -165,6 +171,7 @@ attributes:
       - getmore
       - insert
       - scan_and_order
+      - ttl_deleted
   cluster_role:
     description: Whether process is acting as replica or primary
     type: string
@@ -258,6 +265,14 @@ metrics:
     attributes: [cache_direction]
     gauge:
       value_type: double
+  mongodbatlas.process.cache.ratio:
+    enabled: false
+    description: Cache ratios represented as (%)
+    extended_documentation: Aggregate of MongoDB Metrics CACHE_FILL_RATIO, DIRTY_FILL_RATIO
+    unit: "%"
+    attributes: [cache_ratio_type]
+    gauge:
+      value_type: double
   mongodbatlas.process.cache.size:
     enabled: true
     description: Cache sizes
@@ -452,7 +467,7 @@ metrics:
   mongodbatlas.process.db.operations.rate:
     enabled: true
     description: DB Operation Rates
-    extended_documentation: Aggregate of MongoDB Metrics OPCOUNTER_GETMORE, OPERATIONS_SCAN_AND_ORDER, OPCOUNTER_UPDATE, OPCOUNTER_REPL_UPDATE, OPCOUNTER_CMD, OPCOUNTER_DELETE, OPCOUNTER_REPL_DELETE, OPCOUNTER_REPL_CMD, OPCOUNTER_QUERY, OPCOUNTER_REPL_INSERT, OPCOUNTER_INSERT
+    extended_documentation: Aggregate of MongoDB Metrics OPCOUNTER_GETMORE, OPERATIONS_SCAN_AND_ORDER, OPCOUNTER_UPDATE, OPCOUNTER_REPL_UPDATE, OPCOUNTER_CMD, OPCOUNTER_DELETE, OPCOUNTER_REPL_DELETE, OPCOUNTER_REPL_CMD, OPCOUNTER_QUERY, OPCOUNTER_REPL_INSERT, OPCOUNTER_INSERT, OPCOUNTER_TTL_DELETED
     unit: "{operations}/s"
     attributes: [operation, cluster_role]
     gauge:
@@ -618,6 +633,14 @@ metrics:
     attributes: [disk_direction]
     gauge:
       value_type: double
+  mongodbatlas.disk.partition.throughput:
+    enabled: false
+    description: Disk throughput
+    extended_documentation: Aggregate of MongoDB Metrics DISK_PARTITION_THROUGHPUT_READ, DISK_PARTITION_THROUGHPUT_WRITE
+    unit: By/s
+    attributes: [disk_direction]
+    gauge:
+      value_type: double
   mongodbatlas.disk.partition.usage.max:
     enabled: true
     description: Disk partition usage (%)
@@ -680,6 +703,14 @@ metrics:
     attributes: [disk_status]
     gauge:
       value_type: double
+  mongodbatlas.disk.partition.queue.depth:
+    enabled: false
+    description: Disk queue depth
+    extended_documentation: Aggregate of MongoDB Metrics DISK_QUEUE_DEPTH
+    unit: "1"
+    attributes: []
+    gauge:
+      value_type: double
   mongodbatlas.db.size:
     enabled: true
     description: Database feature size