diff --git a/esrally/metrics.py b/esrally/metrics.py
index 690df8a88..efb56894e 100644
--- a/esrally/metrics.py
+++ b/esrally/metrics.py
@@ -1613,7 +1613,7 @@ def summary_stats(self, metric_name, task_name):
         median = self.store.get_median(metric_name, task=task_name, sample_type=SampleType.Normal)
         unit = self.store.get_unit(metric_name, task=task_name)
         stats = self.store.get_stats(metric_name, task=task_name, sample_type=SampleType.Normal)
-        if median and stats:
+        if mean and median and stats:
             return {
                 "min": stats["min"],
                 "mean": mean,
@@ -1624,6 +1624,7 @@
         else:
             return {
                 "min": None,
+                "mean": None,
                 "median": None,
                 "max": None,
                 "unit": unit
diff --git a/tests/metrics_test.py b/tests/metrics_test.py
index deaf2b9fb..a5f500764 100644
--- a/tests/metrics_test.py
+++ b/tests/metrics_test.py
@@ -1516,8 +1516,9 @@ def test_calculate_global_stats(self):
         cfg.add(config.Scope.application, "race", "pipeline", "from-sources-skip-build")
         cfg.add(config.Scope.application, "track", "params", {})
 
-        index = track.Task(name="index #1", operation=track.Operation(name="index", operation_type=track.OperationType.Bulk, params=None))
-        challenge = track.Challenge(name="unittest", schedule=[index], default=True)
+        index1 = track.Task(name="index #1", operation=track.Operation(name="index", operation_type=track.OperationType.Bulk, params=None))
+        index2 = track.Task(name="index #2", operation=track.Operation(name="index", operation_type=track.OperationType.Bulk, params=None))
+        challenge = track.Challenge(name="unittest", schedule=[index1, index2], default=True)
         t = track.Track("unittest", "unittest-track", challenges=[challenge])
 
         store = metrics.metrics_store(cfg, read_only=False, track=t, challenge=challenge)
@@ -1541,6 +1542,15 @@ def test_calculate_global_stats(self):
                                       meta_data={"success": False})
         store.put_value_cluster_level("service_time", 210, unit="ms", task="index #1", operation_type=track.OperationType.Bulk,
                                       meta_data={"success": True})
+
+        # only warmup samples
+        store.put_value_cluster_level("throughput", 500, unit="docs/s", task="index #2",
+                                      sample_type=metrics.SampleType.Warmup, operation_type=track.OperationType.Bulk)
+        store.put_value_cluster_level("latency", 2800, unit="ms", task="index #2", operation_type=track.OperationType.Bulk,
+                                      sample_type=metrics.SampleType.Warmup)
+        store.put_value_cluster_level("service_time", 250, unit="ms", task="index #2", operation_type=track.OperationType.Bulk,
+                                      sample_type=metrics.SampleType.Warmup)
+
         store.put_doc(doc={
             "name": "ml_processing_time",
             "job": "benchmark_ml_job_1",
@@ -1564,6 +1574,10 @@ def test_calculate_global_stats(self):
                          [("50_0", 200), ("100_0", 210), ("mean", 200), ("unit", "ms")]), opm["service_time"])
         self.assertAlmostEqual(0.3333333333333333, opm["error_rate"])
 
+        opm2 = stats.metrics("index #2")
+        self.assertEqual(collections.OrderedDict(
+            [("min", None), ("mean", None), ("median", None), ("max", None), ("unit", "docs/s")]), opm2["throughput"])
+
         self.assertEqual(1, len(stats.ml_processing_time))
         self.assertEqual("benchmark_ml_job_1", stats.ml_processing_time[0]["job"])
         self.assertEqual(2.2, stats.ml_processing_time[0]["min"])
@@ -1663,6 +1677,7 @@ def test_as_flat_list(self):
                     "operation": "index",
                     "throughput": {
                         "min": 450,
+                        "mean": 450,
                         "median": 450,
                         "max": 452,
                         "unit": "docs/s"
@@ -1686,6 +1701,7 @@ def test_as_flat_list(self):
                     "operation": "search",
                     "throughput": {
                         "min": 9,
+                        "mean": 10,
                         "median": 10,
                         "max": 12,
                         "unit": "ops/s"
@@ -1750,6 +1766,7 @@ def test_as_flat_list(self):
                 "operation": "index",
                 "value": {
                     "min": 450,
+                    "mean": 450,
                     "median": 450,
                     "max": 452,
                     "unit": "docs/s"
@@ -1807,6 +1824,7 @@ def test_as_flat_list(self):
                 "operation": "search",
                 "value": {
                     "min": 9,
+                    "mean": 10,
                     "median": 10,
                     "max": 12,
                     "unit": "ops/s"