diff --git a/tests/client_test.py b/tests/client_test.py
index b95dcb402..b7a304e9e 100644
--- a/tests/client_test.py
+++ b/tests/client_test.py
@@ -25,6 +25,7 @@
 import elasticsearch
 import urllib3.exceptions
+import pytest
 
 from esrally import client, exceptions, doc_link
 from esrally.utils import console
@@ -42,12 +43,12 @@ def test_create_http_connection(self):
 
         f = client.EsClientFactory(hosts, client_options)
 
-        self.assertEqual(hosts, f.hosts)
-        self.assertIsNone(f.ssl_context)
-        self.assertEqual("http", f.client_options["scheme"])
-        self.assertFalse("http_auth" in f.client_options)
+        assert f.hosts == hosts
+        assert f.ssl_context is None
+        assert f.client_options["scheme"] == "http"
+        assert "http_auth" not in f.client_options
 
-        self.assertDictEqual(original_client_options, client_options)
+        assert client_options == original_client_options
 
     @mock.patch.object(ssl.SSLContext, "load_cert_chain")
     def test_create_https_connection_verify_server(self, mocked_load_cert_chain):
@@ -72,17 +73,17 @@ def test_create_https_connection_verify_server(self, mocked_load_cert_chain):
         assert not mocked_load_cert_chain.called, "ssl_context.load_cert_chain should not have been called as we have not supplied " \
                                                   "client certs"
 
-        self.assertEqual(hosts, f.hosts)
-        self.assertTrue(f.ssl_context.check_hostname)
-        self.assertEqual(ssl.CERT_REQUIRED, f.ssl_context.verify_mode)
+        assert f.hosts == hosts
+        assert f.ssl_context.check_hostname
+        assert f.ssl_context.verify_mode == ssl.CERT_REQUIRED
 
-        self.assertEqual("https", f.client_options["scheme"])
-        self.assertEqual(("user", "password"), f.client_options["http_auth"])
-        self.assertNotIn("use_ssl", f.client_options)
-        self.assertNotIn("verify_certs", f.client_options)
-        self.assertNotIn("ca_certs", f.client_options)
+        assert f.client_options["scheme"] == "https"
+        assert f.client_options["http_auth"] == ("user", "password")
+        assert "use_ssl" not in f.client_options
+        assert "verify_certs" not in f.client_options
+        assert "ca_certs" not in f.client_options
 
-        self.assertDictEqual(original_client_options, client_options)
+        assert client_options == original_client_options
 
     @mock.patch.object(ssl.SSLContext, "load_cert_chain")
     def test_create_https_connection_verify_self_signed_server_and_client_certificate(self, mocked_load_cert_chain):
@@ -112,19 +113,19 @@ def test_create_https_connection_verify_self_signed_server_and_client_certificat
             keyfile=client_options["client_key"]
         )
 
-        self.assertEqual(hosts, f.hosts)
-        self.assertTrue(f.ssl_context.check_hostname)
-        self.assertEqual(ssl.CERT_REQUIRED, f.ssl_context.verify_mode)
+        assert f.hosts == hosts
+        assert f.ssl_context.check_hostname
+        assert f.ssl_context.verify_mode == ssl.CERT_REQUIRED
 
-        self.assertEqual("https", f.client_options["scheme"])
-        self.assertEqual(("user", "password"), f.client_options["http_auth"])
-        self.assertNotIn("use_ssl", f.client_options)
-        self.assertNotIn("verify_certs", f.client_options)
-        self.assertNotIn("ca_certs", f.client_options)
-        self.assertNotIn("client_cert", f.client_options)
-        self.assertNotIn("client_key", f.client_options)
+        assert f.client_options["scheme"] == "https"
+        assert f.client_options["http_auth"] == ("user", "password")
+        assert "use_ssl" not in f.client_options
+        assert "verify_certs" not in f.client_options
+        assert "ca_certs" not in f.client_options
+        assert "client_cert" not in f.client_options
+        assert "client_key" not in f.client_options
 
-        self.assertDictEqual(original_client_options, client_options)
+        assert client_options == original_client_options
 
     @mock.patch.object(ssl.SSLContext, "load_cert_chain")
     def test_create_https_connection_only_verify_self_signed_server_certificate(self, mocked_load_cert_chain):
@@ -149,17 +150,17 @@ def test_create_https_connection_only_verify_self_signed_server_certificate(self
         assert not mocked_load_cert_chain.called, "ssl_context.load_cert_chain should not have been called as we have not supplied " \
                                                   "client certs"
 
-        self.assertEqual(hosts, f.hosts)
-        self.assertTrue(f.ssl_context.check_hostname)
-        self.assertEqual(ssl.CERT_REQUIRED, f.ssl_context.verify_mode)
+        assert f.hosts == hosts
+        assert f.ssl_context.check_hostname
+        assert f.ssl_context.verify_mode == ssl.CERT_REQUIRED
 
-        self.assertEqual("https", f.client_options["scheme"])
-        self.assertEqual(("user", "password"), f.client_options["http_auth"])
-        self.assertNotIn("use_ssl", f.client_options)
-        self.assertNotIn("verify_certs", f.client_options)
-        self.assertNotIn("ca_certs", f.client_options)
+        assert f.client_options["scheme"] == "https"
+        assert f.client_options["http_auth"] == ("user", "password")
+        assert "use_ssl" not in f.client_options
+        assert "verify_certs" not in f.client_options
+        assert "ca_certs" not in f.client_options
 
-        self.assertDictEqual(original_client_options, client_options)
+        assert client_options == original_client_options
 
     def test_raises_error_when_only_one_of_client_cert_and_client_key_defined(self):
         hosts = [{"host": "127.0.0.1", "port": 9200}]
@@ -181,7 +182,7 @@ def test_raises_error_when_only_one_of_client_cert_and_client_key_defined(self):
             {random_client_ssl_option: client_ssl_options[random_client_ssl_option]}
         )
 
-        with self.assertRaises(exceptions.SystemSetupError) as ctx:
+        with pytest.raises(exceptions.SystemSetupError) as ctx:
             with mock.patch.object(console, "println") as mocked_console_println:
                 client.EsClientFactory(hosts, client_options)
         mocked_console_println.assert_called_once_with(
@@ -193,13 +194,11 @@ def test_raises_error_when_only_one_of_client_cert_and_client_key_defined(self):
                 console.format.link(doc_link("command_line_reference.html#client-options"))
             )
         )
-        self.assertEqual(
+        assert ctx.value.args[0] == \
             "Cannot specify '{}' without also specifying '{}' in client-options.".format(
                 random_client_ssl_option,
                 missing_client_ssl_option
-            ),
-            ctx.exception.args[0]
-        )
+            )
 
     @mock.patch.object(ssl.SSLContext, "load_cert_chain")
     def test_create_https_connection_unverified_certificate(self, mocked_load_cert_chain):
@@ -225,18 +224,18 @@ def test_create_https_connection_unverified_certificate(self, mocked_load_cert_c
         assert not mocked_load_cert_chain.called, "ssl_context.load_cert_chain should not have been called as we have not supplied " \
                                                   "client certs"
 
-        self.assertEqual(hosts, f.hosts)
-        self.assertFalse(f.ssl_context.check_hostname)
-        self.assertEqual(ssl.CERT_NONE, f.ssl_context.verify_mode)
+        assert f.hosts == hosts
+        assert not f.ssl_context.check_hostname
+        assert f.ssl_context.verify_mode == ssl.CERT_NONE
 
-        self.assertEqual("https", f.client_options["scheme"])
-        self.assertEqual(("user", "password"), f.client_options["http_auth"])
-        self.assertNotIn("use_ssl", f.client_options)
-        self.assertNotIn("verify_certs", f.client_options)
-        self.assertNotIn("basic_auth_user", f.client_options)
-        self.assertNotIn("basic_auth_password", f.client_options)
+        assert f.client_options["scheme"] == "https"
+        assert f.client_options["http_auth"] == ("user", "password")
+        assert "use_ssl" not in f.client_options
+        assert "verify_certs" not in f.client_options
+        assert "basic_auth_user" not in f.client_options
+        assert "basic_auth_password" not in f.client_options
 
-        self.assertDictEqual(original_client_options, client_options)
+        assert client_options == original_client_options
 
     @mock.patch.object(ssl.SSLContext, "load_cert_chain")
     def test_create_https_connection_unverified_certificate_present_client_certificates(self, mocked_load_cert_chain):
@@ -264,21 +263,21 @@ def test_create_https_connection_unverified_certificate_present_client_certifica
             keyfile=client_options["client_key"]
         )
 
-        self.assertEqual(hosts, f.hosts)
-        self.assertFalse(f.ssl_context.check_hostname)
-        self.assertEqual(ssl.CERT_NONE, f.ssl_context.verify_mode)
+        assert f.hosts == hosts
+        assert not f.ssl_context.check_hostname
+        assert f.ssl_context.verify_mode == ssl.CERT_NONE
 
-        self.assertEqual("https", f.client_options["scheme"])
-        self.assertEqual(("user", "password"), f.client_options["http_auth"])
-        self.assertNotIn("use_ssl", f.client_options)
-        self.assertNotIn("verify_certs", f.client_options)
-        self.assertNotIn("basic_auth_user", f.client_options)
-        self.assertNotIn("basic_auth_password", f.client_options)
-        self.assertNotIn("ca_certs", f.client_options)
-        self.assertNotIn("client_cert", f.client_options)
-        self.assertNotIn("client_key", f.client_options)
+        assert f.client_options["scheme"] == "https"
+        assert f.client_options["http_auth"] == ("user", "password")
+        assert "use_ssl" not in f.client_options
+        assert "verify_certs" not in f.client_options
+        assert "basic_auth_user" not in f.client_options
+        assert "basic_auth_password" not in f.client_options
+        assert "ca_certs" not in f.client_options
+        assert "client_cert" not in f.client_options
+        assert "client_key" not in f.client_options
 
-        self.assertDictEqual(original_client_options, client_options)
+        assert client_options == original_client_options
 
 
 class RequestContextManagerTests(TestCase):
@@ -297,9 +296,9 @@ async def test_propagates_nested_context(self):
         top_level_duration = top_level_ctx.request_end - top_level_ctx.request_start
 
         # top level request should cover total duration
-        self.assertAlmostEqual(top_level_duration, 0.2, delta=0.05)
+        assert abs(top_level_duration-0.2) < 0.05
         # nested request should only cover nested duration
-        self.assertAlmostEqual(nested_duration, 0.1, delta=0.05)
+        assert abs(nested_duration-0.1) < 0.05
 
 
 class RestLayerTests(TestCase):
@@ -310,7 +309,7 @@ def test_successfully_waits_for_rest_layer(self, es):
             {"host": "node-b.example.org", "port": 9200}
         ]
 
-        self.assertTrue(client.wait_for_rest_layer(es, max_attempts=3))
+        assert client.wait_for_rest_layer(es, max_attempts=3)
 
         es.cluster.health.assert_has_calls([
             mock.call(wait_for_nodes=">=2"),
@@ -332,14 +331,14 @@ def test_retries_on_transport_errors(self, es, sleep):
                 }
            }
         ]
-        self.assertTrue(client.wait_for_rest_layer(es, max_attempts=5))
+        assert client.wait_for_rest_layer(es, max_attempts=5)
 
     # don't sleep in realtime
     @mock.patch("time.sleep")
     @mock.patch("elasticsearch.Elasticsearch")
     def test_dont_retry_eternally_on_transport_errors(self, es, sleep):
         es.cluster.health.side_effect = elasticsearch.TransportError(401, "Unauthorized")
-        self.assertFalse(client.wait_for_rest_layer(es, max_attempts=3))
+        assert not client.wait_for_rest_layer(es, max_attempts=3)
 
     @mock.patch("elasticsearch.Elasticsearch")
     def test_ssl_error(self, es):
@@ -347,6 +346,5 @@ def test_ssl_error(self, es):
         es.cluster.health.side_effect = elasticsearch.SSLError(
             "[SSL: UNKNOWN_PROTOCOL] unknown protocol (_ssl.c:719)",
             urllib3.exceptions.SSLError(
                 "[SSL: UNKNOWN_PROTOCOL] unknown protocol (_ssl.c:719)"))
-        with self.assertRaisesRegex(expected_exception=exceptions.SystemSetupError,
-                                    expected_regex="Could not connect to cluster via https. Is this an https endpoint?"):
+        with pytest.raises(exceptions.SystemSetupError, match="Could not connect to cluster via https. Is this an https endpoint?"):
             client.wait_for_rest_layer(es, max_attempts=3)
diff --git a/tests/config_test.py b/tests/config_test.py
index bab6165b4..52a693f90 100644
--- a/tests/config_test.py
+++ b/tests/config_test.py
@@ -18,6 +18,8 @@
 import configparser
 from unittest import TestCase
 
+import pytest
+
 from esrally import config, exceptions
 
 
@@ -84,13 +86,13 @@ def load(self, interpolation=None):
 
 class ConfigTests(TestCase):
     def test_load_non_existing_config(self):
         cfg = config.Config(config_file_class=InMemoryConfigStore)
-        self.assertFalse(cfg.config_present())
+        assert not cfg.config_present()
         # standard properties are still available
-        self.assertEqual("rally-node", cfg.opts("provisioning", "node.name.prefix"))
+        assert cfg.opts("provisioning", "node.name.prefix") == "rally-node"
 
     def test_load_existing_config(self):
         cfg = config.Config(config_file_class=InMemoryConfigStore)
-        self.assertFalse(cfg.config_present())
+        assert not cfg.config_present()
 
         sample_config = {
             "tests": {
@@ -102,18 +104,18 @@ def test_load_existing_config(self):
             }
         }
         cfg.config_file.store(sample_config)
 
-        self.assertTrue(cfg.config_present())
+        assert cfg.config_present()
         cfg.load_config()
         # standard properties are still available
-        self.assertEqual("rally-node", cfg.opts("provisioning", "node.name.prefix"))
-        self.assertEqual("value", cfg.opts("tests", "sample.key"))
+        assert cfg.opts("provisioning", "node.name.prefix") == "rally-node"
+        assert cfg.opts("tests", "sample.key") == "value"
         # we can also override values
         cfg.add(config.Scope.applicationOverride, "tests", "sample.key", "override")
-        self.assertEqual("override", cfg.opts("tests", "sample.key"))
+        assert cfg.opts("tests", "sample.key") == "override"
 
     def test_load_all_opts_in_section(self):
         cfg = config.Config(config_file_class=InMemoryConfigStore)
-        self.assertFalse(cfg.config_present())
+        assert not cfg.config_present()
 
         sample_config = {
             "distributions": {
@@ -131,18 +133,18 @@ def test_load_all_opts_in_section(self):
             }
         }
         cfg.config_file.store(sample_config)
 
-        self.assertTrue(cfg.config_present())
+        assert cfg.config_present()
         cfg.load_config()
 
         # override a value so we can see that the scoping logic still works. Default is scope "application"
         cfg.add(config.Scope.applicationOverride, "distributions", "snapshot.cache", "true")
 
-        self.assertEqual({
+        assert cfg.all_opts("distributions") == {
             "release.url": "https://acme.com/releases",
             "release.cache": "true",
             "snapshot.url": "https://acme.com/snapshots",
             # overridden!
             "snapshot.cache": "true"
-        }, cfg.all_opts("distributions"))
+        }
 
     def test_add_all_in_section(self):
         source_cfg = config.Config(config_file_class=InMemoryConfigStore)
@@ -163,11 +165,11 @@ def test_add_all_in_section(self):
 
         target_cfg = config.Config(config_file_class=InMemoryConfigStore)
 
-        self.assertIsNone(target_cfg.opts("tests", "sample.key", mandatory=False))
+        assert target_cfg.opts("tests", "sample.key", mandatory=False) is None
 
         target_cfg.add_all(source=source_cfg, section="tests")
-        self.assertEqual("value", target_cfg.opts("tests", "sample.key"))
-        self.assertIsNone(target_cfg.opts("no_copy", "other.key", mandatory=False))
+        assert target_cfg.opts("tests", "sample.key") == "value"
+        assert target_cfg.opts("no_copy", "other.key", mandatory=False) is None
 
         # nonexisting key will not throw an error
         target_cfg.add_all(source=source_cfg, section="this section does not exist")
@@ -185,9 +187,9 @@ def test_can_create_non_existing_config(self):
         base_cfg.add(config.Scope.application, "defaults", "preserve_benchmark_candidate", True)
 
         cfg = config.auto_load_local_config(base_cfg, config_file_class=InMemoryConfigStore)
-        self.assertTrue(cfg.config_file.present)
+        assert cfg.config_file.present
         # did not just copy base config
-        self.assertNotEqual(base_cfg.opts("benchmarks", "local.dataset.cache"), cfg.opts("benchmarks", "local.dataset.cache"))
+        assert base_cfg.opts("benchmarks", "local.dataset.cache") != cfg.opts("benchmarks", "local.dataset.cache")
         # copied sections from base config
         self.assert_equals_base_config(base_cfg, cfg, "reporting", "datastore.type")
         self.assert_equals_base_config(base_cfg, cfg, "tracks", "metrics.url")
@@ -217,11 +219,11 @@ def test_can_load_and_amend_existing_config(self):
                 "local.dataset.cache": "/tmp/rally/data"
             }
         })
-        self.assertTrue(cfg.config_file.present)
+        assert cfg.config_file.present
         # did not just copy base config
-        self.assertNotEqual(base_cfg.opts("benchmarks", "local.dataset.cache"), cfg.opts("benchmarks", "local.dataset.cache"))
+        assert base_cfg.opts("benchmarks", "local.dataset.cache") != cfg.opts("benchmarks", "local.dataset.cache")
         # keeps config properties
-        self.assertEqual("existing-unit-test-config", cfg.opts("system", "env.name"))
+        assert cfg.opts("system", "env.name") == "existing-unit-test-config"
         # copies additional properties
         self.assert_equals_base_config(base_cfg, cfg, "unit-test", "sample.property")
@@ -252,14 +254,14 @@ def test_can_migrate_outdated_config(self):
                 "java8.home": "/opt/jdk8"
             }
         })
-        self.assertTrue(cfg.config_file.present)
+        assert cfg.config_file.present
         # did not just copy base config
-        self.assertNotEqual(base_cfg.opts("benchmarks", "local.dataset.cache"), cfg.opts("benchmarks", "local.dataset.cache"))
+        assert base_cfg.opts("benchmarks", "local.dataset.cache") != cfg.opts("benchmarks", "local.dataset.cache")
         # migrated existing config
-        self.assertEqual(config.Config.CURRENT_CONFIG_VERSION, int(cfg.opts("meta", "config.version")))
+        assert int(cfg.opts("meta", "config.version")) == config.Config.CURRENT_CONFIG_VERSION
 
     def assert_equals_base_config(self, base_config, local_config, section, key):
-        self.assertEqual(base_config.opts(section, key), local_config.opts(section, key))
+        assert local_config.opts(section, key) == base_config.opts(section, key)
 
 
 class ConfigMigrationTests(TestCase):
@@ -288,8 +290,7 @@ def test_does_not_migrate_outdated_config(self):
         }
         config_file.store(sample_config)
 
-        with self.assertRaisesRegex(exceptions.ConfigError,
-                                    "The config file.*is too old. Please delete it and reconfigure Rally from scratch"):
+        with pytest.raises(exceptions.ConfigError, match="The config file.*is too old. Please delete it and reconfigure Rally from scratch"):
             config.migrate(config_file, config.Config.EARLIEST_SUPPORTED_VERSION - 1, config.Config.CURRENT_CONFIG_VERSION, out=null_output)
 
     # catch all test, migrations are checked in more detail in the other tests
@@ -327,5 +328,5 @@ def test_migrate_from_earliest_supported_to_latest(self):
         config.migrate(config_file, config.Config.EARLIEST_SUPPORTED_VERSION, config.Config.CURRENT_CONFIG_VERSION, out=null_output)
 
         if config.Config.EARLIEST_SUPPORTED_VERSION < config.Config.CURRENT_CONFIG_VERSION:
-            self.assertTrue(config_file.backup_created)
-            self.assertEqual(str(config.Config.CURRENT_CONFIG_VERSION), config_file.config["meta"]["config.version"])
+            assert config_file.backup_created
+            assert config_file.config["meta"]["config.version"] == str(config.Config.CURRENT_CONFIG_VERSION)
diff --git a/tests/driver/driver_test.py b/tests/driver/driver_test.py
index fd820ea47..dbc0536d0 100644
--- a/tests/driver/driver_test.py
+++ b/tests/driver/driver_test.py
@@ -25,6 +25,7 @@
 from unittest import TestCase
 
 import elasticsearch
+import pytest
 
 from esrally import metrics, track, exceptions, config
 from esrally.driver import driver, runner, scheduler
@@ -142,7 +143,7 @@ def test_start_benchmark_and_prepare_track(self, resolve):
         ])
 
         # Did we start all load generators? There is no specific mock assert for this...
-        self.assertEqual(4, target.start_worker.call_count)
+        assert target.start_worker.call_count == 4
 
     def test_assign_drivers_round_robin(self):
         target = self.create_test_driver_target()
@@ -162,7 +163,7 @@ def test_assign_drivers_round_robin(self):
         ])
 
         # Did we start all load generators? There is no specific mock assert for this...
-        self.assertEqual(4, target.start_worker.call_count)
+        assert target.start_worker.call_count == 4
 
     def test_client_reaches_join_point_others_still_executing(self):
         target = self.create_test_driver_target()
@@ -171,16 +172,16 @@ def test_client_reaches_join_point_others_still_executing(self):
         d.prepare_benchmark(t=self.track)
         d.start_benchmark()
 
-        self.assertEqual(0, len(d.workers_completed_current_step))
+        assert len(d.workers_completed_current_step) == 0
 
         d.joinpoint_reached(worker_id=0,
                             worker_local_timestamp=10,
                             task_allocations=[driver.ClientAllocation(client_id=0,
                                                                       task=driver.JoinPoint(id=0))])
 
-        self.assertEqual(1, len(d.workers_completed_current_step))
+        assert len(d.workers_completed_current_step) == 1
 
-        self.assertEqual(0, target.on_task_finished.call_count)
-        self.assertEqual(0, target.drive_at.call_count)
+        assert target.on_task_finished.call_count == 0
+        assert target.drive_at.call_count == 0
 
     def test_client_reaches_join_point_which_completes_parent(self):
         target = self.create_test_driver_target()
@@ -189,7 +190,7 @@ def test_client_reaches_join_point_which_completes_parent(self):
         d.prepare_benchmark(t=self.track)
         d.start_benchmark()
 
-        self.assertEqual(0, len(d.workers_completed_current_step))
+        assert len(d.workers_completed_current_step) == 0
 
         d.joinpoint_reached(worker_id=0,
                             worker_local_timestamp=10,
@@ -198,10 +199,10 @@ def test_client_reaches_join_point_which_completes_parent(self):
                                               task=driver.JoinPoint(id=0,
                                                                     clients_executing_completing_task=[0]))])
 
-        self.assertEqual(-1, d.current_step)
-        self.assertEqual(1, len(d.workers_completed_current_step))
+        assert d.current_step == -1
+        assert len(d.workers_completed_current_step) == 1
         # notified all drivers that they should complete the current task ASAP
-        self.assertEqual(4, target.complete_current_task.call_count)
+        assert target.complete_current_task.call_count == 4
 
         # awaiting responses of other clients
         d.joinpoint_reached(worker_id=1,
@@ -211,8 +212,8 @@ def test_client_reaches_join_point_which_completes_parent(self):
                                               task=driver.JoinPoint(id=0,
                                                                     clients_executing_completing_task=[0]))])
 
-        self.assertEqual(-1, d.current_step)
-        self.assertEqual(2, len(d.workers_completed_current_step))
+        assert d.current_step == -1
+        assert len(d.workers_completed_current_step) == 2
 
         d.joinpoint_reached(worker_id=2,
                             worker_local_timestamp=12,
@@ -220,8 +221,8 @@ def test_client_reaches_join_point_which_completes_parent(self):
                             task_allocations=[driver.ClientAllocation(client_id=2,
                                                                       task=driver.JoinPoint(id=0,
                                                                                             clients_executing_completing_task=[0]))])
 
-        self.assertEqual(-1, d.current_step)
-        self.assertEqual(3, len(d.workers_completed_current_step))
+        assert d.current_step == -1
+        assert len(d.workers_completed_current_step) == 3
 
         d.joinpoint_reached(worker_id=3,
                             worker_local_timestamp=13,
@@ -231,13 +232,13 @@ def test_client_reaches_join_point_which_completes_parent(self):
                                                                     clients_executing_completing_task=[0]))])
 
         # by now the previous step should be considered completed and we are at the next one
-        self.assertEqual(0, d.current_step)
-        self.assertEqual(0, len(d.workers_completed_current_step))
+        assert d.current_step == 0
+        assert len(d.workers_completed_current_step) == 0
 
         # this requires at least Python 3.6
         # target.on_task_finished.assert_called_once()
-        self.assertEqual(1, target.on_task_finished.call_count)
-        self.assertEqual(4, target.drive_at.call_count)
+        assert target.on_task_finished.call_count == 1
+        assert target.drive_at.call_count == 4
 
 
 def op(name, operation_type):
@@ -374,7 +375,7 @@ def test_single_host_assignment_clients_matches_cores(self):
 
         assignments = driver.calculate_worker_assignments(host_configs, client_count=4)
 
-        self.assertEqual([
+        assert assignments == [
             {
                 "host": "localhost",
                 "workers": [
@@ -384,7 +385,7 @@ def test_single_host_assignment_clients_matches_cores(self):
                     [3]
                 ]
             }
-        ], assignments)
+        ]
 
     def test_single_host_assignment_more_clients_than_cores(self):
         host_configs = [{
@@ -394,7 +395,7 @@ def test_single_host_assignment_more_clients_than_cores(self):
 
         assignments = driver.calculate_worker_assignments(host_configs, client_count=6)
 
-        self.assertEqual([
+        assert assignments == [
             {
                 "host": "localhost",
                 "workers": [
@@ -404,7 +405,7 @@ def test_single_host_assignment_more_clients_than_cores(self):
                     [5]
                 ]
             }
-        ], assignments)
+        ]
 
     def test_single_host_assignment_less_clients_than_cores(self):
         host_configs = [{
@@ -414,7 +415,7 @@ def test_single_host_assignment_less_clients_than_cores(self):
 
         assignments = driver.calculate_worker_assignments(host_configs, client_count=2)
 
-        self.assertEqual([
+        assert assignments == [
             {
                 "host": "localhost",
                 "workers": [
@@ -424,7 +425,7 @@ def test_single_host_assignment_less_clients_than_cores(self):
                     []
                 ]
             }
-        ], assignments)
+        ]
 
     def test_multiple_host_assignment_more_clients_than_cores(self):
         host_configs = [
@@ -440,7 +441,7 @@ def test_multiple_host_assignment_more_clients_than_cores(self):
 
         assignments = driver.calculate_worker_assignments(host_configs, client_count=16)
 
-        self.assertEqual([
+        assert assignments == [
             {
                 "host": "host-a",
                 "workers": [
@@ -459,7 +460,7 @@ def test_multiple_host_assignment_more_clients_than_cores(self):
                     [14, 15]
                 ]
             }
-        ], assignments)
+        ]
 
     def test_multiple_host_assignment_less_clients_than_cores(self):
         host_configs = [
@@ -475,7 +476,7 @@ def test_multiple_host_assignment_less_clients_than_cores(self):
 
         assignments = driver.calculate_worker_assignments(host_configs, client_count=4)
 
-        self.assertEqual([
+        assert assignments == [
             {
                 "host": "host-a",
                 "workers": [
@@ -494,7 +495,7 @@ def test_multiple_host_assignment_less_clients_than_cores(self):
                     []
                 ]
             }
-        ], assignments)
+        ]
 
     def test_uneven_assignment_across_hosts(self):
         host_configs = [
@@ -514,7 +515,7 @@ def test_uneven_assignment_across_hosts(self):
 
         assignments = driver.calculate_worker_assignments(host_configs, client_count=17)
 
-        self.assertEqual([
+        assert assignments == [
             {
                 "host": "host-a",
                 "workers": [
@@ -542,7 +543,7 @@ def test_uneven_assignment_across_hosts(self):
                     [16]
                 ]
             }
-        ], assignments)
+        ]
 
 
 class AllocatorTests(TestCase):
@@ -560,35 +561,35 @@ def test_allocates_one_task(self):
 
         allocator = driver.Allocator([task])
 
-        self.assertEqual(1, allocator.clients)
-        self.assertEqual(3, len(allocator.allocations[0]))
-        self.assertEqual(2, len(allocator.join_points))
-        self.assertEqual([{task}], allocator.tasks_per_joinpoint)
+        assert allocator.clients == 1
+        assert len(allocator.allocations[0]) == 3
+        assert len(allocator.join_points) == 2
+        assert allocator.tasks_per_joinpoint == [{task}]
 
     def test_allocates_two_serial_tasks(self):
         task = track.Task("index", op("index", track.OperationType.Bulk))
 
         allocator = driver.Allocator([task, task])
 
-        self.assertEqual(1, allocator.clients)
+        assert allocator.clients == 1
         # we have two operations and three join points
-        self.assertEqual(5, len(allocator.allocations[0]))
-        self.assertEqual(3, len(allocator.join_points))
-        self.assertEqual([{task}, {task}], allocator.tasks_per_joinpoint)
+        assert len(allocator.allocations[0]) == 5
+        assert len(allocator.join_points) == 3
+        assert allocator.tasks_per_joinpoint == [{task}, {task}]
 
     def test_allocates_two_parallel_tasks(self):
         task = track.Task("index", op("index", track.OperationType.Bulk))
 
         allocator = driver.Allocator([track.Parallel([task, task])])
 
-        self.assertEqual(2, allocator.clients)
-        self.assertEqual(3, len(allocator.allocations[0]))
-        self.assertEqual(3, len(allocator.allocations[1]))
-        self.assertEqual(2, len(allocator.join_points))
-        self.assertEqual([{task}], allocator.tasks_per_joinpoint)
+        assert allocator.clients == 2
+        assert len(allocator.allocations[0]) == 3
+        assert len(allocator.allocations[1]) == 3
+        assert len(allocator.join_points) == 2
+        assert allocator.tasks_per_joinpoint == [{task}]
 
         for join_point in allocator.join_points:
-            self.assertFalse(join_point.preceding_task_completes_parent)
-            self.assertEqual(0, join_point.num_clients_executing_completing_task)
+            assert not join_point.preceding_task_completes_parent
+            assert join_point.num_clients_executing_completing_task == 0
 
     def test_a_task_completes_the_parallel_structure(self):
         taskA = track.Task("index-completing", op("index", track.OperationType.Bulk), completes_parent=True)
@@ -596,15 +597,15 @@ def test_a_task_completes_the_parallel_structure(self):
 
         allocator = driver.Allocator([track.Parallel([taskA, taskB])])
 
-        self.assertEqual(2, allocator.clients)
-        self.assertEqual(3, len(allocator.allocations[0]))
-        self.assertEqual(3, len(allocator.allocations[1]))
-        self.assertEqual(2, len(allocator.join_points))
-        self.assertEqual([{taskA, taskB}], allocator.tasks_per_joinpoint)
+        assert allocator.clients == 2
+        assert len(allocator.allocations[0]) == 3
+        assert len(allocator.allocations[1]) == 3
+        assert len(allocator.join_points) == 2
+        assert allocator.tasks_per_joinpoint == [{taskA, taskB}]
 
         final_join_point = allocator.join_points[1]
-        self.assertTrue(final_join_point.preceding_task_completes_parent)
-        self.assertEqual(1, final_join_point.num_clients_executing_completing_task)
-        self.assertEqual([0], final_join_point.clients_executing_completing_task)
+        assert final_join_point.preceding_task_completes_parent
+        assert final_join_point.num_clients_executing_completing_task == 1
+        assert final_join_point.clients_executing_completing_task == [0]
 
     def test_allocates_mixed_tasks(self):
         index = track.Task("index", op("index", track.OperationType.Bulk))
@@ -617,17 +618,17 @@ def test_allocates_mixed_tasks(self):
                                       index,
                                       track.Parallel([search, search, search])])
 
-        self.assertEqual(3, allocator.clients)
+        assert allocator.clients == 3
         # 1 join point, 1 op, 1 jp, 1 (parallel) op, 1 jp, 1 op, 1 jp, 1 op, 1 jp, 1 (parallel) op, 1 jp
-        self.assertEqual(11, len(allocator.allocations[0]))
-        self.assertEqual(11, len(allocator.allocations[1]))
-        self.assertEqual(11, len(allocator.allocations[2]))
-        self.assertEqual(6, len(allocator.join_points))
-        self.assertEqual([{index}, {index, stats}, {index}, {index}, {search}], allocator.tasks_per_joinpoint)
+        assert len(allocator.allocations[0]) == 11
+        assert len(allocator.allocations[1]) == 11
+        assert len(allocator.allocations[2]) == 11
+        assert len(allocator.join_points) == 6
+        assert allocator.tasks_per_joinpoint == [{index}, {index, stats}, {index}, {index}, {search}]
 
         for join_point in allocator.join_points:
-            self.assertFalse(join_point.preceding_task_completes_parent)
-            self.assertEqual(0, join_point.num_clients_executing_completing_task)
+            assert not join_point.preceding_task_completes_parent
+            assert join_point.num_clients_executing_completing_task == 0
 
     # TODO (follow-up PR): We should probably forbid this
     def test_allocates_more_tasks_than_clients(self):
@@ -639,38 +640,38 @@ def test_allocates_more_tasks_than_clients(self):
 
         allocator = driver.Allocator([track.Parallel(tasks=[index_a, index_b, index_c, index_d, index_e], clients=2)])
 
-        self.assertEqual(2, allocator.clients)
+        assert allocator.clients == 2
 
         allocations = allocator.allocations
 
         # 2 clients
-        self.assertEqual(2, len(allocations))
+        assert len(allocations) == 2
         # join_point, index_a, index_c, index_e, join_point
-        self.assertEqual(5, len(allocations[0]))
+        assert len(allocations[0]) == 5
         # we really have no chance to extract the join point so we just take what is there...
-        self.assertEqual([
+        assert allocations[0] == [
             allocations[0][0],
             self.ta(index_a, client_index_in_task=0, global_client_index=0, total_clients=2),
             self.ta(index_c, client_index_in_task=0, global_client_index=2, total_clients=2),
             self.ta(index_e, client_index_in_task=0, global_client_index=4, total_clients=2),
             allocations[0][4]
-        ], allocations[0])
+        ]
         # join_point, index_a, index_c, None, join_point
-        self.assertEqual(5, len(allocator.allocations[1]))
-        self.assertEqual([
+        assert len(allocator.allocations[1]) == 5
+        assert allocations[1] == [
             allocations[1][0],
             self.ta(index_b, client_index_in_task=0, global_client_index=1, total_clients=2),
             self.ta(index_d, client_index_in_task=0, global_client_index=3, total_clients=2),
             None,
             allocations[1][4]
-        ], allocations[1])
+        ]
 
-        self.assertEqual([{index_a, index_b, index_c, index_d, index_e}], allocator.tasks_per_joinpoint)
-        self.assertEqual(2, len(allocator.join_points))
+        assert allocator.tasks_per_joinpoint == [{index_a, index_b, index_c, index_d, index_e}]
+        assert len(allocator.join_points) == 2
         final_join_point = allocator.join_points[1]
-        self.assertTrue(final_join_point.preceding_task_completes_parent)
-        self.assertEqual(1, final_join_point.num_clients_executing_completing_task)
-        self.assertEqual([1], final_join_point.clients_executing_completing_task)
+        assert final_join_point.preceding_task_completes_parent
+        assert final_join_point.num_clients_executing_completing_task == 1
+        assert final_join_point.clients_executing_completing_task == [1]
 
     # TODO (follow-up PR): We should probably forbid this
    def test_considers_number_of_clients_per_subtask(self):
@@ -680,51 +681,51 @@ def test_considers_number_of_clients_per_subtask(self):
 
         allocator = driver.Allocator([track.Parallel(tasks=[index_a, index_b, index_c], clients=3)])
 
-        self.assertEqual(3, allocator.clients)
+        assert allocator.clients == 3
 
         allocations = allocator.allocations
 
         # 3 clients
-        self.assertEqual(3, len(allocations))
+        assert len(allocations) == 3
 
         # tasks that client 0 will execute:
         # join_point, index_a, index_c, join_point
-        self.assertEqual(4, len(allocations[0]))
+        assert len(allocations[0]) == 4
         # we really have no chance to extract the join point so we just take what is there...
-        self.assertEqual([
+        assert allocations[0] == [
             allocations[0][0],
             self.ta(index_a, client_index_in_task=0, global_client_index=0, total_clients=3),
             self.ta(index_c, client_index_in_task=1, global_client_index=3, total_clients=3),
             allocations[0][3]
-        ], allocations[0])
+        ]
 
         # task that client 1 will execute:
         # join_point, index_b, None, join_point
-        self.assertEqual(4, len(allocator.allocations[1]))
-        self.assertEqual([
+        assert len(allocator.allocations[1]) == 4
+        assert allocations[1] == [
             allocations[1][0],
             self.ta(index_b, client_index_in_task=0, global_client_index=1, total_clients=3),
             None,
             allocations[1][3]
-        ], allocations[1])
+        ]
 
         # tasks that client 2 will execute:
-        self.assertEqual(4, len(allocator.allocations[2]))
-        self.assertEqual([
+        assert len(allocator.allocations[2]) == 4
+        assert allocations[2] == [
             allocations[2][0],
             self.ta(index_c, client_index_in_task=0, global_client_index=2, total_clients=3),
             None,
             allocations[2][3]
-        ], allocations[2])
+        ]
 
-        self.assertEqual([{index_a, index_b, index_c}], allocator.tasks_per_joinpoint)
+        assert allocator.tasks_per_joinpoint == [{index_a, index_b, index_c}]
 
-        self.assertEqual(2, len(allocator.join_points))
+        assert len(allocator.join_points) == 2
         final_join_point = allocator.join_points[1]
-        self.assertTrue(final_join_point.preceding_task_completes_parent)
+        assert final_join_point.preceding_task_completes_parent
         # task index_c has two clients, hence we have to wait for two clients to finish
-        self.assertEqual(2, final_join_point.num_clients_executing_completing_task)
-        self.assertEqual([2, 0], final_join_point.clients_executing_completing_task)
+        assert final_join_point.num_clients_executing_completing_task == 2
+        assert final_join_point.clients_executing_completing_task == [2, 0]
 
 
 class MetricsAggregationTests(TestCase):
@@ -741,13 +742,13 @@ def test_different_sample_types(self):
 
         aggregated = self.calculate_global_throughput(samples)
 
-        self.assertIn(op, aggregated)
-        self.assertEqual(1, len(aggregated))
+        assert op in aggregated
+        assert len(aggregated) == 1
 
         throughput = aggregated[op]
 
-        self.assertEqual(2, len(throughput))
-        self.assertEqual((1470838595, 21, metrics.SampleType.Warmup, 3000, "docs/s"), throughput[0])
-        self.assertEqual((1470838595.5, 21.5, metrics.SampleType.Normal, 3666.6666666666665, "docs/s"), throughput[1])
+        assert len(throughput) == 2
+        assert throughput[0] == (1470838595, 21, metrics.SampleType.Warmup, 3000, "docs/s")
+        assert throughput[1] == (1470838595.5, 21.5, metrics.SampleType.Normal, 3666.6666666666665, "docs/s")
 
     def test_single_metrics_aggregation(self):
         op = track.Operation("index", track.OperationType.Bulk, param_source="driver-test-param-source")
@@ -766,17 +767,17 @@ def test_single_metrics_aggregation(self):
 
         aggregated = self.calculate_global_throughput(samples)
 
-        self.assertIn(op, aggregated)
-        self.assertEqual(1, len(aggregated))
+        assert op in aggregated
+        assert len(aggregated) == 1
 
         throughput = aggregated[op]
 
-        self.assertEqual(6, len(throughput))
-        self.assertEqual((38595, 21, metrics.SampleType.Normal, 5000, "docs/s"), throughput[0])
-        self.assertEqual((38596, 22, metrics.SampleType.Normal, 5000, "docs/s"), throughput[1])
-        self.assertEqual((38597, 23, metrics.SampleType.Normal, 5000, "docs/s"), throughput[2])
-        self.assertEqual((38598, 24, metrics.SampleType.Normal, 5000, "docs/s"), throughput[3])
-        self.assertEqual((38599, 25, metrics.SampleType.Normal, 6000, "docs/s"), throughput[4])
-        self.assertEqual((38600, 26, metrics.SampleType.Normal, 6666.666666666667, "docs/s"), throughput[5])
+        assert len(throughput) == 6
+        assert throughput[0] == (38595, 21, metrics.SampleType.Normal, 5000, "docs/s")
+        assert throughput[1] == (38596, 22, metrics.SampleType.Normal, 5000, "docs/s")
+        assert throughput[2] == (38597, 23, metrics.SampleType.Normal, 5000, "docs/s")
+        assert throughput[3] == (38598, 24, metrics.SampleType.Normal, 5000, "docs/s")
+        assert throughput[4] == (38599, 25, metrics.SampleType.Normal, 6000, "docs/s")
+        assert throughput[5] == (38600, 26, metrics.SampleType.Normal, 6666.666666666667, "docs/s")
         # self.assertEqual((1470838600.5, 26.5, metrics.SampleType.Normal, 10000), throughput[6])
 
     def test_use_provided_throughput(self):
@@ -791,14 +792,14 @@ def test_use_provided_throughput(self):
 
         aggregated = self.calculate_global_throughput(samples)
 
-        self.assertIn(op, aggregated)
-        self.assertEqual(1, len(aggregated))
+        assert op in aggregated
+        assert len(aggregated) == 1
 
         throughput = aggregated[op]
 
-        self.assertEqual(3, len(throughput))
-        self.assertEqual((38595, 21, metrics.SampleType.Normal, 8000, "byte/s"), throughput[0])
-        self.assertEqual((38596, 22, metrics.SampleType.Normal, 8000, "byte/s"), throughput[1])
-        self.assertEqual((38597, 23, metrics.SampleType.Normal, 8000, "byte/s"), throughput[2])
+        assert len(throughput) == 3
+        assert throughput[0] == (38595, 21, metrics.SampleType.Normal, 8000, "byte/s")
+        assert throughput[1] == (38596, 22, metrics.SampleType.Normal, 8000, "byte/s")
+        assert throughput[2] == (38597, 23, metrics.SampleType.Normal, 8000, "byte/s")
 
     def calculate_global_throughput(self, samples):
         return driver.ThroughputCalculator().calculate(samples)
@@ -841,11 +842,11 @@ async def assert_schedule(self, expected_schedule, schedule_handle, infinite_sch
         async for invocation_time, sample_type, progress_percent, runner, params in schedule_handle():
             schedule_handle.before_request(now=idx)
             exp_invocation_time, exp_sample_type, exp_progress_percent, exp_params = expected_schedule[idx]
-            self.assertAlmostEqual(exp_invocation_time, invocation_time, msg="Invocation time for sample at index %d does not match" % idx)
-            self.assertEqual(exp_sample_type, sample_type, "Sample type for sample at index %d does not match" % idx)
-            self.assertEqual(exp_progress_percent, progress_percent, "Current progress for sample at index %d does not match" % idx)
-            self.assertIsNotNone(runner, "runner must be defined")
-            self.assertEqual(exp_params, params, "Parameters do not match")
+            assert round(abs(exp_invocation_time-invocation_time), 7) == 0, "Invocation time for sample at index %d does not match" % idx
+            assert sample_type == exp_sample_type, "Sample type for sample at index %d does not match" % idx
+            assert progress_percent == exp_progress_percent, "Current progress for sample at index %d does not match" % idx
+            assert runner is not None, "runner must be defined"
+            assert params == exp_params, "Parameters do not match"
             idx += 1
             # for infinite schedules we only check the first few elements
             if infinite_schedule and idx == len(expected_schedule):
@@ -853,7 +854,7 @@ async def assert_schedule(self, expected_schedule, schedule_handle, infinite_sch
             # simulate that the request is done - we only support throttling based on request count (ops).
             schedule_handle.after_request(now=idx, weight=1, unit="ops", request_meta_data=None)
         if not infinite_schedule:
-            self.assertEqual(len(expected_schedule), idx, msg="Number of elements in the schedules do not match")
+            assert idx == len(expected_schedule), "Number of elements in the schedules do not match"
 
     def setUp(self):
         self.test_track = track.Track(name="unittest")
@@ -887,8 +888,8 @@ def test_injects_parameter_source_into_scheduler(self):
         param_source = track.operation_parameters(self.test_track, task)
         schedule = driver.schedule_for(task_allocation, param_source)
 
-        self.assertIsNotNone(schedule.sched.parameter_source, "Parameter source has not been injected into scheduler")
-        self.assertEqual(param_source, schedule.sched.parameter_source)
+        assert schedule.sched.parameter_source is not None, "Parameter source has not been injected into scheduler"
+        assert schedule.sched.parameter_source == param_source
 
     @run_async
     async def test_search_task_one_client(self):
@@ -1111,24 +1112,24 @@ async def test_schedule_for_time_based(self):
         schedule_handle = driver.schedule_for(task_allocation, param_source)
         schedule_handle.start()
         # first client does not wait
-        self.assertEqual(0.0, schedule_handle.ramp_up_wait_time)
+        assert schedule_handle.ramp_up_wait_time == 0.0
         schedule = schedule_handle()
 
         last_progress = -1
 
         async for invocation_time, sample_type, progress_percent, runner, params in schedule:
             # we're not throughput throttled
-            self.assertEqual(0, invocation_time)
+            assert invocation_time == 0
             if progress_percent <= 0.5:
-                self.assertEqual(metrics.SampleType.Warmup, sample_type)
+                assert sample_type == metrics.SampleType.Warmup
             else:
-                self.assertEqual(metrics.SampleType.Normal, sample_type)
-            self.assertTrue(last_progress < progress_percent)
+                assert sample_type == metrics.SampleType.Normal
+            assert last_progress < progress_percent
             last_progress = progress_percent
-            self.assertTrue(round(progress_percent, 2) >= 0.0, "progress should be >= 0.0 but was [%f]" % progress_percent)
-            self.assertTrue(round(progress_percent, 2) <= 1.0, "progress should be <= 1.0 but was [%f]" % progress_percent)
-            self.assertIsNotNone(runner, "runner must be defined")
-            self.assertEqual({"body": ["a"], "size": 11}, params)
+            assert round(progress_percent, 2) >= 0.0, "progress should be >= 0.0 but was [%f]" % progress_percent
+            assert round(progress_percent, 2) <= 1.0, "progress should be <= 1.0 but was [%f]" % progress_percent
+            assert runner is not None, "runner must be defined"
+            assert params == {"body": ["a"], "size": 11}
 
     @run_async
     async def test_schedule_for_time_based_with_multiple_clients(self):
@@ -1150,24 +1151,24 @@ async def test_schedule_for_time_based_with_multiple_clients(self):
         schedule_handle = driver.schedule_for(task_allocation, param_source)
         schedule_handle.start()
         # client number 4 out of 8 -> 0.1 * (4 / 8) = 0.05
-        self.assertEqual(0.05, schedule_handle.ramp_up_wait_time)
+        assert schedule_handle.ramp_up_wait_time == 0.05
         schedule = schedule_handle()
 
         last_progress = -1
 
         async for invocation_time, sample_type, progress_percent, runner, params in schedule:
             # we're not throughput throttled
-            self.assertEqual(0, invocation_time)
+            assert invocation_time == 0
             if progress_percent <= 0.5:
-                self.assertEqual(metrics.SampleType.Warmup, sample_type)
+                assert sample_type == metrics.SampleType.Warmup
             else:
-                self.assertEqual(metrics.SampleType.Normal, sample_type)
-            self.assertTrue(last_progress < progress_percent)
+                assert sample_type == metrics.SampleType.Normal
+            assert last_progress < progress_percent
             last_progress = progress_percent
-            self.assertTrue(round(progress_percent, 2) >= 0.0, "progress should be >= 0.0 but was [%f]" % progress_percent)
-            self.assertTrue(round(progress_percent, 2) <= 1.0, "progress should be <= 1.0 but was [%f]" % progress_percent)
-            self.assertIsNotNone(runner, "runner must be defined")
-            self.assertEqual({"body": ["a"], "size": 11}, params)
+            assert round(progress_percent, 2) >= 0.0, "progress should be >= 0.0 but was [%f]" % progress_percent
+            assert round(progress_percent, 2) <= 1.0, "progress should be <= 1.0 but was [%f]" % progress_percent
+            assert runner is not None, "runner must be defined"
+            assert params == {"body": ["a"], "size": 11}
@@ -1299,23 +1300,23 @@ async def test_execute_schedule_in_throughput_mode(self, es):
 
         samples = sampler.samples
 
-        self.assertTrue(len(samples) > 0)
-        self.assertFalse(complete.is_set(), "Executor should not auto-complete a normal task")
+        assert len(samples) > 0
+        assert not complete.is_set(), "Executor should not auto-complete a normal task"
         previous_absolute_time = -1.0
         previous_relative_time = -1.0
         for sample in samples:
-            self.assertEqual(2, sample.client_id)
-            self.assertEqual(task, sample.task)
-            self.assertLess(previous_absolute_time, sample.absolute_time)
+            assert sample.client_id == 2
+            assert sample.task == task
+            assert previous_absolute_time < sample.absolute_time
             previous_absolute_time = sample.absolute_time
-            self.assertLess(previous_relative_time, sample.relative_time)
+            assert previous_relative_time < sample.relative_time
             previous_relative_time = sample.relative_time
             # we don't have any warmup time period
-            self.assertEqual(metrics.SampleType.Normal, sample.sample_type)
+            assert sample.sample_type == metrics.SampleType.Normal
            # latency equals service time in throughput mode
-            self.assertEqual(sample.latency, sample.service_time)
-            self.assertEqual(1, sample.total_ops)
-            self.assertEqual("docs", sample.total_ops_unit)
+            assert sample.service_time == sample.latency
+            assert sample.total_ops == 1
+            assert sample.total_ops_unit == "docs"
 
     @mock.patch("elasticsearch.Elasticsearch")
     @run_async
@@ -1358,27 +1359,27 @@ async def test_execute_schedule_with_progress_determined_by_runner(self, es):
 
         samples = sampler.samples
 
-        self.assertEqual(5, len(samples))
-        self.assertTrue(self.runner_with_progress.completed)
-        self.assertEqual(1.0, self.runner_with_progress.percent_completed)
-        self.assertFalse(complete.is_set(), "Executor should not auto-complete a normal task")
+        assert len(samples) == 5
+        assert self.runner_with_progress.completed
+        assert self.runner_with_progress.percent_completed == 1.0
+        assert not complete.is_set(), "Executor should not auto-complete a normal task"
         previous_absolute_time = -1.0
         previous_relative_time = -1.0
         for sample in samples:
-            self.assertEqual(2, sample.client_id)
-            self.assertEqual(task, sample.task)
-            self.assertLess(previous_absolute_time, sample.absolute_time)
+            assert sample.client_id == 2
+            assert sample.task == task
+            assert previous_absolute_time < sample.absolute_time
             previous_absolute_time = sample.absolute_time
-            self.assertLess(previous_relative_time, sample.relative_time)
+            assert previous_relative_time < sample.relative_time
             previous_relative_time = sample.relative_time
             # we don't have any warmup time period
-            self.assertEqual(metrics.SampleType.Normal, sample.sample_type)
+            assert sample.sample_type == metrics.SampleType.Normal
             # throughput is not overridden and will be calculated later
-            self.assertIsNone(sample.throughput)
+            assert sample.throughput is None
             # latency equals service time in throughput mode
-            self.assertEqual(sample.latency, sample.service_time)
-            self.assertEqual(1, sample.total_ops)
-            self.assertEqual("ops", sample.total_ops_unit)
+            assert sample.service_time == sample.latency
+            assert sample.total_ops == 1
+            assert sample.total_ops_unit == "ops"
 
     @mock.patch("elasticsearch.Elasticsearch")
     @run_async
@@ -1424,19 +1425,19 @@ async def test_execute_schedule_runner_overrides_times(self, es):
 
         samples = sampler.samples
 
-        self.assertFalse(complete.is_set(), "Executor should not auto-complete a normal task")
-        self.assertEqual(1, len(samples))
+        assert not complete.is_set(), "Executor should not auto-complete a normal task"
+        assert len(samples) == 1
         sample = samples[0]
-        self.assertEqual(0, sample.client_id)
-        self.assertEqual(task, sample.task)
+        assert sample.client_id == 0
+        assert sample.task == task
         # we don't have any warmup samples
-        self.assertEqual(metrics.SampleType.Normal, sample.sample_type)
-        self.assertEqual(sample.latency, sample.service_time)
-        self.assertEqual(1, sample.total_ops)
-        self.assertEqual("ops", sample.total_ops_unit)
-        self.assertEqual(1.23, sample.throughput)
-        self.assertIsNotNone(sample.service_time)
-        self.assertIsNotNone(sample.time_period)
+        assert sample.sample_type == metrics.SampleType.Normal
+        assert sample.service_time == sample.latency
+        assert sample.total_ops == 1
+        assert sample.total_ops_unit == "ops"
+        assert sample.throughput == 1.23
+        assert sample.service_time is not None
+        assert sample.time_period is not None
 
     @mock.patch("elasticsearch.Elasticsearch")
     @run_async
@@ -1501,9 +1502,9 @@ def perform_request(*args, **kwargs):
         sample_size = len(samples)
         lower_bound = bounds[0]
         upper_bound = bounds[1]
-        self.assertTrue(lower_bound <= sample_size <= upper_bound,
-                        msg="Expected sample size to be between %d and %d but was %d" % (lower_bound, upper_bound, sample_size))
-        self.assertTrue(complete.is_set(), "Executor should auto-complete a task that terminates its parent")
+        assert lower_bound <= sample_size <= upper_bound, \
+            "Expected sample size to be between %d and %d but was %d" % (lower_bound, upper_bound, sample_size)
+        assert complete.is_set(), "Executor should auto-complete a task that terminates its parent"
 
     @mock.patch("elasticsearch.Elasticsearch")
     @run_async
@@ -1559,7 +1560,7 @@ async def test_cancel_execute_schedule(self, es):
 
         samples = sampler.samples
         sample_size = len(samples)
-        self.assertEqual(0, sample_size)
+        assert sample_size == 0
 
     @mock.patch("elasticsearch.Elasticsearch")
     @run_async
@@ -1610,10 +1611,10 @@ async def __call__(self):
                                           complete=complete,
                                           on_error="continue")
 
-        with self.assertRaisesRegex(exceptions.RallyError, r"Cannot run task \[no-op\]: expected unit test exception"):
+        with pytest.raises(exceptions.RallyError, match=r"Cannot run task \[no-op\]: expected unit test exception"):
             await execute_schedule()
 
-        self.assertEqual(0, es.call_count)
+        assert es.call_count == 0
 
     @run_async
     async def test_execute_single_no_return_value(self):
@@ -1624,9 +1625,9 @@ async def test_execute_single_no_return_value(self):
 
         ops, unit, request_meta_data = await driver.execute_single(self.context_managed(runner), es, params, on_error="continue")
 
-        self.assertEqual(1, ops)
-        self.assertEqual("ops", unit)
-        self.assertEqual({"success": True}, request_meta_data)
+        assert ops == 1
+        assert unit == "ops"
+        assert request_meta_data == {"success": True}
 
     @run_async
     async def test_execute_single_tuple(self):
@@ -1637,9 +1638,9 @@ async def test_execute_single_tuple(self):
 
         ops, unit, request_meta_data = await driver.execute_single(self.context_managed(runner), es, params, on_error="continue")
 
-        self.assertEqual(500, ops)
-        self.assertEqual("MB", unit)
-        self.assertEqual({"success": True}, request_meta_data)
+        assert ops == 500
+        assert unit == "MB"
+        assert request_meta_data == {"success": True}
 
     @run_async
     async def test_execute_single_dict(self):
@@ -1655,13 +1656,13 @@ async def test_execute_single_dict(self):
 
         ops, unit, request_meta_data = await driver.execute_single(self.context_managed(runner), es, params, on_error="continue")
 
-        self.assertEqual(50, ops)
-        self.assertEqual("docs", unit)
-        self.assertEqual({
+        assert ops == 50
+        assert unit == "docs"
+        assert request_meta_data == {
             "some-custom-meta-data": "valid",
             "http-status": 200,
             "success": True
-        }, request_meta_data)
+        }
 
     @run_async
     async def test_execute_single_with_connection_error_always_aborts(self):
@@ -1672,11 +1673,10 @@ async def test_execute_single_with_connection_error_always_aborts(self):
             # ES client uses pseudo-status "N/A" in this case...
             runner = mock.Mock(side_effect=as_future(exception=elasticsearch.ConnectionError("N/A", "no route to host", None)))
 
-            with self.assertRaises(exceptions.RallyAssertionError) as ctx:
+            with pytest.raises(exceptions.RallyAssertionError) as ctx:
                 await driver.execute_single(self.context_managed(runner), es, params, on_error=on_error)
-            self.assertEqual(
-                "Request returned an error. Error type: transport, Description: no route to host",
-                ctx.exception.args[0])
+            assert ctx.value.args[0] == \
+                "Request returned an error. Error type: transport, Description: no route to host"
 
     @run_async
     async def test_execute_single_with_http_400_aborts_when_specified(self):
@@ -1685,11 +1685,10 @@ async def test_execute_single_with_http_400_aborts_when_specified(self):
         runner = mock.Mock(side_effect=
                            as_future(exception=elasticsearch.NotFoundError(404, "not found", "the requested document could not be found")))
 
-        with self.assertRaises(exceptions.RallyAssertionError) as ctx:
+        with pytest.raises(exceptions.RallyAssertionError) as ctx:
             await driver.execute_single(self.context_managed(runner), es, params, on_error="abort")
-        self.assertEqual(
-            "Request returned an error. Error type: transport, Description: not found (the requested document could not be found)",
-            ctx.exception.args[0])
+        assert ctx.value.args[0] == \
+            "Request returned an error. Error type: transport, Description: not found (the requested document could not be found)"
 
     @run_async
@@ -1702,14 +1701,14 @@ async def test_execute_single_with_http_400(self):
 
         ops, unit, request_meta_data = await driver.execute_single(
             self.context_managed(runner), es, params, on_error="continue")
 
-        self.assertEqual(0, ops)
-        self.assertEqual("ops", unit)
-        self.assertEqual({
+        assert ops == 0
+        assert unit == "ops"
+        assert request_meta_data == {
             "http-status": 404,
             "error-type": "transport",
             "error-description": "not found (the requested document could not be found)",
             "success": False
-        }, request_meta_data)
+        }
 
     @run_async
     async def test_execute_single_with_http_413(self):
@@ -1721,14 +1720,14 @@ async def test_execute_single_with_http_413(self):
 
         ops, unit, request_meta_data = await driver.execute_single(
             self.context_managed(runner), es, params, on_error="continue")
 
-        self.assertEqual(0, ops)
-        self.assertEqual("ops", unit)
-        self.assertEqual({
+        assert ops == 0
+        assert unit == "ops"
+        assert request_meta_data == {
             "http-status": 413,
             "error-type": "transport",
             "error-description": "",
             "success": False
-        }, request_meta_data)
+        }
 
     @run_async
     async def test_execute_single_with_key_error(self):
@@ -1746,11 +1745,10 @@ def __str__(self):
         params["mode"] = "append"
         runner = FailingRunner()
 
-        with self.assertRaises(exceptions.SystemSetupError) as ctx:
+        with pytest.raises(exceptions.SystemSetupError) as ctx:
             await driver.execute_single(self.context_managed(runner), es, params, on_error="continue")
-        self.assertEqual(
-            "Cannot execute [failing_mock_runner]. Provided parameters are: ['bulk', 'mode']. Error: ['bulk-size missing'].",
-            ctx.exception.args[0])
+        assert ctx.value.args[0] == \
+            "Cannot execute [failing_mock_runner]. Provided parameters are: ['bulk', 'mode']. Error: ['bulk-size missing']."
 
 
 class AsyncProfilerTests(TestCase):
@@ -1765,6 +1763,6 @@ async def f(x):
         # this should take roughly 1 second and should return something
         return_value = await profiler(1)
         end = time.perf_counter()
-        self.assertEqual(2, return_value)
+        assert return_value == 2
         duration = end - start
-        self.assertTrue(0.9 <= duration <= 1.2, "Should sleep for roughly 1 second but took [%.2f] seconds." % duration)
+        assert 0.9 <= duration <= 1.2, "Should sleep for roughly 1 second but took [%.2f] seconds." % duration
% duration diff --git a/tests/driver/runner_test.py b/tests/driver/runner_test.py index ff689e7fd..ad966d746 100644 --- a/tests/driver/runner_test.py +++ b/tests/driver/runner_test.py @@ -23,6 +23,7 @@ from unittest import TestCase import elasticsearch +import pytest from esrally import client, exceptions from esrally.driver import runner @@ -50,10 +51,9 @@ async def runner_function(*args): runner.register_runner(operation_type="unit_test", runner=runner_function, async_runner=True) returned_runner = runner.runner_for("unit_test") - self.assertIsInstance(returned_runner, runner.NoCompletion) - self.assertEqual("user-defined runner for [runner_function]", repr(returned_runner)) - self.assertEqual(("default_client", "param"), - await returned_runner({"default": "default_client", "other": "other_client"}, "param")) + assert isinstance(returned_runner, runner.NoCompletion) + assert repr(returned_runner) == "user-defined runner for [runner_function]" + assert await returned_runner({"default": "default_client", "other": "other_client"}, "param") == ("default_client", "param") @run_async async def test_single_cluster_runner_class_with_context_manager_should_be_wrapped_with_context_manager_enabled(self): @@ -67,15 +67,13 @@ def __str__(self): test_runner = UnitTestSingleClusterContextManagerRunner() runner.register_runner(operation_type="unit_test", runner=test_runner, async_runner=True) returned_runner = runner.runner_for("unit_test") - self.assertIsInstance(returned_runner, runner.NoCompletion) - self.assertEqual("user-defined context-manager enabled runner for [UnitTestSingleClusterContextManagerRunner]", - repr(returned_runner)) + assert isinstance(returned_runner, runner.NoCompletion) + assert repr(returned_runner) == "user-defined context-manager enabled runner for [UnitTestSingleClusterContextManagerRunner]" # test that context_manager functionality gets preserved after wrapping async with returned_runner: - self.assertEqual(("default_client", "param"), - await returned_runner({"default": "default_client", "other": "other_client"}, "param")) + assert await returned_runner({"default": "default_client", "other": "other_client"}, "param") == ("default_client", "param") # check that the context manager interface of our inner runner has been respected. 
- self.assertTrue(test_runner.fp.closed) + assert test_runner.fp.closed @run_async async def test_multi_cluster_runner_class_with_context_manager_should_be_wrapped_with_context_manager_enabled(self): @@ -91,16 +89,15 @@ def __str__(self): test_runner = UnitTestMultiClusterContextManagerRunner() runner.register_runner(operation_type="unit_test", runner=test_runner, async_runner=True) returned_runner = runner.runner_for("unit_test") - self.assertIsInstance(returned_runner, runner.NoCompletion) - self.assertEqual("user-defined context-manager enabled runner for [UnitTestMultiClusterContextManagerRunner]", - repr(returned_runner)) + assert isinstance(returned_runner, runner.NoCompletion) + assert repr(returned_runner) == "user-defined context-manager enabled runner for [UnitTestMultiClusterContextManagerRunner]" # test that context_manager functionality gets preserved after wrapping all_clients = {"default": "default_client", "other": "other_client"} async with returned_runner: - self.assertEqual((all_clients, "param1", "param2"), await returned_runner(all_clients, "param1", "param2")) + assert await returned_runner(all_clients, "param1", "param2") == (all_clients, "param1", "param2") # check that the context manager interface of our inner runner has been respected. - self.assertTrue(test_runner.fp.closed) + assert test_runner.fp.closed @run_async async def test_single_cluster_runner_class_should_be_wrapped(self): @@ -114,10 +111,9 @@ def __str__(self): test_runner = UnitTestSingleClusterRunner() runner.register_runner(operation_type="unit_test", runner=test_runner, async_runner=True) returned_runner = runner.runner_for("unit_test") - self.assertIsInstance(returned_runner, runner.NoCompletion) - self.assertEqual("user-defined runner for [UnitTestSingleClusterRunner]", repr(returned_runner)) - self.assertEqual(("default_client", "param"), - await returned_runner({"default": "default_client", "other": "other_client"}, "param")) + assert isinstance(returned_runner, runner.NoCompletion) + assert repr(returned_runner) == "user-defined runner for [UnitTestSingleClusterRunner]" + assert await returned_runner({"default": "default_client", "other": "other_client"}, "param") == ("default_client", "param") @run_async async def test_multi_cluster_runner_class_should_be_wrapped(self): @@ -133,10 +129,10 @@ def __str__(self): test_runner = UnitTestMultiClusterRunner() runner.register_runner(operation_type="unit_test", runner=test_runner, async_runner=True) returned_runner = runner.runner_for("unit_test") - self.assertIsInstance(returned_runner, runner.NoCompletion) - self.assertEqual("user-defined runner for [UnitTestMultiClusterRunner]", repr(returned_runner)) + assert isinstance(returned_runner, runner.NoCompletion) + assert repr(returned_runner) == "user-defined runner for [UnitTestMultiClusterRunner]" all_clients = {"default": "default_client", "other": "other_client"} - self.assertEqual((all_clients, "some_param"), await returned_runner(all_clients, "some_param")) + assert await returned_runner(all_clients, "some_param") == (all_clients, "some_param") class AssertingRunnerTests(TestCase): @@ -177,7 +173,7 @@ async def test_asserts_equal_succeeds(self): ] }) - self.assertEqual(response, final_response) + assert final_response == response @run_async async def test_asserts_equal_fails(self): @@ -193,8 +189,7 @@ async def test_asserts_equal_fails(self): delegate = mock.MagicMock() delegate.return_value = as_future(response) r = runner.AssertingRunner(delegate) - with 
self.assertRaisesRegex(exceptions.RallyTaskAssertionError, - r"Expected \[hits.hits.relation\] in \[test-task\] to be == \[eq\] but was \[gte\]."): + with pytest.raises(exceptions.RallyTaskAssertionError, match=r"Expected \[hits.hits.relation\] in \[test-task\] to be == \[eq\] but was \[gte\]."): async with r: await r(es, { "name": "test-task", @@ -231,11 +226,11 @@ async def test_skips_asserts_for_non_dicts(self): ] }) # still passes response as is - self.assertEqual(response, final_response) + assert final_response == response def test_predicates(self): r = runner.AssertingRunner(delegate=None) - self.assertEqual(5, len(r.predicates)) + assert len(r.predicates) == 5 predicate_success = { # predicate: (expected, actual) @@ -248,8 +243,8 @@ def test_predicates(self): for predicate, vals in predicate_success.items(): expected, actual = vals - self.assertTrue(r.predicates[predicate](expected, actual), - f"Expected [{expected} {predicate} {actual}] to succeed.") + assert r.predicates[predicate](expected, actual), \ + f"Expected [{expected} {predicate} {actual}] to succeed." predicate_fail = { # predicate: (expected, actual) @@ -262,8 +257,8 @@ def test_predicates(self): for predicate, vals in predicate_fail.items(): expected, actual = vals - self.assertFalse(r.predicates[predicate](expected, actual), - f"Expected [{expected} {predicate} {actual}] to fail.") + assert not r.predicates[predicate](expected, actual), \ + f"Expected [{expected} {predicate} {actual}] to fail." class SelectiveJsonParserTests(TestCase): @@ -290,9 +285,9 @@ def test_parse_all_expected(self): "meta.date.month" ]) - self.assertEqual("Hello", parsed.get("title")) - self.assertEqual(2000, parsed.get("meta.date.year")) - self.assertNotIn("meta.date.month", parsed) + assert parsed.get("title") == "Hello" + assert parsed.get("meta.date.year") == 2000 + assert "meta.date.month" not in parsed def test_list_length(self): doc = self.doc_as_text({ @@ -330,14 +325,14 @@ def test_list_length(self): "meta.date.month" ], ["authors", "readers", "supporters"]) - self.assertEqual("Hello", parsed.get("title")) - self.assertEqual(2000, parsed.get("meta.date.year")) - self.assertNotIn("meta.date.month", parsed) + assert parsed.get("title") == "Hello" + assert parsed.get("meta.date.year") == 2000 + assert "meta.date.month" not in parsed # lists - self.assertFalse(parsed.get("authors")) - self.assertFalse(parsed.get("readers")) - self.assertTrue(parsed.get("supporters")) + assert not parsed.get("authors") + assert not parsed.get("readers") + assert parsed.get("supporters") class BulkIndexRunnerTests(TestCase): @@ -361,11 +356,11 @@ async def test_bulk_index_missing_params(self, es): "index_line\n" } - with self.assertRaises(exceptions.DataError) as ctx: + with pytest.raises(exceptions.DataError) as ctx: await bulk(es, bulk_params) - self.assertEqual( - "Parameter source for operation 'bulk-index' did not provide the mandatory parameter 'action-metadata-present'. " - "Add it to your parameter source and try again.", ctx.exception.args[0]) + assert ctx.value.args[0] == \ + "Parameter source for operation 'bulk-index' did not provide the mandatory parameter 'action-metadata-present'. " \ + "Add it to your parameter source and try again." 
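Note on the idiom that recurs throughout this file: pytest.raises is a context manager that yields an ExceptionInfo object, and the raised exception itself lives in its .value attribute, whereas unittest's assertRaises context exposes it as .exception. A minimal, self-contained sketch of the converted pattern (illustrative only; the test name and shortened message here are hypothetical, not taken from the suite):

    import pytest
    from esrally import exceptions

    def test_missing_parameter_message():
        with pytest.raises(exceptions.DataError) as ctx:
            # stand-in for the runner call that detects the missing parameter
            raise exceptions.DataError("mandatory parameter 'action-metadata-present' is missing")
        # ExceptionInfo.value is the exception instance, so args[0] is its message
        assert ctx.value.args[0] == "mandatory parameter 'action-metadata-present' is missing"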
@mock.patch("elasticsearch.Elasticsearch") @run_async @@ -392,13 +387,13 @@ async def test_bulk_index_success_with_metadata(self, es): result = await bulk(es, bulk_params) - self.assertEqual(8, result["took"]) - self.assertIsNone(result["index"]) - self.assertEqual(3, result["weight"]) - self.assertEqual("docs", result["unit"]) - self.assertEqual(True, result["success"]) - self.assertEqual(0, result["error-count"]) - self.assertFalse("error-type" in result) + assert result["took"] == 8 + assert result["index"] is None + assert result["weight"] == 3 + assert result["unit"] == "docs" + assert result["success"] is True + assert result["error-count"] == 0 + assert "error-type" not in result es.bulk.assert_called_with(body=bulk_params["body"], params={}) @@ -429,12 +424,12 @@ async def test_simple_bulk_with_timeout_and_headers(self, es): result = await bulk(es, bulk_params) - self.assertEqual(8, result["took"]) - self.assertEqual(3, result["weight"]) - self.assertEqual("docs", result["unit"]) - self.assertEqual(True, result["success"]) - self.assertEqual(0, result["error-count"]) - self.assertFalse("error-type" in result) + assert result["took"] == 8 + assert result["weight"] == 3 + assert result["unit"] == "docs" + assert result["success"] is True + assert result["error-count"] == 0 + assert "error-type" not in result es.bulk.assert_called_with(doc_type="_doc", params={}, @@ -467,13 +462,13 @@ async def test_bulk_index_success_without_metadata_with_doc_type(self, es): result = await bulk(es, bulk_params) - self.assertEqual(8, result["took"]) - self.assertEqual("test-index", result["index"]) - self.assertEqual(3, result["weight"]) - self.assertEqual("docs", result["unit"]) - self.assertEqual(True, result["success"]) - self.assertEqual(0, result["error-count"]) - self.assertFalse("error-type" in result) + assert result["took"] == 8 + assert result["index"] == "test-index" + assert result["weight"] == 3 + assert result["unit"] == "docs" + assert result["success"] is True + assert result["error-count"] == 0 + assert "error-type" not in result es.bulk.assert_called_with(body=bulk_params["body"], index="test-index", doc_type="_doc", params={}) @@ -499,13 +494,13 @@ async def test_bulk_index_success_without_metadata_and_without_doc_type(self, es result = await bulk(es, bulk_params) - self.assertEqual(8, result["took"]) - self.assertEqual("test-index", result["index"]) - self.assertEqual(3, result["weight"]) - self.assertEqual("docs", result["unit"]) - self.assertEqual(True, result["success"]) - self.assertEqual(0, result["error-count"]) - self.assertFalse("error-type" in result) + assert result["took"] == 8 + assert result["index"] == "test-index" + assert result["weight"] == 3 + assert result["unit"] == "docs" + assert result["success"] is True + assert result["error-count"] == 0 + assert "error-type" not in result es.bulk.assert_called_with(body=bulk_params["body"], index="test-index", doc_type=None, params={}) @@ -568,13 +563,13 @@ async def test_bulk_index_error(self, es): result = await bulk(es, bulk_params) - self.assertEqual("test", result["index"]) - self.assertEqual(5, result["took"]) - self.assertEqual(3, result["weight"]) - self.assertEqual("docs", result["unit"]) - self.assertEqual(False, result["success"]) - self.assertEqual(2, result["error-count"]) - self.assertEqual("bulk", result["error-type"]) + assert result["index"] == "test" + assert result["took"] == 5 + assert result["weight"] == 3 + assert result["unit"] == "docs" + assert result["success"] is False + assert 
result["error-count"] == 2 + assert result["error-type"] == "bulk" es.bulk.assert_called_with(body=bulk_params["body"], params={}) @@ -635,13 +630,13 @@ async def test_bulk_index_error_no_shards(self, es): result = await bulk(es, bulk_params) - self.assertEqual("test", result["index"]) - self.assertEqual(20, result["took"]) - self.assertEqual(3, result["weight"]) - self.assertEqual("docs", result["unit"]) - self.assertEqual(False, result["success"]) - self.assertEqual(3, result["error-count"]) - self.assertEqual("bulk", result["error-type"]) + assert result["index"] == "test" + assert result["took"] == 20 + assert result["weight"] == 3 + assert result["unit"] == "docs" + assert result["success"] is False + assert result["error-count"] == 3 + assert result["error-type"] == "bulk" es.bulk.assert_called_with(body=bulk_params["body"], params={}) @@ -742,14 +737,14 @@ async def test_mixed_bulk_with_simple_stats(self, es): result = await bulk(es, bulk_params) - self.assertEqual("test", result["index"]) - self.assertEqual(30, result["took"]) - self.assertNotIn("ingest_took", result, "ingest_took is not extracted with simple stats") - self.assertEqual(4, result["weight"]) - self.assertEqual("docs", result["unit"]) - self.assertEqual(False, result["success"]) - self.assertEqual(2, result["error-count"]) - self.assertEqual("bulk", result["error-type"]) + assert result["index"] == "test" + assert result["took"] == 30 + assert "ingest_took" not in result, "ingest_took is not extracted with simple stats" + assert result["weight"] == 4 + assert result["unit"] == "docs" + assert result["success"] is False + assert result["error-count"] == 2 + assert result["error-type"] == "bulk" es.bulk.assert_called_with(body=bulk_params["body"], params={}) @@ -887,15 +882,15 @@ async def test_mixed_bulk_with_detailed_stats_body_as_string(self, es): result = await bulk(es, bulk_params) - self.assertEqual("test", result["index"]) - self.assertEqual(30, result["took"]) - self.assertEqual(20, result["ingest_took"]) - self.assertEqual(6, result["weight"]) - self.assertEqual("docs", result["unit"]) - self.assertEqual(False, result["success"]) - self.assertEqual(3, result["error-count"]) - self.assertEqual("bulk", result["error-type"]) - self.assertEqual( + assert result["index"] == "test" + assert result["took"] == 30 + assert result["ingest_took"] == 20 + assert result["weight"] == 6 + assert result["unit"] == "docs" + assert result["success"] is False + assert result["error-count"] == 3 + assert result["error-type"] == "bulk" + assert result["ops"] == \ { "index": { "item-count": 4, @@ -908,8 +903,8 @@ async def test_mixed_bulk_with_detailed_stats_body_as_string(self, es): "updated": 1, "noop": 1 } - }, result["ops"]) - self.assertEqual( + } + assert result["shards_histogram"] == \ [ { "item-count": 3, @@ -935,15 +930,15 @@ async def test_mixed_bulk_with_detailed_stats_body_as_string(self, es): "failed": 1 } } - ], result["shards_histogram"]) - self.assertEqual(582, result["bulk-request-size-bytes"]) - self.assertEqual(234, result["total-document-size-bytes"]) + ] + assert result["bulk-request-size-bytes"] == 582 + assert result["total-document-size-bytes"] == 234 es.bulk.assert_called_with(body=bulk_params["body"], params={}) es.bulk.return_value.result().pop("ingest_took") result = await bulk(es, bulk_params) - self.assertNotIn("ingest_took", result) + assert "ingest_took" not in result @mock.patch("elasticsearch.Elasticsearch") @run_async @@ -986,21 +981,21 @@ async def 
test_simple_bulk_with_detailed_stats_body_as_list(self, es): result = await bulk(es, bulk_params) - self.assertEqual("test", result["index"]) - self.assertEqual(30, result["took"]) - self.assertEqual(20, result["ingest_took"]) - self.assertEqual(1, result["weight"]) - self.assertEqual("docs", result["unit"]) - self.assertEqual(True, result["success"]) - self.assertEqual(0, result["error-count"]) - self.assertEqual( + assert result["index"] == "test" + assert result["took"] == 30 + assert result["ingest_took"] == 20 + assert result["weight"] == 1 + assert result["unit"] == "docs" + assert result["success"] is True + assert result["error-count"] == 0 + assert result["ops"] == \ { "index": { "item-count": 1, "created": 1 }, - }, result["ops"]) - self.assertEqual( + } + assert result["shards_histogram"] == \ [ { "item-count": 1, @@ -1010,15 +1005,15 @@ async def test_simple_bulk_with_detailed_stats_body_as_list(self, es): "failed": 0 } } - ], result["shards_histogram"]) - self.assertEqual(93, result["bulk-request-size-bytes"]) - self.assertEqual(39, result["total-document-size-bytes"]) + ] + assert result["bulk-request-size-bytes"] == 93 + assert result["total-document-size-bytes"] == 39 es.bulk.assert_called_with(body=bulk_params["body"], params={}) es.bulk.return_value.result().pop("ingest_took") result = await bulk(es, bulk_params) - self.assertNotIn("ingest_took", result) + assert "ingest_took" not in result @mock.patch("elasticsearch.Elasticsearch") @run_async @@ -1061,7 +1056,7 @@ async def test_simple_bulk_with_detailed_stats_body_as_unrecognized_type(self, e "index": "test" } - with self.assertRaisesRegex(exceptions.DataError, "bulk body is neither string nor list"): + with pytest.raises(exceptions.DataError, match="bulk body is neither string nor list"): await bulk(es, bulk_params) es.bulk.assert_called_with(body=bulk_params["body"], params={}) @@ -1233,9 +1228,9 @@ async def test_indices_stats_without_parameters(self, es): es.indices.stats.return_value = as_future({}) indices_stats = runner.IndicesStats() result = await indices_stats(es, params={}) - self.assertEqual(1, result["weight"]) - self.assertEqual("ops", result["unit"]) - self.assertTrue(result["success"]) + assert result["weight"] == 1 + assert result["unit"] == "ops" + assert result["success"] es.indices.stats.assert_called_once_with(index="_all", metric="_all") @@ -1247,9 +1242,9 @@ async def test_indices_stats_with_timeout_and_headers(self, es): result = await indices_stats(es, params={"request-timeout": 3.0, "headers": {"header1": "value1"}, "opaque-id": "test-id1"}) - self.assertEqual(1, result["weight"]) - self.assertEqual("ops", result["unit"]) - self.assertTrue(result["success"]) + assert result["weight"] == 1 + assert result["unit"] == "ops" + assert result["success"] es.indices.stats.assert_called_once_with(index="_all", metric="_all", @@ -1280,14 +1275,14 @@ async def test_indices_stats_with_failed_condition(self, es): "expected-value": 0 } }) - self.assertEqual(1, result["weight"]) - self.assertEqual("ops", result["unit"]) - self.assertFalse(result["success"]) - self.assertDictEqual({ + assert result["weight"] == 1 + assert result["unit"] == "ops" + assert not result["success"] + assert result["condition"] == { "path": "_all.total.merges.current", "actual-value": "2", "expected-value": "0" - }, result["condition"]) + } es.indices.stats.assert_called_once_with(index="logs-*", metric="_all") @@ -1314,14 +1309,14 @@ async def test_indices_stats_with_successful_condition(self, es): "expected-value": 0 } }) - 
self.assertEqual(1, result["weight"]) - self.assertEqual("ops", result["unit"]) - self.assertTrue(result["success"]) - self.assertDictEqual({ + assert result["weight"] == 1 + assert result["unit"] == "ops" + assert result["success"] + assert result["condition"] == { "path": "_all.total.merges.current", "actual-value": "0", "expected-value": "0" - }, result["condition"]) + } es.indices.stats.assert_called_once_with(index="logs-*", metric="_all") @@ -1348,14 +1343,14 @@ async def test_indices_stats_with_non_existing_path(self, es): "expected-value": 0 } }) - self.assertEqual(1, result["weight"]) - self.assertEqual("ops", result["unit"]) - self.assertFalse(result["success"]) - self.assertDictEqual({ + assert result["weight"] == 1 + assert result["unit"] == "ops" + assert not result["success"] + assert result["condition"] == { "path": "indices.my_index.total.docs.count", "actual-value": None, "expected-value": "0" - }, result["condition"]) + } es.indices.stats.assert_called_once_with(index="logs-*", metric="_all") @@ -1400,13 +1395,13 @@ async def test_query_match_only_request_body_defined(self, es): async with query_runner: result = await query_runner(es, params) - self.assertEqual(1, result["weight"]) - self.assertEqual("ops", result["unit"]) - self.assertEqual(1, result["hits"]) - self.assertEqual("gte", result["hits_relation"]) - self.assertFalse(result["timed_out"]) - self.assertEqual(5, result["took"]) - self.assertFalse("error-type" in result) + assert result["weight"] == 1 + assert result["unit"] == "ops" + assert result["hits"] == 1 + assert result["hits_relation"] == "gte" + assert not result["timed_out"] + assert result["took"] == 5 + assert "error-type" not in result es.transport.perform_request.assert_called_once_with( "GET", @@ -1459,13 +1454,13 @@ async def test_query_with_timeout_and_headers(self, es): async with query_runner: result = await query_runner(es, params) - self.assertEqual(1, result["weight"]) - self.assertEqual("ops", result["unit"]) - self.assertEqual(1, result["hits"]) - self.assertEqual("gte", result["hits_relation"]) - self.assertFalse(result["timed_out"]) - self.assertEqual(5, result["took"]) - self.assertFalse("error-type" in result) + assert result["weight"] == 1 + assert result["unit"] == "ops" + assert result["hits"] == 1 + assert result["hits_relation"] == "gte" + assert not result["timed_out"] + assert result["took"] == 5 + assert "error-type" not in result es.transport.perform_request.assert_called_once_with( "GET", @@ -1514,13 +1509,13 @@ async def test_query_match_using_request_params(self, es): async with query_runner: result = await query_runner(es, params) - self.assertEqual(1, result["weight"]) - self.assertEqual("ops", result["unit"]) - self.assertEqual(2, result["hits"]) - self.assertEqual("eq", result["hits_relation"]) - self.assertFalse(result["timed_out"]) - self.assertEqual(62, result["took"]) - self.assertFalse("error-type" in result) + assert result["weight"] == 1 + assert result["unit"] == "ops" + assert result["hits"] == 2 + assert result["hits_relation"] == "eq" + assert not result["timed_out"] + assert result["took"] == 62 + assert "error-type" not in result es.transport.perform_request.assert_called_once_with( "GET", @@ -1571,13 +1566,13 @@ async def test_query_no_detailed_results(self, es): async with query_runner: result = await query_runner(es, params) - self.assertEqual(1, result["weight"]) - self.assertEqual("ops", result["unit"]) - self.assertNotIn("hits", result) - self.assertNotIn("hits_relation", result) - 
self.assertNotIn("timed_out", result) - self.assertNotIn("took", result) - self.assertNotIn("error-type", result) + assert result["weight"] == 1 + assert result["unit"] == "ops" + assert "hits" not in result + assert "hits_relation" not in result + assert "timed_out" not in result + assert "took" not in result + assert "error-type" not in result es.transport.perform_request.assert_called_once_with( "GET", @@ -1624,13 +1619,13 @@ async def test_query_hits_total_as_number(self, es): async with query_runner: result = await query_runner(es, params) - self.assertEqual(1, result["weight"]) - self.assertEqual("ops", result["unit"]) - self.assertEqual(2, result["hits"]) - self.assertEqual("eq", result["hits_relation"]) - self.assertFalse(result["timed_out"]) - self.assertEqual(5, result["took"]) - self.assertFalse("error-type" in result) + assert result["weight"] == 1 + assert result["unit"] == "ops" + assert result["hits"] == 2 + assert result["hits_relation"] == "eq" + assert not result["timed_out"] + assert result["took"] == 5 + assert "error-type" not in result es.transport.perform_request.assert_called_once_with( "GET", @@ -1682,13 +1677,13 @@ async def test_query_match_all(self, es): async with query_runner: result = await query_runner(es, params) - self.assertEqual(1, result["weight"]) - self.assertEqual("ops", result["unit"]) - self.assertEqual(2, result["hits"]) - self.assertEqual("eq", result["hits_relation"]) - self.assertFalse(result["timed_out"]) - self.assertEqual(5, result["took"]) - self.assertFalse("error-type" in result) + assert result["weight"] == 1 + assert result["unit"] == "ops" + assert result["hits"] == 2 + assert result["hits_relation"] == "eq" + assert not result["timed_out"] + assert result["took"] == 5 + assert "error-type" not in result es.transport.perform_request.assert_called_once_with( "GET", @@ -1740,13 +1735,13 @@ async def test_query_match_all_doc_type_fallback(self, es): async with query_runner: result = await query_runner(es, params) - self.assertEqual(1, result["weight"]) - self.assertEqual("ops", result["unit"]) - self.assertEqual(2, result["hits"]) - self.assertEqual("eq", result["hits_relation"]) - self.assertFalse(result["timed_out"]) - self.assertEqual(5, result["took"]) - self.assertFalse("error-type" in result) + assert result["weight"] == 1 + assert result["unit"] == "ops" + assert result["hits"] == 2 + assert result["hits_relation"] == "eq" + assert not result["timed_out"] + assert result["took"] == 5 + assert "error-type" not in result es.transport.perform_request.assert_called_once_with( "GET", "/unittest/type/_search", @@ -1800,14 +1795,14 @@ async def test_scroll_query_only_one_page(self, es): async with query_runner: results = await query_runner(es, params) - self.assertEqual(1, results["weight"]) - self.assertEqual(1, results["pages"]) - self.assertEqual(2, results["hits"]) - self.assertEqual("eq", results["hits_relation"]) - self.assertEqual(4, results["took"]) - self.assertEqual("pages", results["unit"]) - self.assertFalse(results["timed_out"]) - self.assertFalse("error-type" in results) + assert results["weight"] == 1 + assert results["pages"] == 1 + assert results["hits"] == 2 + assert results["hits_relation"] == "eq" + assert results["took"] == 4 + assert results["unit"] == "pages" + assert not results["timed_out"] + assert "error-type" not in results es.transport.perform_request.assert_called_once_with( "GET", @@ -1862,14 +1857,14 @@ async def test_scroll_query_no_request_cache(self, es): async with query_runner: results = await 
query_runner(es, params) - self.assertEqual(1, results["weight"]) - self.assertEqual(1, results["pages"]) - self.assertEqual(2, results["hits"]) - self.assertEqual("eq", results["hits_relation"]) - self.assertEqual(4, results["took"]) - self.assertEqual("pages", results["unit"]) - self.assertFalse(results["timed_out"]) - self.assertFalse("error-type" in results) + assert results["weight"] == 1 + assert results["pages"] == 1 + assert results["hits"] == 2 + assert results["hits_relation"] == "eq" + assert results["took"] == 4 + assert results["unit"] == "pages" + assert not results["timed_out"] + assert "error-type" not in results es.transport.perform_request.assert_called_once_with( "GET", @@ -1923,14 +1918,14 @@ async def test_scroll_query_only_one_page_only_request_body_defined(self, es): async with query_runner: results = await query_runner(es, params) - self.assertEqual(1, results["weight"]) - self.assertEqual(1, results["pages"]) - self.assertEqual(2, results["hits"]) - self.assertEqual("eq", results["hits_relation"]) - self.assertEqual(4, results["took"]) - self.assertEqual("pages", results["unit"]) - self.assertFalse(results["timed_out"]) - self.assertFalse("error-type" in results) + assert results["weight"] == 1 + assert results["pages"] == 1 + assert results["hits"] == 2 + assert results["hits_relation"] == "eq" + assert results["took"] == 4 + assert results["unit"] == "pages" + assert not results["timed_out"] + assert "error-type" not in results es.transport.perform_request.assert_called_once_with( "GET", @@ -2005,14 +2000,14 @@ async def test_scroll_query_with_explicit_number_of_pages(self, es): async with query_runner: results = await query_runner(es, params) - self.assertEqual(2, results["weight"]) - self.assertEqual(2, results["pages"]) - self.assertEqual(3, results["hits"]) - self.assertEqual("eq", results["hits_relation"]) - self.assertEqual(79, results["took"]) - self.assertEqual("pages", results["unit"]) - self.assertTrue(results["timed_out"]) - self.assertFalse("error-type" in results) + assert results["weight"] == 2 + assert results["pages"] == 2 + assert results["hits"] == 3 + assert results["hits_relation"] == "eq" + assert results["took"] == 79 + assert results["unit"] == "pages" + assert results["timed_out"] + assert "error-type" not in results es.clear_scroll.assert_called_once_with(body={"scroll_id": ["some-scroll-id"]}) @@ -2057,13 +2052,13 @@ async def test_scroll_query_cannot_clear_scroll(self, es): async with query_runner: results = await query_runner(es, params) - self.assertEqual(1, results["weight"]) - self.assertEqual(1, results["pages"]) - self.assertEqual(1, results["hits"]) - self.assertEqual("eq", results["hits_relation"]) - self.assertEqual("pages", results["unit"]) - self.assertEqual(53, results["took"]) - self.assertFalse("error-type" in results) + assert results["weight"] == 1 + assert results["pages"] == 1 + assert results["hits"] == 1 + assert results["hits_relation"] == "eq" + assert results["unit"] == "pages" + assert results["took"] == 53 + assert "error-type" not in results es.clear_scroll.assert_called_once_with(body={"scroll_id": ["some-scroll-id"]}) @@ -2130,14 +2125,14 @@ async def test_scroll_query_request_all_pages(self, es): async with query_runner: results = await query_runner(es, params) - self.assertEqual(2, results["weight"]) - self.assertEqual(2, results["pages"]) - self.assertEqual(4, results["hits"]) - self.assertEqual("gte", results["hits_relation"]) - self.assertEqual(878, results["took"]) - self.assertEqual("pages", 
results["unit"]) - self.assertFalse(results["timed_out"]) - self.assertFalse("error-type" in results) + assert results["weight"] == 2 + assert results["pages"] == 2 + assert results["hits"] == 4 + assert results["hits_relation"] == "gte" + assert results["took"] == 878 + assert results["unit"] == "pages" + assert not results["timed_out"] + assert "error-type" not in results es.clear_scroll.assert_called_once_with(body={"scroll_id": ["some-scroll-id"]}) @@ -2179,12 +2174,11 @@ async def test_param_body_mandatory(self, es): params = { "id": "rename" } - with self.assertRaisesRegex(exceptions.DataError, - "Parameter source for operation 'put-pipeline' did not provide the mandatory parameter 'body'. " + with pytest.raises(exceptions.DataError, match="Parameter source for operation 'put-pipeline' did not provide the mandatory parameter 'body'. " "Add it to your parameter source and try again."): await r(es, params) - self.assertEqual(0, es.ingest.put_pipeline.call_count) + assert es.ingest.put_pipeline.call_count == 0 @mock.patch("elasticsearch.Elasticsearch") @run_async @@ -2196,12 +2190,11 @@ async def test_param_id_mandatory(self, es): params = { "body": {} } - with self.assertRaisesRegex(exceptions.DataError, - "Parameter source for operation 'put-pipeline' did not provide the mandatory parameter 'id'. " + with pytest.raises(exceptions.DataError, match="Parameter source for operation 'put-pipeline' did not provide the mandatory parameter 'id'. " "Add it to your parameter source and try again."): await r(es, params) - self.assertEqual(0, es.ingest.put_pipeline.call_count) + assert es.ingest.put_pipeline.call_count == 0 class ClusterHealthRunnerTests(TestCase): @@ -2222,13 +2215,13 @@ async def test_waits_for_expected_cluster_status(self, es): result = await r(es, params) - self.assertDictEqual({ + assert result == { "weight": 1, "unit": "ops", "success": True, "cluster-status": "green", "relocating-shards": 0 - }, result) + } es.cluster.health.assert_called_once_with(params={"wait_for_status": "green"}) @@ -2249,13 +2242,13 @@ async def test_accepts_better_cluster_status(self, es): result = await r(es, params) - self.assertDictEqual({ + assert result == { "weight": 1, "unit": "ops", "success": True, "cluster-status": "green", "relocating-shards": 0 - }, result) + } es.cluster.health.assert_called_once_with(params={"wait_for_status": "yellow"}) @@ -2279,13 +2272,13 @@ async def test_cluster_health_with_timeout_and_headers(self, es): result = await cluster_health_runner(es, params) - self.assertDictEqual({ + assert result == { "weight": 1, "unit": "ops", "success": True, "cluster-status": "green", "relocating-shards": 0 - }, result) + } es.cluster.health.assert_called_once_with(headers={"header1": "value1"}, opaque_id="testid-1", @@ -2311,13 +2304,13 @@ async def test_rejects_relocating_shards(self, es): result = await r(es, params) - self.assertDictEqual({ + assert result == { "weight": 1, "unit": "ops", "success": False, "cluster-status": "yellow", "relocating-shards": 3 - }, result) + } es.cluster.health.assert_called_once_with(index="logs-*", params={"wait_for_status": "red", "wait_for_no_relocating_shards": True}) @@ -2339,13 +2332,13 @@ async def test_rejects_unknown_cluster_status(self, es): result = await r(es, params) - self.assertDictEqual({ + assert result == { "weight": 1, "unit": "ops", "success": False, "cluster-status": None, "relocating-shards": 0 - }, result) + } es.cluster.health.assert_called_once_with(params={"wait_for_status": "green"}) @@ -2372,11 +2365,11 @@ async def 
test_creates_multiple_indices(self, es): result = await r(es, params) - self.assertDictEqual({ + assert result == { "weight": 2, "unit": "ops", "success": True - }, result) + } es.indices.create.assert_has_calls([ mock.call(index="indexA", body={"settings": {}}, params=request_params), @@ -2406,11 +2399,11 @@ async def test_create_with_timeout_and_headers(self, es): result = await create_index_runner(es, params) - self.assertDictEqual({ + assert result == { "weight": 1, "unit": "ops", "success": True - }, result) + } es.indices.create.assert_called_once_with(index="indexA", @@ -2442,11 +2435,11 @@ async def test_ignore_invalid_params(self, es): result = await r(es, params) - self.assertDictEqual({ + assert result == { "weight": 1, "unit": "ops", "success": True - }, result) + } es.indices.create.assert_called_once_with(index="indexA", body={"settings": {}}, @@ -2460,12 +2453,11 @@ async def test_param_indices_mandatory(self, es): r = runner.CreateIndex() params = {} - with self.assertRaisesRegex(exceptions.DataError, - "Parameter source for operation 'create-index' did not provide the mandatory parameter 'indices'. " + with pytest.raises(exceptions.DataError, match="Parameter source for operation 'create-index' did not provide the mandatory parameter 'indices'. " "Add it to your parameter source and try again."): await r(es, params) - self.assertEqual(0, es.indices.create.call_count) + assert es.indices.create.call_count == 0 class CreateDataStreamRunnerTests(TestCase): @@ -2490,11 +2482,11 @@ async def test_creates_multiple_data_streams(self, es): result = await r(es, params) - self.assertDictEqual({ + assert result == { "weight": 2, "unit": "ops", "success": True - }, result) + } es.indices.create_data_stream.assert_has_calls([ mock.call("data-stream-A", params=request_params), @@ -2509,12 +2501,11 @@ async def test_param_data_streams_mandatory(self, es): r = runner.CreateDataStream() params = {} - with self.assertRaisesRegex(exceptions.DataError, - "Parameter source for operation 'create-data-stream' did not provide the " + with pytest.raises(exceptions.DataError, match="Parameter source for operation 'create-data-stream' did not provide the " "mandatory parameter 'data-streams'. 
Add it to your parameter source and try again."): await r(es, params) - self.assertEqual(0, es.indices.create_data_stream.call_count) + assert es.indices.create_data_stream.call_count == 0 class DeleteIndexRunnerTests(TestCase): @@ -2535,11 +2526,11 @@ async def test_deletes_existing_indices(self, es): result = await r(es, params) - self.assertDictEqual({ + assert result == { "weight": 1, "unit": "ops", "success": True - }, result) + } es.cluster.put_settings.assert_has_calls([ mock.call(body={"transient": {"action.destructive_requires_name": False}}), @@ -2566,11 +2557,11 @@ async def test_deletes_all_indices(self, es): result = await r(es, params) - self.assertDictEqual({ + assert result == { "weight": 2, "unit": "ops", "success": True - }, result) + } es.cluster.put_settings.assert_has_calls([ mock.call(body={"transient": {"action.destructive_requires_name": False}}), @@ -2580,7 +2571,7 @@ async def test_deletes_all_indices(self, es): mock.call(index="indexA", params=params["request-params"]), mock.call(index="indexB", params=params["request-params"]) ]) - self.assertEqual(0, es.indices.exists.call_count) + assert es.indices.exists.call_count == 0 class DeleteDataStreamRunnerTests(TestCase): @@ -2600,11 +2591,11 @@ async def test_deletes_existing_data_streams(self, es): result = await r(es, params) - self.assertDictEqual({ + assert result == { "weight": 1, "unit": "ops", "success": True - }, result) + } es.indices.delete_data_stream.assert_called_once_with("data-stream-B", params={}) @@ -2626,17 +2617,17 @@ async def test_deletes_all_data_streams(self, es): result = await r(es, params) - self.assertDictEqual({ + assert result == { "weight": 2, "unit": "ops", "success": True - }, result) + } es.indices.delete_data_stream.assert_has_calls([ mock.call("data-stream-A", ignore=[404], params=params["request-params"]), mock.call("data-stream-B", ignore=[404], params=params["request-params"]) ]) - self.assertEqual(0, es.indices.exists.call_count) + assert es.indices.exists.call_count == 0 class CreateIndexTemplateRunnerTests(TestCase): @@ -2660,11 +2651,11 @@ async def test_create_index_templates(self, es): result = await r(es, params) - self.assertDictEqual({ + assert result == { "weight": 2, "unit": "ops", "success": True - }, result) + } es.indices.put_template.assert_has_calls([ mock.call(name="templateA", body={"settings": {}}, params=params["request-params"]), @@ -2679,12 +2670,11 @@ async def test_param_templates_mandatory(self, es): r = runner.CreateIndexTemplate() params = {} - with self.assertRaisesRegex(exceptions.DataError, - "Parameter source for operation 'create-index-template' did not provide the mandatory parameter " + with pytest.raises(exceptions.DataError, match="Parameter source for operation 'create-index-template' did not provide the mandatory parameter " "'templates'. 
Add it to your parameter source and try again."): await r(es, params) - self.assertEqual(0, es.indices.put_template.call_count) + assert es.indices.put_template.call_count == 0 class DeleteIndexTemplateRunnerTests(TestCase): @@ -2708,11 +2698,11 @@ async def test_deletes_all_index_templates(self, es): result = await r(es, params) # 2 times delete index template, one time delete matching indices - self.assertDictEqual({ + assert result == { "weight": 3, "unit": "ops", "success": True - }, result) + } es.indices.delete_template.assert_has_calls([ mock.call(name="templateA", params=params["request-params"]), @@ -2742,15 +2732,15 @@ async def test_deletes_only_existing_index_templates(self, es): result = await r(es, params) # 2 times delete index template, one time delete matching indices - self.assertDictEqual({ + assert result == { "weight": 1, "unit": "ops", "success": True - }, result) + } es.indices.delete_template.assert_called_once_with(name="templateB", params=params["request-params"]) # not called because the matching index is empty. - self.assertEqual(0, es.indices.delete.call_count) + assert es.indices.delete.call_count == 0 @mock.patch("elasticsearch.Elasticsearch") @run_async @@ -2758,12 +2748,11 @@ async def test_param_templates_mandatory(self, es): r = runner.DeleteIndexTemplate() params = {} - with self.assertRaisesRegex(exceptions.DataError, - "Parameter source for operation 'delete-index-template' did not provide the mandatory parameter " + with pytest.raises(exceptions.DataError, match="Parameter source for operation 'delete-index-template' did not provide the mandatory parameter " "'templates'. Add it to your parameter source and try again."): await r(es, params) - self.assertEqual(0, es.indices.delete_template.call_count) + assert es.indices.delete_template.call_count == 0 class CreateComponentTemplateRunnerTests(TestCase): @@ -2784,11 +2773,11 @@ async def test_create_index_templates(self, es): } result = await r(es, params) - self.assertDictEqual({ + assert result == { "weight": 2, "unit": "ops", "success": True - }, result) + } es.cluster.put_component_template.assert_has_calls([ mock.call(name="templateA", body={"template":{"mappings":{"properties":{"@timestamp":{"type": "date"}}}}}, params=params["request-params"]), @@ -2804,12 +2793,11 @@ async def test_param_templates_mandatory(self, es): r = runner.CreateComponentTemplate() params = {} - with self.assertRaisesRegex(exceptions.DataError, - "Parameter source for operation 'create-component-template' did not provide the mandatory parameter " + with pytest.raises(exceptions.DataError, match="Parameter source for operation 'create-component-template' did not provide the mandatory parameter " "'templates'. 
Add it to your parameter source and try again."): await r(es, params) - self.assertEqual(0, es.cluster.put_component_template.call_count) + assert es.cluster.put_component_template.call_count == 0 class DeleteComponentTemplateRunnerTests(TestCase): @@ -2832,11 +2820,11 @@ async def test_deletes_all_index_templates(self, es): "only-if-exists": False } result = await r(es, params) - self.assertDictEqual({ + assert result == { "weight": 2, "unit": "ops", "success": True - }, result) + } es.cluster.delete_component_template.assert_has_calls([ mock.call(name="templateA", params=params["request-params"], ignore=[404]), @@ -2869,11 +2857,11 @@ def _side_effect(http_method, path): } result = await r(es, params) - self.assertDictEqual({ + assert result == { "weight": 1, "unit": "ops", "success": True - }, result) + } es.cluster.delete_component_template.assert_called_once_with(name="templateB", params=params["request-params"]) @@ -2883,12 +2871,11 @@ async def test_param_templates_mandatory(self, es): r = runner.DeleteComponentTemplate() params = {} - with self.assertRaisesRegex(exceptions.DataError, - "Parameter source for operation 'delete-component-template' did not provide the mandatory parameter " + with pytest.raises(exceptions.DataError, match="Parameter source for operation 'delete-component-template' did not provide the mandatory parameter " "'templates'. Add it to your parameter source and try again."): await r(es, params) - self.assertEqual(0, es.indices.delete_template.call_count) + assert es.indices.delete_template.call_count == 0 class CreateComposableTemplateRunnerTests(TestCase): @@ -2910,11 +2897,11 @@ async def test_create_index_templates(self, es): } result = await r(es, params) - self.assertDictEqual({ + assert result == { "weight": 2, "unit": "ops", "success": True - }, result) + } es.cluster.put_index_template.assert_has_calls([ mock.call(name="templateA", body={"index_patterns":["logs-*"],"template":{"settings":{"index.number_of_shards":3}}, "composed_of":["ct1","ct2"]}, params=params["request-params"]), @@ -2930,12 +2917,11 @@ async def test_param_templates_mandatory(self, es): r = runner.CreateComposableTemplate() params = {} - with self.assertRaisesRegex(exceptions.DataError, - "Parameter source for operation 'create-composable-template' did not provide the mandatory parameter " + with pytest.raises(exceptions.DataError, match="Parameter source for operation 'create-composable-template' did not provide the mandatory parameter " "'templates'. 
Add it to your parameter source and try again."): await r(es, params) - self.assertEqual(0, es.cluster.put_index_template.call_count) + assert es.cluster.put_index_template.call_count == 0 class DeleteComposableTemplateRunnerTests(TestCase): @@ -2960,11 +2946,11 @@ async def test_deletes_all_index_templates(self, es): result = await r(es, params) # 2 times delete index template, one time delete matching indices - self.assertDictEqual({ + assert result == { "weight": 3, "unit": "ops", "success": True - }, result) + } es.indices.delete_index_template.assert_has_calls([ mock.call(name="templateA", params=params["request-params"], ignore=[404]), @@ -2994,15 +2980,15 @@ async def test_deletes_only_existing_index_templates(self, es): result = await r(es, params) # 2 times delete index template, one time delete matching indices - self.assertDictEqual({ + assert result == { "weight": 1, "unit": "ops", "success": True - }, result) + } es.indices.delete_index_template.assert_called_once_with(name="templateB", params=params["request-params"]) # not called because the matching index is empty. - self.assertEqual(0, es.indices.delete.call_count) + assert es.indices.delete.call_count == 0 @mock.patch("elasticsearch.Elasticsearch") @run_async @@ -3010,12 +2996,11 @@ async def test_param_templates_mandatory(self, es): r = runner.DeleteComposableTemplate() params = {} - with self.assertRaisesRegex(exceptions.DataError, - "Parameter source for operation 'delete-composable-template' did not provide the mandatory parameter " + with pytest.raises(exceptions.DataError, match="Parameter source for operation 'delete-composable-template' did not provide the mandatory parameter " "'templates'. Add it to your parameter source and try again."): await r(es, params) - self.assertEqual(0, es.indices.delete_index_template.call_count) + assert es.indices.delete_index_template.call_count == 0 class CreateMlDatafeedTests(TestCase): @@ -3389,9 +3374,9 @@ async def test_raises_missing_slash(self, es): } with mock.patch.object(r.logger, "error") as mocked_error_logger: - with self.assertRaises(exceptions.RallyAssertionError) as ctx: + with pytest.raises(exceptions.RallyAssertionError) as ctx: await r(es, params) - self.assertEqual("RawRequest [_cat/count] failed. Path parameter must begin with a '/'.", ctx.exception.args[0]) + assert ctx.value.args[0] == "RawRequest [_cat/count] failed. Path parameter must begin with a '/'." mocked_error_logger.assert_has_calls([ mock.call("RawRequest failed. Path parameter: [%s] must begin with a '/'.", params["path"]) ]) @@ -3539,15 +3524,14 @@ class SleepTests(TestCase): @run_async async def test_missing_parameter(self, sleep, es): r = runner.Sleep() - with self.assertRaisesRegex(exceptions.DataError, - "Parameter source for operation 'sleep' did not provide the mandatory parameter " + with pytest.raises(exceptions.DataError, match="Parameter source for operation 'sleep' did not provide the mandatory parameter " "'duration'. 
Add it to your parameter source and try again."): await r(es, params={}) - self.assertEqual(0, es.call_count) - self.assertEqual(1, es.on_request_start.call_count) - self.assertEqual(1, es.on_request_end.call_count) - self.assertEqual(0, sleep.call_count) + assert es.call_count == 0 + assert es.on_request_start.call_count == 1 + assert es.on_request_end.call_count == 1 + assert sleep.call_count == 0 @mock.patch("elasticsearch.Elasticsearch") # To avoid real sleeps in unit tests @@ -3557,9 +3541,9 @@ async def test_sleep(self, sleep, es): r = runner.Sleep() await r(es, params={"duration": 4.3}) - self.assertEqual(0, es.call_count) - self.assertEqual(1, es.on_request_start.call_count) - self.assertEqual(1, es.on_request_end.call_count) + assert es.call_count == 0 + assert es.on_request_start.call_count == 1 + assert es.on_request_end.call_count == 1 sleep.assert_called_once_with(4.3) @@ -3781,7 +3765,7 @@ async def test_wait_for_snapshot_create_entire_lifecycle(self, es): ignore_unavailable=True ) - self.assertDictEqual({ + assert result == { "weight": 243468188055, "unit": "byte", "success": True, @@ -3790,9 +3774,9 @@ async def test_wait_for_snapshot_create_entire_lifecycle(self, es): "throughput": 218658731.10622546, "start_time_millis": 1597317564956, "stop_time_millis": 1597317564956 + 1113462 - }, result) + } - self.assertEqual(3, es.snapshot.status.call_count) + assert es.snapshot.status.call_count == 3 @mock.patch("elasticsearch.Elasticsearch") @run_async @@ -3825,7 +3809,7 @@ async def test_wait_for_snapshot_create_immediate_success(self, es): r = runner.WaitForSnapshotCreate() result = await r(es, params) - self.assertDictEqual({ + assert result == { "weight": 9399505, "unit": "byte", "success": True, @@ -3834,7 +3818,7 @@ async def test_wait_for_snapshot_create_immediate_success(self, es): "throughput": 46997525.0, "start_time_millis": 1591776481060, "stop_time_millis": 1591776481060 + 200 - }, result) + } es.snapshot.status.assert_called_once_with(repository="backups", snapshot="snapshot-001", @@ -3863,9 +3847,9 @@ async def test_wait_for_snapshot_create_failure(self, es): r = runner.WaitForSnapshotCreate() with mock.patch.object(r.logger, "error") as mocked_error_logger: - with self.assertRaises(exceptions.RallyAssertionError) as ctx: + with pytest.raises(exceptions.RallyAssertionError) as ctx: await r(es, params) - self.assertEqual("Snapshot [snapshot-001] failed. Please check logs.", ctx.exception.args[0]) + assert ctx.value.args[0] == "Snapshot [snapshot-001] failed. Please check logs." mocked_error_logger.assert_has_calls([ mock.call("Snapshot [%s] failed. 
Response:\n%s", "snapshot-001", json.dumps(snapshot_status, indent=2)) ]) @@ -4065,17 +4049,17 @@ async def test_waits_for_ongoing_indices_recovery(self, es): }) # sum of both shards - self.assertEqual(237783878, result["weight"]) - self.assertEqual("byte", result["unit"]) - self.assertTrue(result["success"]) + assert result["weight"] == 237783878 + assert result["unit"] == "byte" + assert result["success"] # bytes recovered within these 5 seconds - self.assertEqual(47556775.6, result["throughput"]) - self.assertEqual(1393244155000, result["start_time_millis"]) - self.assertEqual(1393244160000, result["stop_time_millis"]) + assert result["throughput"] == 47556775.6 + assert result["start_time_millis"] == 1393244155000 + assert result["stop_time_millis"] == 1393244160000 es.indices.recovery.assert_called_with(index="index1") # retries four times - self.assertEqual(4, es.indices.recovery.call_count) + assert es.indices.recovery.call_count == 4 class ShrinkIndexTests(TestCase): @@ -4446,15 +4430,15 @@ async def test_wait_for_transform(self, es): }) r = runner.WaitForTransform() - self.assertFalse(r.completed) - self.assertEqual(r.percent_completed, 0.0) + assert not r.completed + assert r.percent_completed == 0.0 result = await r(es, params) - self.assertTrue(r.completed) - self.assertEqual(r.percent_completed, 1.0) - self.assertEqual(2, result["weight"], 2) - self.assertEqual(result["unit"], "docs") + assert r.completed + assert r.percent_completed == 1.0 + assert result["weight"] == 2, 2 + assert result["unit"] == "docs" es.transform.stop_transform.assert_called_once_with(transform_id=transform_id, force=params["force"], timeout=params["timeout"], @@ -4622,21 +4606,21 @@ async def test_wait_for_transform_progress(self, es): ] r = runner.WaitForTransform() - self.assertFalse(r.completed) - self.assertEqual(r.percent_completed, 0.0) + assert not r.completed + assert r.percent_completed == 0.0 total_calls = 0 while not r.completed: result = await r(es, params) total_calls += 1 if total_calls < 4: - self.assertAlmostEqual(r.percent_completed, (total_calls * 10.20) / 100.0) + assert round(abs(r.percent_completed-(total_calls * 10.20) / 100.0), 7) == 0 - self.assertEqual(total_calls, 4) - self.assertTrue(r.completed) - self.assertEqual(r.percent_completed, 1.0) - self.assertEqual(result["weight"], 60000) - self.assertEqual(result["unit"], "docs") + assert total_calls == 4 + assert r.completed + assert r.percent_completed == 1.0 + assert result["weight"] == 60000 + assert result["unit"] == "docs" es.transform.stop_transform.assert_called_once_with(transform_id=transform_id, force=params["force"], timeout=params["timeout"], @@ -4683,7 +4667,7 @@ async def test_submit_async_search(self, es): async with runner.CompositeContext(): await r(es, params) # search id is registered in context - self.assertEqual("12345", runner.CompositeContext.get("search-1")) + assert runner.CompositeContext.get("search-1") == "12345" es.async_search.submit.assert_called_once_with(body={ "query": { @@ -4717,7 +4701,7 @@ async def test_get_async_search(self, es): async with runner.CompositeContext(): runner.CompositeContext.put("search-1", "12345") response = await r(es, params) - self.assertDictEqual(response, { + assert { "weight": 1, "unit": "ops", "success": True, @@ -4729,7 +4713,7 @@ async def test_get_async_search(self, es): "took": 1122 } } - }) + } == response es.async_search.get.assert_called_once_with(id="12345", params={}) @@ -4774,7 +4758,7 @@ async def test_creates_point_in_time(self, es): r = 
runner.OpenPointInTime() async with runner.CompositeContext(): await r(es, params) - self.assertEqual(pit_id, runner.CompositeContext.get("open-pit-test")) + assert runner.CompositeContext.get("open-pit-test") == pit_id @mock.patch("elasticsearch.Elasticsearch") @run_async @@ -4788,10 +4772,10 @@ async def test_can_only_be_run_in_composite(self, es): es.open_point_in_time.return_value = as_future({"id": pit_id}) r = runner.OpenPointInTime() - with self.assertRaises(exceptions.RallyAssertionError) as ctx: + with pytest.raises(exceptions.RallyAssertionError) as ctx: await r(es, params) - self.assertEqual("This operation is only allowed inside a composite operation.", ctx.exception.args[0]) + assert ctx.value.args[0] == "This operation is only allowed inside a composite operation." class ClosePointInTimeTests(TestCase): @mock.patch("elasticsearch.Elasticsearch") @@ -4879,7 +4863,7 @@ async def test_search_after_with_pit(self, es): runner.CompositeContext.put(pit_op, pit_id) await r(es, params) # make sure pit_id is updated afterward - self.assertEqual("fedcba9876543211", runner.CompositeContext.get(pit_op)) + assert runner.CompositeContext.get(pit_op) == "fedcba9876543211" es.transport.perform_request.assert_has_calls([mock.call("GET", "/_search", params={}, body={ @@ -5025,8 +5009,8 @@ def test_extract_all_properties(self): "timed_out": False, "took": 10} expected_sort_value = [1609780186, "2"] - self.assertEqual(expected_props, props) - self.assertEqual(expected_sort_value, last_sort) + assert props == expected_props + assert last_sort == expected_sort_value def test_extract_ignore_point_in_time(self): target = runner.SearchAfterExtractor() @@ -5036,8 +5020,8 @@ def test_extract_ignore_point_in_time(self): "timed_out": False, "took": 10} expected_sort_value = [1609780186, "2"] - self.assertEqual(expected_props, props) - self.assertEqual(expected_sort_value, last_sort) + assert props == expected_props + assert last_sort == expected_sort_value def test_extract_uses_provided_hits_total(self): target = runner.SearchAfterExtractor() @@ -5048,18 +5032,17 @@ def test_extract_uses_provided_hits_total(self): "timed_out": False, "took": 10} expected_sort_value = [1609780186, "2"] - self.assertEqual(expected_props, props) - self.assertEqual(expected_sort_value, last_sort) + assert props == expected_props + assert last_sort == expected_sort_value def test_extract_missing_required_point_in_time(self): response_copy = json.loads(self.response_text) del response_copy["pit_id"] response_copy_bytesio = io.BytesIO(json.dumps(response_copy).encode()) target = runner.SearchAfterExtractor() - with self.assertRaises(exceptions.RallyAssertionError) as ctx: + with pytest.raises(exceptions.RallyAssertionError) as ctx: target(response=response_copy_bytesio, get_point_in_time=True, hits_total=None) - self.assertEqual("Paginated query failure: pit_id was expected but not found in the response.", - ctx.exception.args[0]) + assert ctx.value.args[0] == "Paginated query failure: pit_id was expected but not found in the response." 
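Worth spelling out, since the conversions alternate between two styles: pytest.raises(..., match=...) applies re.search against the string form of the exception, so messages containing regex metacharacters must either be escaped (as in the r"Expected \[hits...\]" patterns earlier in this file) or compared for exact equality via ctx.value.args[0], as above. A small illustrative sketch (hypothetical test names; only pytest is assumed):

    import pytest

    def test_match_is_a_regex():
        # brackets open a character class in a regex, so they are escaped for match=
        with pytest.raises(ValueError, match=r"Expected \[hits\] to be > \[0\]"):
            raise ValueError("Expected [hits] to be > [0] but was [0].")

    def test_exact_comparison_needs_no_escaping():
        with pytest.raises(ValueError) as ctx:
            raise ValueError("Expected [hits] to be > [0] but was [0].")
        assert ctx.value.args[0] == "Expected [hits] to be > [0] but was [0]."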
def test_extract_missing_ignored_point_in_time(self): response_copy = json.loads(self.response_text) @@ -5072,49 +5055,46 @@ def test_extract_missing_ignored_point_in_time(self): "timed_out": False, "took": 10} expected_sort_value = [1609780186, "2"] - self.assertEqual(expected_props, props) - self.assertEqual(expected_sort_value, last_sort) + assert props == expected_props + assert last_sort == expected_sort_value class CompositeContextTests(TestCase): def test_cannot_be_used_outside_of_composite(self): - with self.assertRaises(exceptions.RallyAssertionError) as ctx: + with pytest.raises(exceptions.RallyAssertionError) as ctx: runner.CompositeContext.put("test", 1) - self.assertEqual("This operation is only allowed inside a composite operation.", ctx.exception.args[0]) + assert ctx.value.args[0] == "This operation is only allowed inside a composite operation." @run_async async def test_put_get_and_remove(self): async with runner.CompositeContext(): runner.CompositeContext.put("test", 1) runner.CompositeContext.put("don't clear this key", 1) - self.assertEqual(runner.CompositeContext.get("test"), 1) + assert runner.CompositeContext.get("test") == 1 runner.CompositeContext.remove("test") # context is cleared properly async with runner.CompositeContext(): - with self.assertRaises(KeyError) as ctx: + with pytest.raises(KeyError) as ctx: runner.CompositeContext.get("don't clear this key") - self.assertEqual("Unknown property [don't clear this key]. Currently recognized properties are [].", - ctx.exception.args[0]) + assert ctx.value.args[0] == "Unknown property [don't clear this key]. Currently recognized properties are []." @run_async async def test_fails_to_read_unknown_key(self): async with runner.CompositeContext(): - with self.assertRaises(KeyError) as ctx: + with pytest.raises(KeyError) as ctx: runner.CompositeContext.put("test", 1) runner.CompositeContext.get("unknown") - self.assertEqual("Unknown property [unknown]. Currently recognized properties are [test].", - ctx.exception.args[0]) + assert ctx.value.args[0] == "Unknown property [unknown]. Currently recognized properties are [test]." @run_async async def test_fails_to_remove_unknown_key(self): async with runner.CompositeContext(): - with self.assertRaises(KeyError) as ctx: + with pytest.raises(KeyError) as ctx: runner.CompositeContext.put("test", 1) runner.CompositeContext.remove("unknown") - self.assertEqual("Unknown property [unknown]. Currently recognized properties are [test].", - ctx.exception.args[0]) + assert ctx.value.args[0] == "Unknown property [unknown]. Currently recognized properties are [test]." 
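One subtlety behind the CompositeContext assertions just above: for KeyError, str() of the exception yields the repr of the key (it gains surrounding quotes), which is why these tests compare ctx.value.args[0] rather than using match= or str(). A quick illustrative sketch (hypothetical test name, pytest only):

    import pytest

    def test_keyerror_str_adds_quotes():
        with pytest.raises(KeyError) as ctx:
            raise KeyError("Unknown property [unknown].")
        # args[0] holds the raw message ...
        assert ctx.value.args[0] == "Unknown property [unknown]."
        # ... while str() yields the key's repr, including quotes
        assert str(ctx.value) == "'Unknown property [unknown].'"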
class CompositeTests(TestCase): @@ -5278,8 +5258,7 @@ async def test_propagates_violated_assertions(self, es): } r = runner.Composite() - with self.assertRaisesRegex(exceptions.RallyTaskAssertionError, - r"Expected \[hits\] to be > \[0\] but was \[0\]."): + with pytest.raises(exceptions.RallyTaskAssertionError, match=r"Expected \[hits\] to be > \[0\] but was \[0\]."): await r(es, params) es.transport.perform_request.assert_has_calls([ @@ -5353,7 +5332,7 @@ async def test_executes_tasks_in_specified_order(self, es): r.supported_op_types = ["call-recorder"] await r(es, params) - self.assertEqual([ + assert self.call_recorder_runner.calls == [ "initial-call", # concurrent "stream-a", "stream-b", @@ -5361,7 +5340,7 @@ async def test_executes_tasks_in_specified_order(self, es): # concurrent "stream-c", "stream-d", "call-after-stream-cd" - ], self.call_recorder_runner.calls) + ] @run_async async def test_adds_request_timings(self): @@ -5400,27 +5379,27 @@ async def test_adds_request_timings(self): r = runner.Composite() response = await r(es, params) - self.assertEqual(1, response["weight"]) - self.assertEqual("ops", response["unit"]) + assert response["weight"] == 1 + assert response["unit"] == "ops" timings = response["dependent_timing"] - self.assertEqual(3, len(timings)) + assert len(timings) == 3 - self.assertEqual("initial-call", timings[0]["operation"]) - self.assertAlmostEqual(0.1, timings[0]["service_time"], delta=0.05) + assert timings[0]["operation"] == "initial-call" + assert abs(0.1-timings[0]["service_time"]) < 0.05 - self.assertEqual("stream-a", timings[1]["operation"]) - self.assertAlmostEqual(0.2, timings[1]["service_time"], delta=0.05) + assert timings[1]["operation"] == "stream-a" + assert abs(0.2-timings[1]["service_time"]) < 0.05 - self.assertEqual("stream-b", timings[2]["operation"]) - self.assertAlmostEqual(0.1, timings[2]["service_time"], delta=0.05) + assert timings[2]["operation"] == "stream-b" + assert abs(0.1-timings[2]["service_time"]) < 0.05 # common properties for timing in timings: - self.assertEqual("sleep", timing["operation-type"]) - self.assertIn("absolute_time", timing) - self.assertIn("request_start", timing) - self.assertIn("request_end", timing) - self.assertGreater(timing["request_end"], timing["request_start"]) + assert timing["operation-type"] == "sleep" + assert "absolute_time" in timing + assert "request_start" in timing + assert "request_end" in timing + assert timing["request_end"] > timing["request_start"] @mock.patch("elasticsearch.Elasticsearch") @run_async @@ -5458,7 +5437,7 @@ async def test_limits_connections(self, es): await r(es, params) # composite runner should limit to two concurrent connections - self.assertEqual(2, self.counter_runner.max_value) + assert self.counter_runner.max_value == 2 @mock.patch("elasticsearch.Elasticsearch") @run_async @@ -5486,10 +5465,10 @@ async def test_rejects_invalid_stream(self, es): } r = runner.Composite() - with self.assertRaises(exceptions.RallyAssertionError) as ctx: + with pytest.raises(exceptions.RallyAssertionError) as ctx: await r(es, params) - self.assertEqual("Requests structure must contain [stream] or [operation-type].", ctx.exception.args[0]) + assert ctx.value.args[0] == "Requests structure must contain [stream] or [operation-type]." 
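The timing assertions above translate assertAlmostEqual mechanically: the default places=7 form becomes round(abs(a - b), 7) == 0, and the delta form becomes abs(a - b) < delta. pytest.approx would express the same tolerance checks more idiomatically; a sketch of the equivalents for comparison (not what this change uses; service_time is a made-up stand-in value):

    import pytest

    def test_service_time_tolerances():
        service_time = 0.10000000041  # stand-in for a measured duration

        # the forms this conversion produces:
        assert round(abs(0.1 - service_time), 7) == 0  # from assertAlmostEqual(0.1, x)
        assert abs(0.1 - service_time) < 0.05          # from assertAlmostEqual(0.1, x, delta=0.05)

        # idiomatic pytest equivalents:
        assert service_time == pytest.approx(0.1, abs=1e-7)
        assert service_time == pytest.approx(0.1, abs=0.05)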
@mock.patch("elasticsearch.Elasticsearch") @run_async @@ -5507,12 +5486,11 @@ async def test_rejects_unsupported_operations(self, es): } r = runner.Composite() - with self.assertRaises(exceptions.RallyAssertionError) as ctx: + with pytest.raises(exceptions.RallyAssertionError) as ctx: await r(es, params) - self.assertEqual("Unsupported operation-type [bulk]. Use one of [open-point-in-time, close-point-in-time, " - "search, raw-request, sleep, submit-async-search, get-async-search, delete-async-search].", - ctx.exception.args[0]) + assert ctx.value.args[0] == "Unsupported operation-type [bulk]. Use one of [open-point-in-time, close-point-in-time, " \ + "search, raw-request, sleep, submit-async-search, get-async-search, delete-async-search]." class RequestTimingTests(TestCase): class StaticRequestTiming: @@ -5555,17 +5533,17 @@ async def test_merges_timing_info(self, es): response = await timer(multi_cluster_client, params) - self.assertEqual(5, response["weight"]) - self.assertEqual("ops", response["unit"]) - self.assertTrue(response["success"]) - self.assertIn("dependent_timing", response) + assert response["weight"] == 5 + assert response["unit"] == "ops" + assert response["success"] + assert "dependent_timing" in response timing = response["dependent_timing"] - self.assertEqual("unit-test-operation", timing["operation"]) - self.assertEqual("test-op", timing["operation-type"]) - self.assertIsNotNone(timing["absolute_time"]) - self.assertEqual(7, timing["request_start"]) - self.assertEqual(7.1, timing["request_end"]) - self.assertAlmostEqual(0.1, timing["service_time"]) + assert timing["operation"] == "unit-test-operation" + assert timing["operation-type"] == "test-op" + assert timing["absolute_time"] is not None + assert timing["request_start"] == 7 + assert timing["request_end"] == 7.1 + assert round(abs(0.1-timing["service_time"]), 7) == 0 delegate.assert_called_once_with(multi_cluster_client, params) @@ -5586,18 +5564,18 @@ async def test_creates_new_timing_info(self, es): response = await timer(multi_cluster_client, params) # defaults added by the timing runner - self.assertEqual(1, response["weight"]) - self.assertEqual("ops", response["unit"]) - self.assertTrue(response["success"]) + assert response["weight"] == 1 + assert response["unit"] == "ops" + assert response["success"] - self.assertIn("dependent_timing", response) + assert "dependent_timing" in response timing = response["dependent_timing"] - self.assertEqual("unit-test-operation", timing["operation"]) - self.assertEqual("test-op", timing["operation-type"]) - self.assertIsNotNone(timing["absolute_time"]) - self.assertEqual(7, timing["request_start"]) - self.assertEqual(7.1, timing["request_end"]) - self.assertAlmostEqual(0.1, timing["service_time"]) + assert timing["operation"] == "unit-test-operation" + assert timing["operation-type"] == "test-op" + assert timing["absolute_time"] is not None + assert timing["request_start"] == 7 + assert timing["request_end"] == 7.1 + assert round(abs(0.1-timing["service_time"]), 7) == 0 delegate.assert_called_once_with(multi_cluster_client, params) @@ -5625,7 +5603,7 @@ async def test_is_transparent_on_exception_when_no_retries(self): } retrier = runner.Retry(delegate) - with self.assertRaises(elasticsearch.ConnectionError): + with pytest.raises(elasticsearch.ConnectionError): await retrier(es, params) delegate.assert_called_once_with(es, params) @@ -5643,7 +5621,7 @@ async def test_is_transparent_on_application_error_when_no_retries(self): result = await retrier(es, params) - 
self.assertEqual(original_return_value, result) + assert result == original_return_value delegate.assert_called_once_with(es, params) @run_async @@ -5679,7 +5657,7 @@ async def test_retries_on_timeout_if_wanted_and_raises_if_no_recovery(self): } retrier = runner.Retry(delegate) - with self.assertRaises(elasticsearch.ConnectionError): + with pytest.raises(elasticsearch.ConnectionError): await retrier(es, params) delegate.assert_has_calls([ @@ -5706,7 +5684,7 @@ async def test_retries_on_timeout_if_wanted_and_returns_first_call(self): retrier = runner.Retry(delegate) result = await retrier(es, params) - self.assertEqual(failed_return_value, result) + assert result == failed_return_value delegate.assert_has_calls([ # has returned a connection error @@ -5740,7 +5718,7 @@ async def test_retries_mixed_timeout_and_application_errors(self): retrier = runner.Retry(delegate) result = await retrier(es, params) - self.assertEqual(success_return_value, result) + assert result == success_return_value delegate.assert_has_calls([ # connection error @@ -5769,7 +5747,7 @@ async def test_does_not_retry_on_timeout_if_not_wanted(self): } retrier = runner.Retry(delegate) - with self.assertRaises(elasticsearch.ConnectionTimeout): + with pytest.raises(elasticsearch.ConnectionTimeout): await retrier(es, params) delegate.assert_called_once_with(es, params) @@ -5794,7 +5772,7 @@ async def test_retries_on_application_error_if_wanted(self): result = await retrier(es, params) - self.assertEqual(success_return_value, result) + assert result == success_return_value delegate.assert_has_calls([ mock.call(es, params), @@ -5818,7 +5796,7 @@ async def test_does_not_retry_on_application_error_if_not_wanted(self): result = await retrier(es, params) - self.assertEqual(failed_return_value, result) + assert result == failed_return_value delegate.assert_called_once_with(es, params) @@ -5836,7 +5814,7 @@ async def test_assumes_success_if_runner_returns_non_dict(self): result = await retrier(es, params) - self.assertEqual((1, "ops"), result) + assert result == (1, "ops") delegate.assert_called_once_with(es, params) @@ -5861,7 +5839,7 @@ async def test_retries_until_success(self): result = await retrier(es, params) - self.assertEqual(success_return_value, result) + assert result == success_return_value delegate.assert_has_calls([mock.call(es, params) for _ in range(failure_count + 1)]) @@ -5870,10 +5848,10 @@ class RemovePrefixTests(TestCase): def test_remove_matching_prefix(self): suffix = runner.remove_prefix("index-20201117", "index") - self.assertEqual(suffix, "-20201117") + assert suffix == "-20201117" def test_prefix_doesnt_exit(self): index_name = "index-20201117" suffix = runner.remove_prefix(index_name, "unrelatedprefix") - self.assertEqual(suffix, index_name) + assert index_name == suffix diff --git a/tests/driver/scheduler_test.py b/tests/driver/scheduler_test.py index 92bb2dbf9..e1ae5abfc 100644 --- a/tests/driver/scheduler_test.py +++ b/tests/driver/scheduler_test.py @@ -19,6 +19,8 @@ import random from unittest import TestCase +import pytest + from esrally import exceptions from esrally.driver import scheduler from esrally.track import track @@ -33,17 +35,17 @@ def assertThroughputEquals(self, sched, expected_average_throughput, msg="", rel for _ in range(0, SchedulerTestCase.ITERATIONS): tn = sched.next(0) # schedule must not go backwards in time - self.assertGreaterEqual(tn, 0, msg) + assert tn >= 0, msg sum += tn actual_average_rate = sum / SchedulerTestCase.ITERATIONS expected_lower_bound = (1.0 - relative_delta) * 
expected_average_rate expected_upper_bound = (1.0 + relative_delta) * expected_average_rate - self.assertGreaterEqual(actual_average_rate, expected_lower_bound, - f"{msg}: expected target rate to be >= [{expected_lower_bound}] but was [{actual_average_rate}].") - self.assertLessEqual(actual_average_rate, expected_upper_bound, - f"{msg}: expected target rate to be <= [{expected_upper_bound}] but was [{actual_average_rate}].") + assert actual_average_rate >= expected_lower_bound, \ + f"{msg}: expected target rate to be >= [{expected_lower_bound}] but was [{actual_average_rate}]." + assert actual_average_rate <= expected_upper_bound, \ + f"{msg}: expected target rate to be <= [{expected_upper_bound}] but was [{actual_average_rate}]." class DeterministicSchedulerTests(SchedulerTestCase): @@ -75,10 +77,10 @@ def test_scheduler_rejects_differing_throughput_units(self): }) s = scheduler.UnitAwareScheduler(task=task, scheduler_class=scheduler.DeterministicScheduler) - with self.assertRaises(exceptions.RallyAssertionError) as ex: + with pytest.raises(exceptions.RallyAssertionError) as ex: s.after_request(now=None, weight=1000, unit="docs", request_meta_data=None) - self.assertEqual("Target throughput for [bulk-index] is specified in [MB/s] but the task throughput " - "is measured in [docs/s].", ex.exception.args[0]) + assert ex.value.args[0] == "Target throughput for [bulk-index] is specified in [MB/s] but the task throughput " \ + "is measured in [docs/s]." def test_scheduler_adapts_to_changed_weights(self): task = track.Task(name="bulk-index", @@ -94,18 +96,18 @@ def test_scheduler_adapts_to_changed_weights(self): # first request is unthrottled # suppress pylint false positive # pylint: disable=not-callable - self.assertEqual(0, s.next(0)) + assert s.next(0) == 0 # we'll start with bulks of 1.000 docs, which corresponds to 5 requests per second for all clients s.after_request(now=None, weight=1000, unit="docs", request_meta_data=None) # suppress pylint false positive # pylint: disable=not-callable - self.assertEqual(1 / 5 * task.clients, s.next(0)) + assert s.next(0) == 1 / 5 * task.clients # bulk size changes to 10.000 docs, which means one request every two seconds for all clients s.after_request(now=None, weight=10000, unit="docs", request_meta_data=None) # suppress pylint false positive # pylint: disable=not-callable - self.assertEqual(2 * task.clients, s.next(0)) + assert s.next(0) == 2 * task.clients def test_scheduler_accepts_differing_units_pages_and_ops(self): task = track.Task(name="scroll-query", @@ -122,13 +124,13 @@ def test_scheduler_accepts_differing_units_pages_and_ops(self): # first request is unthrottled # suppress pylint false positive # pylint: disable=not-callable - self.assertEqual(0, s.next(0)) + assert s.next(0) == 0 # no exception despite differing units ... s.after_request(now=None, weight=20, unit="pages", request_meta_data=None) # ... and it is still throttled in ops/s # suppress pylint false positive # pylint: disable=not-callable - self.assertEqual(0.1 * task.clients, s.next(0)) + assert s.next(0) == 0.1 * task.clients def test_scheduler_does_not_change_throughput_for_empty_requests(self): task = track.Task(name="match-all-query", @@ -146,20 +148,20 @@ def test_scheduler_does_not_change_throughput_for_empty_requests(self): s.before_request(now=0) # suppress pylint false positive # pylint: disable=not-callable - self.assertEqual(0, s.next(0)) + assert s.next(0) == 0 # ... 
but it also produced an error (zero ops) s.after_request(now=1, weight=0, unit="ops", request_meta_data=None) # next request is still unthrottled s.before_request(now=1) # suppress pylint false positive # pylint: disable=not-callable - self.assertEqual(0, s.next(0)) + assert s.next(0) == 0 s.after_request(now=2, weight=1, unit="ops", request_meta_data=None) # now we throttle s.before_request(now=2) # suppress pylint false positive # pylint: disable=not-callable - self.assertEqual(0.1 * task.clients, s.next(0)) + assert s.next(0) == 0.1 * task.clients class SchedulerCategorizationTests(TestCase): @@ -174,18 +176,18 @@ def __init__(self, params, my_default_param=True): pass def test_detects_legacy_scheduler(self): - self.assertTrue(scheduler.is_legacy_scheduler(SchedulerCategorizationTests.LegacyScheduler)) - self.assertTrue(scheduler.is_legacy_scheduler(SchedulerCategorizationTests.LegacySchedulerWithAdditionalArgs)) + assert scheduler.is_legacy_scheduler(SchedulerCategorizationTests.LegacyScheduler) + assert scheduler.is_legacy_scheduler(SchedulerCategorizationTests.LegacySchedulerWithAdditionalArgs) def test_a_regular_scheduler_is_not_a_legacy_scheduler(self): - self.assertFalse(scheduler.is_legacy_scheduler(scheduler.DeterministicScheduler)) - self.assertFalse(scheduler.is_legacy_scheduler(scheduler.UnitAwareScheduler)) + assert not scheduler.is_legacy_scheduler(scheduler.DeterministicScheduler) + assert not scheduler.is_legacy_scheduler(scheduler.UnitAwareScheduler) def test_is_simple_scheduler(self): - self.assertTrue(scheduler.is_simple_scheduler(scheduler.PoissonScheduler)) + assert scheduler.is_simple_scheduler(scheduler.PoissonScheduler) def test_is_not_simple_scheduler(self): - self.assertFalse(scheduler.is_simple_scheduler(scheduler.UnitAwareScheduler)) + assert not scheduler.is_simple_scheduler(scheduler.UnitAwareScheduler) class SchedulerThrottlingTests(TestCase): @@ -199,19 +201,19 @@ def task(self, schedule=None, target_throughput=None, target_interval=None): return track.Task("test", op, schedule=schedule, params=params) def test_throttled_by_target_throughput(self): - self.assertFalse(scheduler.run_unthrottled(self.task(target_throughput=4, schedule="deterministic"))) + assert not scheduler.run_unthrottled(self.task(target_throughput=4, schedule="deterministic")) def test_throttled_by_target_interval(self): - self.assertFalse(scheduler.run_unthrottled(self.task(target_interval=2))) + assert not scheduler.run_unthrottled(self.task(target_interval=2)) def test_throttled_by_custom_schedule(self): - self.assertFalse(scheduler.run_unthrottled(self.task(schedule="my-custom-schedule"))) + assert not scheduler.run_unthrottled(self.task(schedule="my-custom-schedule")) def test_unthrottled_by_target_throughput(self): - self.assertTrue(scheduler.run_unthrottled(self.task(target_throughput=None))) + assert scheduler.run_unthrottled(self.task(target_throughput=None)) def test_unthrottled_by_target_interval(self): - self.assertTrue(scheduler.run_unthrottled(self.task(target_interval=0, schedule="poisson"))) + assert scheduler.run_unthrottled(self.task(target_interval=0, schedule="poisson")) class LegacyWrappingSchedulerTests(TestCase): @@ -239,5 +241,5 @@ def test_legacy_scheduler(self): s = scheduler.scheduler_for(task) - self.assertEqual(0, s.next(0)) - self.assertEqual(0, s.next(0)) + assert s.next(0) == 0 + assert s.next(0) == 0 diff --git a/tests/mechanic/java_resolver_test.py b/tests/mechanic/java_resolver_test.py index 052651bd4..d6354c9d6 100644 --- 
a/tests/mechanic/java_resolver_test.py +++ b/tests/mechanic/java_resolver_test.py @@ -18,6 +18,8 @@ import unittest.mock as mock from unittest import TestCase +import pytest + from esrally import exceptions from esrally.mechanic import java_resolver @@ -30,8 +32,8 @@ def test_resolves_java_home_for_default_runtime_jdk(self, resolve_jvm_path): specified_runtime_jdk=None, provides_bundled_jdk=True) - self.assertEqual(major, 12) - self.assertEqual(java_home, "/opt/jdk12") + assert major == 12 + assert java_home == "/opt/jdk12" @mock.patch("esrally.utils.jvm.resolve_path") def test_resolves_java_home_for_specific_runtime_jdk(self, resolve_jvm_path): @@ -40,8 +42,8 @@ def test_resolves_java_home_for_specific_runtime_jdk(self, resolve_jvm_path): specified_runtime_jdk=8, provides_bundled_jdk=True) - self.assertEqual(major, 8) - self.assertEqual(java_home, "/opt/jdk8") + assert major == 8 + assert java_home == "/opt/jdk8" resolve_jvm_path.assert_called_with([8]) def test_resolves_java_home_for_bundled_jdk(self): @@ -50,12 +52,11 @@ def test_resolves_java_home_for_bundled_jdk(self): provides_bundled_jdk=True) # assumes most recent JDK - self.assertEqual(major, 12) + assert major == 12 # does not set JAVA_HOME for the bundled JDK - self.assertEqual(java_home, None) + assert java_home is None def test_disallowed_bundled_jdk(self): - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: java_resolver.java_home("12,11,10,9,8", specified_runtime_jdk="bundled") - self.assertEqual("This Elasticsearch version does not contain a bundled JDK. Please specify a different runtime JDK.", - ctx.exception.args[0]) + assert ctx.value.args[0] == "This Elasticsearch version does not contain a bundled JDK. Please specify a different runtime JDK." 
diff --git a/tests/mechanic/launcher_test.py b/tests/mechanic/launcher_test.py
index 3a7a3feaa..47ecb6cb7 100644
--- a/tests/mechanic/launcher_test.py
+++ b/tests/mechanic/launcher_test.py
@@ -26,6 +26,7 @@
 import elasticsearch
 import psutil
+import pytest

 from esrally import config, exceptions, telemetry
 from esrally.mechanic import launcher, cluster
@@ -184,12 +185,12 @@ def test_daemon_start_stop(self, wait_for_pidfile, chdir, get_size, supports, ja
                                                 node_root_path="/tmp", binary_path="/tmp", data_paths="/tmp"))
        nodes = proc_launcher.start(node_configs)
-        self.assertEqual(len(nodes), 2)
-        self.assertEqual(nodes[0].pid, MOCK_PID_VALUE)
+        assert len(nodes) == 2
+        assert MOCK_PID_VALUE == nodes[0].pid

        stopped_nodes = proc_launcher.stop(nodes, ms)
        # all nodes should be stopped
-        self.assertEqual(nodes, stopped_nodes)
+        assert stopped_nodes == nodes

    @mock.patch('psutil.Process', new=TerminatedProcess)
    def test_daemon_stop_with_already_terminated_process(self):
@@ -212,7 +213,7 @@ def test_daemon_stop_with_already_terminated_process(self):
        stopped_nodes = proc_launcher.stop(nodes, ms)
        # no nodes should have been stopped (they were already stopped)
-        self.assertEqual([], stopped_nodes)
+        assert stopped_nodes == []

    # flight recorder shows a warning for several seconds before continuing
    @mock.patch("esrally.time.sleep")
@@ -228,11 +229,12 @@ def test_env_options_order(self, sleep):
        t = telemetry.Telemetry(["jfr"], devices=node_telemetry)
        env = proc_launcher._prepare_env(node_name="node0", java_home="/java_home", t=t)

-        self.assertEqual("/java_home/bin" + os.pathsep + os.environ["PATH"], env["PATH"])
-        self.assertEqual("-XX:+ExitOnOutOfMemoryError -XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints "
-                         "-XX:+UnlockCommercialFeatures -XX:+FlightRecorder "
-                         "-XX:FlightRecorderOptions=disk=true,maxage=0s,maxsize=0,dumponexit=true,dumponexitpath=/tmp/telemetry/profile.jfr "  # pylint: disable=line-too-long
-                         "-XX:StartFlightRecording=defaultrecording=true", env["ES_JAVA_OPTS"])
+        assert env["PATH"] == "/java_home/bin" + os.pathsep + os.environ["PATH"]
+        assert env["ES_JAVA_OPTS"] == (
+            "-XX:+ExitOnOutOfMemoryError -XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints "
+            "-XX:+UnlockCommercialFeatures -XX:+FlightRecorder "
+            "-XX:FlightRecorderOptions=disk=true,maxage=0s,maxsize=0,dumponexit=true,dumponexitpath=/tmp/telemetry/profile.jfr "  # pylint: disable=line-too-long
+            "-XX:StartFlightRecording=defaultrecording=true")

    def test_bundled_jdk_not_in_path(self):
        cfg = config.Config()
@@ -246,8 +248,8 @@ def test_bundled_jdk_not_in_path(self):
        env = proc_launcher._prepare_env(node_name="node0", java_home=None, t=t)

        # unmodified
-        self.assertEqual(os.environ["PATH"], env["PATH"])
-        self.assertIsNone(env.get("JAVA_HOME"))
+        assert env["PATH"] == os.environ["PATH"]
+        assert env.get("JAVA_HOME") is None

    def test_pass_env_vars(self):
        cfg = config.Config()
@@ -263,9 +265,9 @@ def test_pass_env_vars(self):
        env = proc_launcher._prepare_env(node_name="node0", java_home=None, t=t)

        # unmodified
-        self.assertEqual(os.environ["JAVA_HOME"], env["JAVA_HOME"])
-        self.assertEqual(os.environ["FOO1"], env["FOO1"])
-        self.assertEqual(env["ES_JAVA_OPTS"], "-XX:+ExitOnOutOfMemoryError")
+        assert env["JAVA_HOME"] == os.environ["JAVA_HOME"]
+        assert env["FOO1"] == os.environ["FOO1"]
+        assert env["ES_JAVA_OPTS"] == "-XX:+ExitOnOutOfMemoryError"

    def test_pass_java_opts(self):
        cfg = config.Config()
@@ -280,17 +282,17 @@ def test_pass_java_opts(self):
        env = proc_launcher._prepare_env(node_name="node0", java_home=None, t=t)

        # unmodified
-        self.assertEqual(os.environ["ES_JAVA_OPTS"], env["ES_JAVA_OPTS"])
+        assert env["ES_JAVA_OPTS"] == os.environ["ES_JAVA_OPTS"]

    @mock.patch("esrally.time.sleep")
    def test_pidfile_wait_race(self, sleep):
        mo = mock_open()
-        with self.assertRaises(exceptions.LaunchError):
+        with pytest.raises(exceptions.LaunchError):
            mo.side_effect = FileNotFoundError
            testclock = TestClock(IterationBasedStopWatch(1))
            with mock.patch("builtins.open", mo):
                launcher.wait_for_pidfile("testpidfile", clock=testclock)

-        with self.assertRaises(exceptions.LaunchError):
+        with pytest.raises(exceptions.LaunchError):
            mo = mock_open()
            testclock = TestClock(IterationBasedStopWatch(1))
            with mock.patch("builtins.open", mo):
@@ -299,7 +301,7 @@ def test_pidfile_wait_race(self, sleep):
        testclock = TestClock(IterationBasedStopWatch(1))
        with mock.patch("builtins.open", mo):
            ret = launcher.wait_for_pidfile("testpidfile", clock=testclock)
-        self.assertEqual(ret, 1234)
+        assert ret == 1234

    def mock_open_with_delayed_write(read_data):
        mo = mock_open(read_data=read_data)
@@ -319,7 +321,7 @@ def _stub_first_read(*args, **kwargs):
        testclock = TestClock(IterationBasedStopWatch(2))
        with mock.patch("builtins.open", mock_open_with_delayed_write(read_data="4321")):
            ret = launcher.wait_for_pidfile("testpidfile", clock=testclock)
-        self.assertEqual(ret, 4321)
+        assert ret == 4321


class IterationBasedStopWatch:
@@ -365,14 +367,14 @@ def test_starts_container_successfully(self, run_subprocess_with_output, run_sub
                                           data_paths="/tmp")
        nodes = docker.start([node_config])

-        self.assertEqual(1, len(nodes))
+        assert len(nodes) == 1
        node = nodes[0]

-        self.assertEqual(0, node.pid)
-        self.assertEqual("/bin", node.binary_path)
-        self.assertEqual("127.0.0.1", node.host_name)
-        self.assertEqual("testnode", node.node_name)
-        self.assertIsNotNone(node.telemetry)
+        assert node.pid == 0
+        assert node.binary_path == "/bin"
+        assert node.host_name == "127.0.0.1"
+        assert node.node_name == "testnode"
+        assert node.telemetry is not None

        run_subprocess_with_logging.assert_called_once_with("docker-compose -f /bin/docker-compose.yml up -d")
        run_subprocess_with_output.assert_has_calls([
@@ -396,7 +398,7 @@ def test_container_not_started(self, run_subprocess_with_output, run_subprocess_
                                           ip="127.0.0.1", node_name="testnode",
                                           node_root_path="/tmp", binary_path="/bin", data_paths="/tmp")

-        with self.assertRaisesRegex(exceptions.LaunchError, "No healthy running container after 600 seconds!"):
+        with pytest.raises(exceptions.LaunchError, match="No healthy running container after 600 seconds!"):
            docker.start([node_config])

    @mock.patch("esrally.telemetry.add_metadata_for_node")
@@ -427,6 +429,6 @@ def test_stops_container_when_no_metrics_store_is_provided(self, run_subprocess_
        docker.stop(nodes, metrics_store=metrics_store)

-        self.assertEqual(0, add_metadata_for_node.call_count)
+        assert add_metadata_for_node.call_count == 0

        run_subprocess_with_logging.assert_called_once_with("docker-compose -f /bin/docker-compose.yml down")
diff --git a/tests/mechanic/mechanic_test.py b/tests/mechanic/mechanic_test.py
index d67563b8d..633b1060d 100644
--- a/tests/mechanic/mechanic_test.py
+++ b/tests/mechanic/mechanic_test.py
@@ -18,6 +18,8 @@
 import unittest.mock as mock
 from unittest import TestCase

+import pytest
+
 from esrally import config, exceptions
 from esrally.mechanic import mechanic
@@ -34,11 +36,11 @@ def test_converts_valid_hosts(self, resolver):
            {"host": "site.example.com", "port": 9200},
        ]

-        self.assertEqual([
+        assert mechanic.to_ip_port(hosts) == [
            ("127.0.0.1", 9200),
            ("10.16.23.5", 9200),
            ("11.22.33.44", 9200),
-        ], mechanic.to_ip_port(hosts))
+        ]

    @mock.patch("esrally.utils.net.resolve")
    def test_rejects_hosts_with_unexpected_properties(self, resolver):
@@ -50,10 +52,10 @@ def test_rejects_hosts_with_unexpected_properties(self, resolver):
            {"host": "site.example.com", "port": 9200},
        ]

-        with self.assertRaises(exceptions.SystemSetupError) as ctx:
+        with pytest.raises(exceptions.SystemSetupError) as ctx:
            mechanic.to_ip_port(hosts)

-        self.assertEqual("When specifying nodes to be managed by Rally you can only supply hostname:port pairs (e.g. 'localhost:9200'), "
-                         "any additional options cannot be supported.", ctx.exception.args[0])
+        assert ctx.value.args[0] == "When specifying nodes to be managed by Rally you can only supply hostname:port pairs (e.g. 'localhost:9200'), " \
+                                    "any additional options cannot be supported."

    def test_groups_nodes_by_host(self):
        ip_port = [
@@ -65,14 +67,13 @@ def test_groups_nodes_by_host(self):
            ("11.22.33.44", 9200),
        ]

-        self.assertDictEqual(
+        assert mechanic.nodes_by_host(ip_port) == \
            {
                ("127.0.0.1", 9200): [0, 1, 2],
                ("10.16.23.5", 9200): [3],
                ("11.22.33.44", 9200): [4, 5],
-            }, mechanic.nodes_by_host(ip_port)
-        )
+            }

    def test_extract_all_node_ips(self):
        ip_port = [
@@ -83,8 +84,7 @@ def test_extract_all_node_ips(self):
            ("11.22.33.44", 9200),
            ("11.22.33.44", 9200),
        ]
-        self.assertSetEqual({"127.0.0.1", "10.16.23.5", "11.22.33.44"},
-                            mechanic.extract_all_node_ips(ip_port))
+        assert mechanic.extract_all_node_ips(ip_port) == {"127.0.0.1", "10.16.23.5", "11.22.33.44"}


class MechanicTests(TestCase):
@@ -122,10 +122,10 @@ def test_start_stop_nodes(self, cleanup):
        metrics_store = mock.Mock()
        m = MechanicTests.TestMechanic(cfg, metrics_store, supplier, provisioners, launcher)
        m.start_engine()
-        self.assertTrue(launcher.started)
+        assert launcher.started
        for p in provisioners:
-            self.assertTrue(p.prepare.called)
+            assert p.prepare.called

        m.stop_engine()
-        self.assertFalse(launcher.started)
-        self.assertEqual(cleanup.call_count, 2)
+        assert not launcher.started
+        assert cleanup.call_count == 2
diff --git a/tests/mechanic/provisioner_test.py b/tests/mechanic/provisioner_test.py
index 51f2d3f0f..d1d7e2396 100644
--- a/tests/mechanic/provisioner_test.py
+++ b/tests/mechanic/provisioner_test.py
@@ -21,6 +21,8 @@
 import unittest.mock as mock
 from unittest import TestCase

+import pytest
+
 from esrally import exceptions
 from esrally.mechanic import provisioner, team
@@ -57,16 +59,16 @@ def null_apply_config(source_root_path, target_root_path, config_vars):
                                            apply_config=null_apply_config)

        node_config = p.prepare({"elasticsearch": "/opt/elasticsearch-5.0.0.tar.gz"})
-        self.assertEqual("8", node_config.car_runtime_jdks)
-        self.assertEqual("/opt/elasticsearch-5.0.0", node_config.binary_path)
-        self.assertEqual(["/opt/elasticsearch-5.0.0/data"], node_config.data_paths)
+        assert node_config.car_runtime_jdks == "8"
+        assert node_config.binary_path == "/opt/elasticsearch-5.0.0"
+        assert node_config.data_paths == ["/opt/elasticsearch-5.0.0/data"]

-        self.assertEqual(1, len(apply_config_calls))
+        assert len(apply_config_calls) == 1
        source_root_path, target_root_path, config_vars = apply_config_calls[0]

-        self.assertEqual(HOME_DIR + "/.rally/benchmarks/teams/default/my-car", source_root_path)
-        self.assertEqual("/opt/elasticsearch-5.0.0", target_root_path)
-        self.assertEqual({
+        assert source_root_path == HOME_DIR + "/.rally/benchmarks/teams/default/my-car"
+        assert target_root_path == "/opt/elasticsearch-5.0.0"
+        assert config_vars == {
            "cluster_settings": {
            },
            "heap": "4g",
@@ -85,7 +87,7 @@ def null_apply_config(source_root_path, target_root_path, config_vars):
            "all_node_names": "[\"rally-node-0\",\"rally-node-1\"]",
            "minimum_master_nodes": 2,
            "install_root_path": "/opt/elasticsearch-5.0.0"
-        }, config_vars)
+        }

    class NoopHookHandler:
        def __init__(self, plugin):
@@ -168,19 +170,19 @@ def null_apply_config(source_root_path, target_root_path, config_vars):
                                            apply_config=null_apply_config)

        node_config = p.prepare({"elasticsearch": "/opt/elasticsearch-5.0.0.tar.gz"})
-        self.assertEqual("8", node_config.car_runtime_jdks)
-        self.assertEqual("/opt/elasticsearch-5.0.0", node_config.binary_path)
-        self.assertEqual(["/opt/elasticsearch-5.0.0/data"], node_config.data_paths)
+        assert node_config.car_runtime_jdks == "8"
+        assert node_config.binary_path == "/opt/elasticsearch-5.0.0"
+        assert node_config.data_paths == ["/opt/elasticsearch-5.0.0/data"]

-        self.assertEqual(1, len(apply_config_calls))
+        assert len(apply_config_calls) == 1
        source_root_path, target_root_path, config_vars = apply_config_calls[0]

-        self.assertEqual(HOME_DIR + "/.rally/benchmarks/teams/default/my-car", source_root_path)
-        self.assertEqual("/opt/elasticsearch-5.0.0", target_root_path)
+        assert source_root_path == HOME_DIR + "/.rally/benchmarks/teams/default/my-car"
+        assert target_root_path == "/opt/elasticsearch-5.0.0"

        self.maxDiff = None

-        self.assertEqual({
+        assert config_vars == {
            "cluster_settings": {
                "plugin.mandatory": ["x-pack-security"]
            },
@@ -203,7 +205,7 @@ def null_apply_config(source_root_path, target_root_path, config_vars):

            "plugin_name": "x-pack-security",
            "xpack_security_enabled": True
-        }, config_vars)
+        }

    @mock.patch("glob.glob", lambda p: ["/opt/elasticsearch-6.3.0"])
    @mock.patch("esrally.utils.io.decompress")
@@ -246,19 +248,19 @@ def null_apply_config(source_root_path, target_root_path, config_vars):
                                            apply_config=null_apply_config)

        node_config = p.prepare({"elasticsearch": "/opt/elasticsearch-6.3.0.tar.gz"})
-        self.assertEqual("8", node_config.car_runtime_jdks)
-        self.assertEqual("/opt/elasticsearch-6.3.0", node_config.binary_path)
-        self.assertEqual(["/opt/elasticsearch-6.3.0/data"], node_config.data_paths)
+        assert node_config.car_runtime_jdks == "8"
+        assert node_config.binary_path == "/opt/elasticsearch-6.3.0"
+        assert node_config.data_paths == ["/opt/elasticsearch-6.3.0/data"]

-        self.assertEqual(1, len(apply_config_calls))
+        assert len(apply_config_calls) == 1
        source_root_path, target_root_path, config_vars = apply_config_calls[0]

-        self.assertEqual(HOME_DIR + "/.rally/benchmarks/teams/default/my-car", source_root_path)
-        self.assertEqual("/opt/elasticsearch-6.3.0", target_root_path)
+        assert source_root_path == HOME_DIR + "/.rally/benchmarks/teams/default/my-car"
+        assert target_root_path == "/opt/elasticsearch-6.3.0"

        self.maxDiff = None

-        self.assertEqual({
+        assert config_vars == {
            "cluster_settings": {
                "plugin.mandatory": ["x-pack"]
            },
@@ -281,7 +283,7 @@ def null_apply_config(source_root_path, target_root_path, config_vars):

            "plugin_name": "x-pack-security",
            "xpack_security_enabled": True
-        }, config_vars)
+        }


class NoopHookHandler:
@@ -316,9 +318,9 @@ def test_prepare_default_data_paths(self, mock_rm, mock_ensure_dir, mock_decompr
                                                       node_root_dir=HOME_DIR + "/.rally/benchmarks/races/unittest")

        installer.install("/data/builds/distributions")

-        self.assertEqual(installer.es_home_path, "/install/elasticsearch-5.0.0-SNAPSHOT")
+        assert installer.es_home_path == "/install/elasticsearch-5.0.0-SNAPSHOT"

-        self.assertEqual({
+        assert installer.variables == {
            "cluster_name": "rally-benchmark",
            "node_name": "rally-node-0",
            "data_paths": ["/install/elasticsearch-5.0.0-SNAPSHOT/data"],
@@ -332,9 +334,9 @@ def test_prepare_default_data_paths(self, mock_rm, mock_ensure_dir, mock_decompr
            "all_node_names": "[\"rally-node-0\",\"rally-node-1\"]",
            "minimum_master_nodes": 2,
            "install_root_path": "/install/elasticsearch-5.0.0-SNAPSHOT"
-        }, installer.variables)
+        }

-        self.assertEqual(installer.data_paths, ["/install/elasticsearch-5.0.0-SNAPSHOT/data"])
+        assert installer.data_paths == ["/install/elasticsearch-5.0.0-SNAPSHOT/data"]

    @mock.patch("glob.glob", lambda p: ["/install/elasticsearch-5.0.0-SNAPSHOT"])
    @mock.patch("esrally.utils.io.decompress")
@@ -354,9 +356,9 @@ def test_prepare_user_provided_data_path(self, mock_rm, mock_ensure_dir, mock_de
                                                       node_root_dir="~/.rally/benchmarks/races/unittest")

        installer.install("/data/builds/distributions")

-        self.assertEqual(installer.es_home_path, "/install/elasticsearch-5.0.0-SNAPSHOT")
+        assert installer.es_home_path == "/install/elasticsearch-5.0.0-SNAPSHOT"

-        self.assertEqual({
+        assert installer.variables == {
            "cluster_name": "rally-benchmark",
            "node_name": "rally-node-0",
            "data_paths": ["/tmp/some/data-path-dir"],
@@ -370,9 +372,9 @@ def test_prepare_user_provided_data_path(self, mock_rm, mock_ensure_dir, mock_de
            "all_node_names": "[\"rally-node-0\",\"rally-node-1\"]",
            "minimum_master_nodes": 2,
            "install_root_path": "/install/elasticsearch-5.0.0-SNAPSHOT"
-        }, installer.variables)
+        }

-        self.assertEqual(installer.data_paths, ["/tmp/some/data-path-dir"])
+        assert installer.data_paths == ["/tmp/some/data-path-dir"]

    def test_invokes_hook_with_java_home(self):
        installer = provisioner.ElasticsearchInstaller(car=team.Car(names="defaults",
@@ -388,12 +390,11 @@ def test_invokes_hook_with_java_home(self):
                                                       node_root_dir="~/.rally/benchmarks/races/unittest",
                                                       hook_handler_class=NoopHookHandler)

-        self.assertEqual(0, len(installer.hook_handler.hook_calls))
+        assert len(installer.hook_handler.hook_calls) == 0
        installer.invoke_install_hook(team.BootstrapPhase.post_install, {"foo": "bar"})
-        self.assertEqual(1, len(installer.hook_handler.hook_calls))
-        self.assertEqual({"foo": "bar"}, installer.hook_handler.hook_calls["post_install"]["variables"])
-        self.assertEqual({"env": {"JAVA_HOME": "/usr/local/javas/java8"}},
-                         installer.hook_handler.hook_calls["post_install"]["kwargs"])
+        assert len(installer.hook_handler.hook_calls) == 1
+        assert installer.hook_handler.hook_calls["post_install"]["variables"] == {"foo": "bar"}
+        assert installer.hook_handler.hook_calls["post_install"]["kwargs"] == {"env": {"JAVA_HOME": "/usr/local/javas/java8"}}

    def test_invokes_hook_no_java_home(self):
        installer = provisioner.ElasticsearchInstaller(car=team.Car(names="defaults",
@@ -409,11 +410,11 @@ def test_invokes_hook_no_java_home(self):
                                                       node_root_dir="~/.rally/benchmarks/races/unittest",
                                                       hook_handler_class=NoopHookHandler)

-        self.assertEqual(0, len(installer.hook_handler.hook_calls))
+        assert len(installer.hook_handler.hook_calls) == 0
        installer.invoke_install_hook(team.BootstrapPhase.post_install, {"foo": "bar"})
-        self.assertEqual(1, len(installer.hook_handler.hook_calls))
-        self.assertEqual({"foo": "bar"}, installer.hook_handler.hook_calls["post_install"]["variables"])
-        self.assertEqual({"env": {}}, installer.hook_handler.hook_calls["post_install"]["kwargs"])
+        assert len(installer.hook_handler.hook_calls) == 1
+        assert installer.hook_handler.hook_calls["post_install"]["variables"] == {"foo": "bar"}
+        assert installer.hook_handler.hook_calls["post_install"]["kwargs"] == {"env": {}}


class PluginInstallerTests(TestCase):
@@ -458,9 +459,9 @@ def test_install_unknown_plugin(self, installer_subprocess):
                                              java_home="/usr/local/javas/java8",
                                              hook_handler_class=NoopHookHandler)

-        with self.assertRaises(exceptions.SystemSetupError) as ctx:
+        with pytest.raises(exceptions.SystemSetupError) as ctx:
            installer.install(es_home_path="/opt/elasticsearch")
-        self.assertEqual("Unknown plugin [unknown]", ctx.exception.args[0])
+        assert ctx.value.args[0] == "Unknown plugin [unknown]"

        installer_subprocess.assert_called_with(
            '/opt/elasticsearch/bin/elasticsearch-plugin install --batch "unknown"',
@@ -476,9 +477,9 @@ def test_install_plugin_with_io_error(self, installer_subprocess):
                                              java_home="/usr/local/javas/java8",
                                              hook_handler_class=NoopHookHandler)

-        with self.assertRaises(exceptions.SupplyError) as ctx:
+        with pytest.raises(exceptions.SupplyError) as ctx:
            installer.install(es_home_path="/opt/elasticsearch")
-        self.assertEqual("I/O error while trying to install [simple]", ctx.exception.args[0])
+        assert ctx.value.args[0] == "I/O error while trying to install [simple]"

        installer_subprocess.assert_called_with(
            '/opt/elasticsearch/bin/elasticsearch-plugin install --batch "simple"',
@@ -494,10 +495,9 @@ def test_install_plugin_with_unknown_error(self, installer_subprocess):
                                              java_home="/usr/local/javas/java8",
                                              hook_handler_class=NoopHookHandler)

-        with self.assertRaises(exceptions.RallyError) as ctx:
+        with pytest.raises(exceptions.RallyError) as ctx:
            installer.install(es_home_path="/opt/elasticsearch")
-        self.assertEqual("Unknown error while trying to install [simple] (installer return code [12987]). Please check the logs.",
-                         ctx.exception.args[0])
+        assert ctx.value.args[0] == "Unknown error while trying to install [simple] (installer return code [12987]). Please check the logs."
        installer_subprocess.assert_called_with(
            '/opt/elasticsearch/bin/elasticsearch-plugin install --batch "simple"',
@@ -512,9 +512,9 @@ def test_pass_plugin_properties(self):
                                              java_home="/usr/local/javas/java8",
                                              hook_handler_class=NoopHookHandler)

-        self.assertEqual("unit-test-plugin", installer.plugin_name)
-        self.assertEqual({"active": True}, installer.variables)
-        self.assertEqual(["/etc/plugin"], installer.config_source_paths)
+        assert installer.plugin_name == "unit-test-plugin"
+        assert installer.variables == {"active": True}
+        assert installer.config_source_paths == ["/etc/plugin"]

    def test_invokes_hook_with_java_home(self):
        plugin = team.PluginDescriptor(name="unit-test-plugin",
@@ -525,12 +525,11 @@ def test_invokes_hook_with_java_home(self):
                                              java_home="/usr/local/javas/java8",
                                              hook_handler_class=NoopHookHandler)

-        self.assertEqual(0, len(installer.hook_handler.hook_calls))
+        assert len(installer.hook_handler.hook_calls) == 0
        installer.invoke_install_hook(team.BootstrapPhase.post_install, {"foo": "bar"})
-        self.assertEqual(1, len(installer.hook_handler.hook_calls))
-        self.assertEqual({"foo": "bar"}, installer.hook_handler.hook_calls["post_install"]["variables"])
-        self.assertEqual({"env": {"JAVA_HOME": "/usr/local/javas/java8"}},
-                         installer.hook_handler.hook_calls["post_install"]["kwargs"])
+        assert len(installer.hook_handler.hook_calls) == 1
+        assert installer.hook_handler.hook_calls["post_install"]["variables"] == {"foo": "bar"}
+        assert installer.hook_handler.hook_calls["post_install"]["kwargs"] == {"env": {"JAVA_HOME": "/usr/local/javas/java8"}}

    def test_invokes_hook_no_java_home(self):
        plugin = team.PluginDescriptor(name="unit-test-plugin",
@@ -541,11 +540,11 @@ def test_invokes_hook_no_java_home(self):
                                              java_home=None,
                                              hook_handler_class=NoopHookHandler)

-        self.assertEqual(0, len(installer.hook_handler.hook_calls))
+        assert len(installer.hook_handler.hook_calls) == 0
        installer.invoke_install_hook(team.BootstrapPhase.post_install, {"foo": "bar"})
-        self.assertEqual(1, len(installer.hook_handler.hook_calls))
-        self.assertEqual({"foo": "bar"}, installer.hook_handler.hook_calls["post_install"]["variables"])
-        self.assertEqual({"env": {}}, installer.hook_handler.hook_calls["post_install"]["kwargs"])
+        assert len(installer.hook_handler.hook_calls) == 1
+        assert installer.hook_handler.hook_calls["post_install"]["variables"] == {"foo": "bar"}
+        assert installer.hook_handler.hook_calls["post_install"]["kwargs"] == {"env": {}}


class DockerProvisionerTests(TestCase):
@@ -571,7 +570,7 @@ def test_provisioning_with_defaults(self, uuid4):
                                               distribution_version="6.3.0",
                                               rally_root=rally_root)

-        self.assertDictEqual({
+        assert docker.config_vars == {
            "cluster_name": "rally-benchmark",
            "node_name": "rally-node-0",
            "install_root_path": "/usr/share/elasticsearch",
@@ -585,9 +584,9 @@ def test_provisioning_with_defaults(self, uuid4):
            "cluster_settings": {
            },
            "docker_image": "docker.elastic.co/elasticsearch/elasticsearch-oss"
-        }, docker.config_vars)
+        }

-        self.assertDictEqual({
+        assert docker.docker_vars(mounts={}) == {
            "es_data_dir": data_dir,
            "es_log_dir": log_dir,
            "es_heap_dump_dir": heap_dump_dir,
@@ -595,11 +594,11 @@ def test_provisioning_with_defaults(self, uuid4):
            "docker_image": "docker.elastic.co/elasticsearch/elasticsearch-oss",
            "http_port": 39200,
            "mounts": {}
-        }, docker.docker_vars(mounts={}))
+        }

        docker_cfg = docker._render_template_from_file(docker.docker_vars(mounts={}))

-        self.assertEqual(
+        assert docker_cfg == \
"""version: '2.2'
services:
  elasticsearch1:
@@ -623,7 +622,7 @@ def test_provisioning_with_defaults(self, uuid4):
      test: nc -z 127.0.0.1 39200
      interval: 5s
      timeout: 2s
-      retries: 10""" % (data_dir, log_dir, heap_dump_dir), docker_cfg)
+      retries: 10""" % (data_dir, log_dir, heap_dump_dir)

    @mock.patch("uuid.uuid4")
    def test_provisioning_with_variables(self, uuid4):
@@ -651,7 +650,7 @@ def test_provisioning_with_variables(self, uuid4):

        docker_cfg = docker._render_template_from_file(docker.docker_vars(mounts={}))

-        self.assertEqual(
+        assert docker_cfg == \
"""version: '2.2'
services:
  elasticsearch1:
@@ -677,7 +676,7 @@ def test_provisioning_with_variables(self, uuid4):
      test: nc -z 127.0.0.1 39200
      interval: 5s
      timeout: 2s
-      retries: 10""" % (data_dir, log_dir, heap_dump_dir), docker_cfg)
+      retries: 10""" % (data_dir, log_dir, heap_dump_dir)


class CleanupTests(TestCase):
@@ -688,8 +687,8 @@ def test_preserves(self, mock_path_exists, mock_rm):
        provisioner.cleanup(preserve=True, install_dir="./rally/races/install", data_paths=["./rally/races/data"])

-        self.assertEqual(mock_path_exists.call_count, 0)
-        self.assertEqual(mock_rm.call_count, 0)
+        assert mock_path_exists.call_count == 0
+        assert mock_rm.call_count == 0

    @mock.patch("shutil.rmtree")
    @mock.patch("os.path.exists")
diff --git a/tests/mechanic/supplier_test.py b/tests/mechanic/supplier_test.py
index d2a459e41..4174cf693 100644
--- a/tests/mechanic/supplier_test.py
+++ b/tests/mechanic/supplier_test.py
@@ -21,25 +21,25 @@
 import unittest.mock as mock
 from unittest import TestCase

+import pytest
+
 from esrally import exceptions, config
 from esrally.mechanic import supplier, team


 class RevisionExtractorTests(TestCase):
     def test_single_revision(self):
-        self.assertDictEqual({"elasticsearch": "67c2f42", "all": "67c2f42"}, supplier._extract_revisions("67c2f42"))
-        self.assertDictEqual({"elasticsearch": "current", "all": "current"}, supplier._extract_revisions("current"))
-        self.assertDictEqual({"elasticsearch": "@2015-01-01-01:00:00", "all": "@2015-01-01-01:00:00"},
-                             supplier._extract_revisions("@2015-01-01-01:00:00"))
+        assert supplier._extract_revisions("67c2f42") == {"elasticsearch": "67c2f42", "all": "67c2f42"}
+        assert supplier._extract_revisions("current") == {"elasticsearch": "current", "all": "current"}
+        assert supplier._extract_revisions("@2015-01-01-01:00:00") == {"elasticsearch": "@2015-01-01-01:00:00", "all": "@2015-01-01-01:00:00"}

     def test_multiple_revisions(self):
-        self.assertDictEqual({"elasticsearch": "67c2f42", "x-pack": "@2015-01-01-01:00:00", "some-plugin": "current"},
-                             supplier._extract_revisions("elasticsearch:67c2f42,x-pack:@2015-01-01-01:00:00,some-plugin:current"))
+        assert supplier._extract_revisions("elasticsearch:67c2f42,x-pack:@2015-01-01-01:00:00,some-plugin:current") == {"elasticsearch": "67c2f42", "x-pack": "@2015-01-01-01:00:00", "some-plugin": "current"}

     def test_invalid_revisions(self):
-        with self.assertRaises(exceptions.SystemSetupError) as ctx:
+        with pytest.raises(exceptions.SystemSetupError) as ctx:
            supplier._extract_revisions("elasticsearch 67c2f42,x-pack:current")
-        self.assertEqual("Revision [elasticsearch 67c2f42] does not match expected format [name:revision].", ctx.exception.args[0])
+        assert ctx.value.args[0] == "Revision [elasticsearch 67c2f42] does not match expected format [name:revision]."

class SourceRepositoryTests(TestCase):
@@ -72,8 +72,8 @@ def test_checkout_current(self, mock_is_working_copy, mock_clone, mock_pull, moc
        s.fetch("current")

        mock_is_working_copy.assert_called_with("/src")
-        self.assertEqual(0, mock_clone.call_count)
-        self.assertEqual(0, mock_pull.call_count)
+        assert mock_clone.call_count == 0
+        assert mock_pull.call_count == 0
        mock_head_revision.assert_called_with("/src")

@@ -91,8 +91,8 @@ def test_checkout_revision_for_local_only_repo(self, mock_is_working_copy, mock_
        s.fetch("67c2f42")

        mock_is_working_copy.assert_called_with("/src")
-        self.assertEqual(0, mock_clone.call_count)
-        self.assertEqual(0, mock_pull.call_count)
+        assert mock_clone.call_count == 0
+        assert mock_pull.call_count == 0
        mock_checkout.assert_called_with("/src", "67c2f42")
        mock_head_revision.assert_called_with("/src")

@@ -125,12 +125,12 @@ def test_checkout_revision(self, mock_is_working_copy, mock_pull_revision, mock_
        mock_head_revision.assert_called_with("/src")

    def test_is_commit_hash(self):
-        self.assertTrue(supplier.SourceRepository.is_commit_hash("67c2f42"))
+        assert supplier.SourceRepository.is_commit_hash("67c2f42")

    def test_is_not_commit_hash(self):
-        self.assertFalse(supplier.SourceRepository.is_commit_hash("latest"))
-        self.assertFalse(supplier.SourceRepository.is_commit_hash("current"))
-        self.assertFalse(supplier.SourceRepository.is_commit_hash("@2015-01-01-01:00:00"))
+        assert not supplier.SourceRepository.is_commit_hash("latest")
+        assert not supplier.SourceRepository.is_commit_hash("current")
+        assert not supplier.SourceRepository.is_commit_hash("@2015-01-01-01:00:00")


class BuilderTests(TestCase):
@@ -174,15 +174,13 @@ def test_build_on_jdk_10(self, jvm_resolve_path, mock_run_subprocess):


class TemplateRendererTests(TestCase):
    def test_uses_provided_values(self):
        renderer = supplier.TemplateRenderer(version="1.2.3", os_name="Windows", arch="arm7")
-        self.assertEqual("This is version 1.2.3 on Windows with a arm7 CPU.",
-                         renderer.render("This is version {{VERSION}} on {{OSNAME}} with a {{ARCH}} CPU."))
+        assert renderer.render("This is version {{VERSION}} on {{OSNAME}} with a {{ARCH}} CPU.") == "This is version 1.2.3 on Windows with a arm7 CPU."

    @mock.patch("esrally.utils.sysstats.os_name", return_value="Linux")
    @mock.patch("esrally.utils.sysstats.cpu_arch", return_value="X86_64")
    def test_uses_derived_values(self, os_name, cpu_arch):
        renderer = supplier.TemplateRenderer(version="1.2.3")
-        self.assertEqual("This is version 1.2.3 on linux with a x86_64 CPU.",
-                         renderer.render("This is version {{VERSION}} on {{OSNAME}} with a {{ARCH}} CPU."))
+        assert renderer.render("This is version {{VERSION}} on {{OSNAME}} with a {{ARCH}} CPU.") == "This is version 1.2.3 on linux with a x86_64 CPU."
class CachedElasticsearchSourceSupplierTests(TestCase): @@ -218,10 +216,10 @@ def add_es_artifact(binaries): cached_supplier.add(binaries) - self.assertEqual(0, copy.call_count) - self.assertFalse(cached_supplier.cached) - self.assertIn("elasticsearch", binaries) - self.assertEqual("/path/to/artifact.tar.gz", binaries["elasticsearch"]) + assert copy.call_count == 0 + assert not cached_supplier.cached + assert "elasticsearch" in binaries + assert binaries["elasticsearch"] == "/path/to/artifact.tar.gz" @mock.patch("os.path.exists") @mock.patch("esrally.mechanic.supplier.ElasticsearchSourceSupplier") @@ -249,12 +247,12 @@ def test_uses_already_cached_artifact(self, es, path_exists): cached_supplier.add(binaries) - self.assertEqual(0, es.fetch.call_count) - self.assertEqual(0, es.prepare.call_count) - self.assertEqual(0, es.add.call_count) - self.assertTrue(cached_supplier.cached) - self.assertIn("elasticsearch", binaries) - self.assertEqual("/tmp/elasticsearch-abc123-linux-x86_64.tar.gz", binaries["elasticsearch"]) + assert es.fetch.call_count == 0 + assert es.prepare.call_count == 0 + assert es.add.call_count == 0 + assert cached_supplier.cached + assert "elasticsearch" in binaries + assert binaries["elasticsearch"] == "/tmp/elasticsearch-abc123-linux-x86_64.tar.gz" @mock.patch("esrally.utils.io.ensure_dir") @mock.patch("os.path.exists") @@ -291,10 +289,10 @@ def add_es_artifact(binaries): # path is cached now path_exists.return_value = True - self.assertEqual(1, copy.call_count, "artifact has been copied") - self.assertEqual(1, es.add.call_count, "artifact has been added by internal supplier") - self.assertTrue(cached_supplier.cached) - self.assertIn("elasticsearch", binaries) + assert copy.call_count == 1, "artifact has been copied" + assert es.add.call_count == 1, "artifact has been added by internal supplier" + assert cached_supplier.cached + assert "elasticsearch" in binaries # simulate a second attempt cached_supplier.fetch() @@ -303,10 +301,10 @@ def add_es_artifact(binaries): binaries = {} cached_supplier.add(binaries) - self.assertEqual(1, copy.call_count, "artifact has not been copied twice") + assert copy.call_count == 1, "artifact has not been copied twice" # the internal supplier did not get called again as we reuse the cached artifact - self.assertEqual(1, es.add.call_count, "internal supplier is not called again") - self.assertTrue(cached_supplier.cached) + assert es.add.call_count == 1, "internal supplier is not called again" + assert cached_supplier.cached @mock.patch("esrally.utils.io.ensure_dir") @mock.patch("os.path.exists") @@ -342,12 +340,12 @@ def add_es_artifact(binaries): cached_supplier.add(binaries) - self.assertEqual(1, copy.call_count, "artifact has been copied") - self.assertEqual(1, es.add.call_count, "artifact has been added by internal supplier") - self.assertFalse(cached_supplier.cached) - self.assertIn("elasticsearch", binaries) + assert copy.call_count == 1, "artifact has been copied" + assert es.add.call_count == 1, "artifact has been added by internal supplier" + assert not cached_supplier.cached + assert "elasticsearch" in binaries # still the uncached artifact - self.assertEqual("/path/to/artifact.tar.gz", binaries["elasticsearch"]) + assert binaries["elasticsearch"] == "/path/to/artifact.tar.gz" class ElasticsearchFileNameResolverTests(TestCase): @@ -367,18 +365,18 @@ def setUp(self): def test_resolve(self): self.resolver.revision = "abc123" - self.assertEqual("elasticsearch-abc123-linux-x86_64.tar.gz", self.resolver.file_name) + assert 
self.resolver.file_name == "elasticsearch-abc123-linux-x86_64.tar.gz" def test_artifact_key(self): - self.assertEqual("elasticsearch", self.resolver.artifact_key) + assert self.resolver.artifact_key == "elasticsearch" def test_to_artifact_path(self): file_system_path = "/tmp/test" - self.assertEqual(file_system_path, self.resolver.to_artifact_path(file_system_path)) + assert self.resolver.to_artifact_path(file_system_path) == file_system_path def test_to_file_system_path(self): artifact_path = "/tmp/test" - self.assertEqual(artifact_path, self.resolver.to_file_system_path(artifact_path)) + assert self.resolver.to_file_system_path(artifact_path) == artifact_path class PluginFileNameResolverTests(TestCase): @@ -388,18 +386,18 @@ def setUp(self): def test_resolve(self): self.resolver.revision = "abc123" - self.assertEqual("test-plugin-abc123.zip", self.resolver.file_name) + assert self.resolver.file_name == "test-plugin-abc123.zip" def test_artifact_key(self): - self.assertEqual("test-plugin", self.resolver.artifact_key) + assert self.resolver.artifact_key == "test-plugin" def test_to_artifact_path(self): file_system_path = "/tmp/test" - self.assertEqual(f"file://{file_system_path}", self.resolver.to_artifact_path(file_system_path)) + assert self.resolver.to_artifact_path(file_system_path) == f"file://{file_system_path}" def test_to_file_system_path(self): file_system_path = "/tmp/test" - self.assertEqual(file_system_path, self.resolver.to_file_system_path(f"file://{file_system_path}")) + assert self.resolver.to_file_system_path(f"file://{file_system_path}") == file_system_path class PruneTests(TestCase): @@ -415,7 +413,7 @@ def test_does_not_touch_nonexisting_directory(self, rm, lstat, isfile, listdir, supplier._prune(root_path="/tmp/test", max_age_days=7) - self.assertEqual(0, listdir.call_count, "attempted to list a non-existing directory") + assert listdir.call_count == 0, "attempted to list a non-existing directory" @mock.patch("os.path.exists") @mock.patch("os.listdir") @@ -489,11 +487,10 @@ def test_raises_error_on_missing_car_variable(self): car=car, builder=builder, template_renderer=renderer) - with self.assertRaisesRegex(exceptions.SystemSetupError, - "Car \"default\" requires config key \"system.build_command\""): + with pytest.raises(exceptions.SystemSetupError, match="Car \"default\" requires config key \"system.build_command\""): es.prepare() - self.assertEqual(0, builder.build.call_count) + assert builder.build.call_count == 0 @mock.patch("glob.glob", lambda p: ["elasticsearch.tar.gz"]) def test_add_elasticsearch_binary(self): @@ -511,7 +508,7 @@ def test_add_elasticsearch_binary(self): template_renderer=renderer) binaries = {} es.add(binaries=binaries) - self.assertEqual(binaries, {"elasticsearch": "elasticsearch.tar.gz"}) + assert {"elasticsearch": "elasticsearch.tar.gz"} == binaries class ExternalPluginSourceSupplierTests(TestCase): @@ -542,8 +539,7 @@ def setUp(self): builder=None) def test_invalid_config_no_source(self): - with self.assertRaisesRegex(exceptions.SystemSetupError, - "Neither plugin.some-plugin.src.dir nor plugin.some-plugin.src.subdir are set for plugin some-plugin."): + with pytest.raises(exceptions.SystemSetupError, match="Neither plugin.some-plugin.src.dir nor plugin.some-plugin.src.subdir are set for plugin some-plugin."): supplier.ExternalPluginSourceSupplier(plugin=team.PluginDescriptor("some-plugin", core_plugin=False), revision="abc", # built separately @@ -556,8 +552,7 @@ def test_invalid_config_no_source(self): builder=None) def 
test_invalid_config_duplicate_source(self): - with self.assertRaisesRegex(exceptions.SystemSetupError, - "Can only specify one of plugin.duplicate.src.dir and plugin.duplicate.src.subdir but both are set."): + with pytest.raises(exceptions.SystemSetupError, match="Can only specify one of plugin.duplicate.src.dir and plugin.duplicate.src.subdir but both are set."): supplier.ExternalPluginSourceSupplier(plugin=team.PluginDescriptor("duplicate", core_plugin=False), revision="abc", src_dir=None, @@ -569,24 +564,22 @@ def test_invalid_config_duplicate_source(self): builder=None) def test_standalone_plugin_overrides_build_dir(self): - self.assertEqual("/Projects/src/some-plugin", self.standalone.override_build_dir) + assert self.standalone.override_build_dir == "/Projects/src/some-plugin" def test_along_es_plugin_keeps_build_dir(self): - self.assertIsNone(self.along_es.override_build_dir) + assert self.along_es.override_build_dir is None @mock.patch("glob.glob", lambda p: ["/src/elasticsearch-extra/some-plugin/plugin/build/distributions/some-plugin.zip"]) def test_add_binary_built_along_elasticsearch(self): binaries = {} self.along_es.add(binaries) - self.assertDictEqual(binaries, - {"some-plugin": "file:///src/elasticsearch-extra/some-plugin/plugin/build/distributions/some-plugin.zip"}) + assert {"some-plugin": "file:///src/elasticsearch-extra/some-plugin/plugin/build/distributions/some-plugin.zip"} == binaries @mock.patch("glob.glob", lambda p: ["/Projects/src/some-plugin/build/distributions/some-plugin.zip"]) def test_resolve_plugin_binary_built_standalone(self): binaries = {} self.along_es.add(binaries) - self.assertDictEqual(binaries, - {"some-plugin": "file:///Projects/src/some-plugin/build/distributions/some-plugin.zip"}) + assert {"some-plugin": "file:///Projects/src/some-plugin/build/distributions/some-plugin.zip"} == binaries class CorePluginSourceSupplierTests(TestCase): @@ -598,7 +591,7 @@ def test_resolve_plugin_binary(self): builder=None) binaries = {} s.add(binaries) - self.assertDictEqual(binaries, {"core-plugin": "file:///src/elasticsearch/core-plugin/build/distributions/core-plugin.zip"}) + assert {"core-plugin": "file:///src/elasticsearch/core-plugin/build/distributions/core-plugin.zip"} == binaries class PluginDistributionSupplierTests(TestCase): @@ -611,7 +604,7 @@ def test_resolve_plugin_url(self): plugin=team.PluginDescriptor("custom-analyzer")) binaries = {} s.add(binaries) - self.assertDictEqual(binaries, {"custom-analyzer": "http://example.org/elasticearch/custom-analyzer-6.3.0.zip"}) + assert {"custom-analyzer": "http://example.org/elasticearch/custom-analyzer-6.3.0.zip"} == binaries class CreateSupplierTests(TestCase): @@ -619,13 +612,13 @@ def test_derive_supply_requirements_es_source_build(self): # corresponds to --revision="abc" requirements = supplier._supply_requirements( sources=True, distribution=False, plugins=[], revisions={"elasticsearch": "abc"}, distribution_version=None) - self.assertDictEqual({"elasticsearch": ("source", "abc", True)}, requirements) + assert requirements == {"elasticsearch": ("source", "abc", True)} def test_derive_supply_requirements_es_distribution(self): # corresponds to --distribution-version=6.0.0 requirements = supplier._supply_requirements( sources=False, distribution=True, plugins=[], revisions={}, distribution_version="6.0.0") - self.assertDictEqual({"elasticsearch": ("distribution", "6.0.0", False)}, requirements) + assert requirements == {"elasticsearch": ("distribution", "6.0.0", False)} def 
test_derive_supply_requirements_es_and_plugin_source_build(self): # corresponds to --revision="elasticsearch:abc,community-plugin:effab" @@ -635,12 +628,12 @@ def test_derive_supply_requirements_es_and_plugin_source_build(self): requirements = supplier._supply_requirements(sources=True, distribution=False, plugins=[core_plugin, external_plugin], revisions={"elasticsearch": "abc", "all": "abc", "community-plugin": "effab"}, distribution_version=None) - self.assertDictEqual({ + assert requirements == { "elasticsearch": ("source", "abc", True), # core plugin configuration is forced to be derived from ES "analysis-icu": ("source", "abc", True), "community-plugin": ("source", "effab", True), - }, requirements) + } def test_derive_supply_requirements_es_distribution_and_plugin_source_build(self): # corresponds to --revision="community-plugin:effab" --distribution-version="6.0.0" @@ -651,12 +644,12 @@ def test_derive_supply_requirements_es_distribution_and_plugin_source_build(self revisions={"community-plugin": "effab"}, distribution_version="6.0.0") # core plugin is not contained, its configured is forced to be derived by ES - self.assertDictEqual({ + assert requirements == { "elasticsearch": ("distribution", "6.0.0", False), # core plugin configuration is forced to be derived from ES "analysis-icu": ("distribution", "6.0.0", False), "community-plugin": ("source", "effab", True), - }, requirements) + } def test_create_suppliers_for_es_only_config(self): cfg = config.Config() @@ -673,8 +666,8 @@ def test_create_suppliers_for_es_only_config(self): composite_supplier = supplier.create(cfg, sources=False, distribution=True, car=car) - self.assertEqual(1, len(composite_supplier.suppliers)) - self.assertIsInstance(composite_supplier.suppliers[0], supplier.ElasticsearchDistributionSupplier) + assert len(composite_supplier.suppliers) == 1 + assert isinstance(composite_supplier.suppliers[0], supplier.ElasticsearchDistributionSupplier) @mock.patch("esrally.utils.jvm.resolve_path", lambda v: (v, "/opt/java/java{}".format(v))) def test_create_suppliers_for_es_distribution_plugin_source_build(self): @@ -701,13 +694,13 @@ def test_create_suppliers_for_es_distribution_plugin_source_build(self): external_plugin ]) - self.assertEqual(3, len(composite_supplier.suppliers)) - self.assertIsInstance(composite_supplier.suppliers[0], supplier.ElasticsearchDistributionSupplier) - self.assertIsInstance(composite_supplier.suppliers[1], supplier.PluginDistributionSupplier) - self.assertEqual(core_plugin, composite_supplier.suppliers[1].plugin) - self.assertIsInstance(composite_supplier.suppliers[2].source_supplier, supplier.ExternalPluginSourceSupplier) - self.assertEqual(external_plugin, composite_supplier.suppliers[2].source_supplier.plugin) - self.assertIsNotNone(composite_supplier.suppliers[2].source_supplier.builder) + assert len(composite_supplier.suppliers) == 3 + assert isinstance(composite_supplier.suppliers[0], supplier.ElasticsearchDistributionSupplier) + assert isinstance(composite_supplier.suppliers[1], supplier.PluginDistributionSupplier) + assert composite_supplier.suppliers[1].plugin == core_plugin + assert isinstance(composite_supplier.suppliers[2].source_supplier, supplier.ExternalPluginSourceSupplier) + assert composite_supplier.suppliers[2].source_supplier.plugin == external_plugin + assert composite_supplier.suppliers[2].source_supplier.builder is not None @mock.patch("esrally.utils.jvm.resolve_path", lambda v: (v, "/opt/java/java{}".format(v))) def 
test_create_suppliers_for_es_and_plugin_source_build(self): @@ -737,13 +730,13 @@ def test_create_suppliers_for_es_and_plugin_source_build(self): external_plugin ]) - self.assertEqual(3, len(composite_supplier.suppliers)) - self.assertIsInstance(composite_supplier.suppliers[0].source_supplier, supplier.ElasticsearchSourceSupplier) - self.assertIsInstance(composite_supplier.suppliers[1].source_supplier, supplier.CorePluginSourceSupplier) - self.assertEqual(core_plugin, composite_supplier.suppliers[1].source_supplier.plugin) - self.assertIsInstance(composite_supplier.suppliers[2].source_supplier, supplier.ExternalPluginSourceSupplier) - self.assertEqual(external_plugin, composite_supplier.suppliers[2].source_supplier.plugin) - self.assertIsNotNone(composite_supplier.suppliers[2].source_supplier.builder) + assert len(composite_supplier.suppliers) == 3 + assert isinstance(composite_supplier.suppliers[0].source_supplier, supplier.ElasticsearchSourceSupplier) + assert isinstance(composite_supplier.suppliers[1].source_supplier, supplier.CorePluginSourceSupplier) + assert composite_supplier.suppliers[1].source_supplier.plugin == core_plugin + assert isinstance(composite_supplier.suppliers[2].source_supplier, supplier.ExternalPluginSourceSupplier) + assert composite_supplier.suppliers[2].source_supplier.plugin == external_plugin + assert composite_supplier.suppliers[2].source_supplier.builder is not None class DistributionRepositoryTests(TestCase): @@ -757,9 +750,9 @@ def test_release_repo_config_with_default_url(self, os_name, cpu_arch): "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}-{{OSNAME}}-{{ARCH}}.tar.gz", "release.cache": "true" }, template_renderer=renderer) - self.assertEqual("https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.3.2-linux-x86_64.tar.gz", repo.download_url) - self.assertEqual("elasticsearch-7.3.2-linux-x86_64.tar.gz", repo.file_name) - self.assertTrue(repo.cache) + assert repo.download_url == "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.3.2-linux-x86_64.tar.gz" + assert repo.file_name == "elasticsearch-7.3.2-linux-x86_64.tar.gz" + assert repo.cache def test_release_repo_config_with_user_url(self): renderer = supplier.TemplateRenderer(version="2.4.3") @@ -770,9 +763,9 @@ def test_release_repo_config_with_user_url(self): "release.url": "https://es-mirror.example.org/downloads/elasticsearch/elasticsearch-{{VERSION}}.tar.gz", "release.cache": "false" }, template_renderer=renderer) - self.assertEqual("https://es-mirror.example.org/downloads/elasticsearch/elasticsearch-2.4.3.tar.gz", repo.download_url) - self.assertEqual("elasticsearch-2.4.3.tar.gz", repo.file_name) - self.assertFalse(repo.cache) + assert repo.download_url == "https://es-mirror.example.org/downloads/elasticsearch/elasticsearch-2.4.3.tar.gz" + assert repo.file_name == "elasticsearch-2.4.3.tar.gz" + assert not repo.cache def test_missing_url(self): renderer = supplier.TemplateRenderer(version="2.4.3") @@ -781,11 +774,11 @@ def test_missing_url(self): "runtime.jdk.bundled": "false", "release.cache": "true" }, template_renderer=renderer) - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: # pylint: disable=pointless-statement # noinspection PyStatementEffect repo.download_url - self.assertEqual("Neither config key [miss.url] nor [jdk.unbundled.miss_url] is defined.", ctx.exception.args[0]) + assert ctx.value.args[0] == "Neither config key [miss.url] nor 
[jdk.unbundled.miss_url] is defined." def test_missing_cache(self): renderer = supplier.TemplateRenderer(version="2.4.3") @@ -793,11 +786,11 @@ def test_missing_cache(self): "jdk.unbundled.release.url": "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}.tar.gz", "runtime.jdk.bundled": "false" }, template_renderer=renderer) - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: # pylint: disable=pointless-statement # noinspection PyStatementEffect repo.cache - self.assertEqual("Mandatory config key [release.cache] is undefined.", ctx.exception.args[0]) + assert ctx.value.args[0] == "Mandatory config key [release.cache] is undefined." def test_invalid_cache_value(self): renderer = supplier.TemplateRenderer(version="2.4.3") @@ -806,11 +799,11 @@ def test_invalid_cache_value(self): "runtime.jdk.bundled": "false", "release.cache": "Invalid" }, template_renderer=renderer) - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: # pylint: disable=pointless-statement # noinspection PyStatementEffect repo.cache - self.assertEqual("Value [Invalid] for config key [release.cache] is not a valid boolean value.", ctx.exception.args[0]) + assert ctx.value.args[0] == "Value [Invalid] for config key [release.cache] is not a valid boolean value." def test_plugin_config_with_default_url(self): renderer = supplier.TemplateRenderer(version="5.5.0") @@ -818,7 +811,7 @@ def test_plugin_config_with_default_url(self): "runtime.jdk.bundled": "false", "plugin_example_release_url": "https://artifacts.example.org/downloads/plugins/example-{{VERSION}}.zip" }, template_renderer=renderer) - self.assertEqual("https://artifacts.example.org/downloads/plugins/example-5.5.0.zip", repo.plugin_download_url("example")) + assert repo.plugin_download_url("example") == "https://artifacts.example.org/downloads/plugins/example-5.5.0.zip" def test_plugin_config_with_user_url(self): renderer = supplier.TemplateRenderer(version="5.5.0") @@ -828,11 +821,11 @@ def test_plugin_config_with_user_url(self): # user override "plugin.example.release.url": "https://mirror.example.org/downloads/plugins/example-{{VERSION}}.zip" }, template_renderer=renderer) - self.assertEqual("https://mirror.example.org/downloads/plugins/example-5.5.0.zip", repo.plugin_download_url("example")) + assert repo.plugin_download_url("example") == "https://mirror.example.org/downloads/plugins/example-5.5.0.zip" def test_missing_plugin_config(self): renderer = supplier.TemplateRenderer(version="5.5.0") repo = supplier.DistributionRepository(name="release", distribution_config={ "runtime.jdk.bundled": "false", }, template_renderer=renderer) - self.assertIsNone(repo.plugin_download_url("not existing")) + assert repo.plugin_download_url("not existing") is None diff --git a/tests/mechanic/team_test.py b/tests/mechanic/team_test.py index 201459b6c..0912e9d8e 100644 --- a/tests/mechanic/team_test.py +++ b/tests/mechanic/team_test.py @@ -17,6 +17,9 @@ import os from unittest import TestCase +import re + +import pytest from esrally import exceptions from esrally.mechanic import team @@ -43,91 +46,91 @@ def test_lists_car_names(self): def test_load_known_car(self): car = team.load_car(self.team_dir, ["default"], car_params={"data_paths": ["/mnt/disk0", "/mnt/disk1"]}) - self.assertEqual("default", car.name) - self.assertEqual([os.path.join(current_dir, "data", "cars", "v1", "vanilla", "templates")], car.config_paths) - 
self.assertIsNone(car.root_path) - self.assertDictEqual({ + assert car.name == "default" + assert car.config_paths == [os.path.join(current_dir, "data", "cars", "v1", "vanilla", "templates")] + assert car.root_path is None + assert car.variables == { "heap_size": "1g", "clean_command": "./gradlew clean", "data_paths": ["/mnt/disk0", "/mnt/disk1"] - }, car.variables) - self.assertIsNone(car.root_path) + } + assert car.root_path is None def test_load_car_with_mixin_single_config_base(self): car = team.load_car(self.team_dir, ["32gheap", "ea"]) - self.assertEqual("32gheap+ea", car.name) - self.assertEqual([os.path.join(current_dir, "data", "cars", "v1", "vanilla", "templates")], car.config_paths) - self.assertIsNone(car.root_path) - self.assertEqual({ + assert car.name == "32gheap+ea" + assert car.config_paths == [os.path.join(current_dir, "data", "cars", "v1", "vanilla", "templates")] + assert car.root_path is None + assert car.variables == { "heap_size": "32g", "clean_command": "./gradlew clean", "assertions": "true" - }, car.variables) - self.assertIsNone(car.root_path) + } + assert car.root_path is None def test_load_car_with_mixin_multiple_config_bases(self): car = team.load_car(self.team_dir, ["32gheap", "ea", "verbose"]) - self.assertEqual("32gheap+ea+verbose", car.name) - self.assertEqual([ + assert car.name == "32gheap+ea+verbose" + assert car.config_paths == [ os.path.join(current_dir, "data", "cars", "v1", "vanilla", "templates"), os.path.join(current_dir, "data", "cars", "v1", "verbose_logging", "templates"), - ], car.config_paths) - self.assertIsNone(car.root_path) - self.assertEqual({ + ] + assert car.root_path is None + assert car.variables == { "heap_size": "32g", "clean_command": "./gradlew clean", "verbose_logging": "true", "assertions": "true" - }, car.variables) + } def test_load_car_with_install_hook(self): car = team.load_car(self.team_dir, ["default", "with_hook"], car_params={"data_paths": ["/mnt/disk0", "/mnt/disk1"]}) - self.assertEqual("default+with_hook", car.name) - self.assertEqual([ + assert car.name == "default+with_hook" + assert car.config_paths == [ os.path.join(current_dir, "data", "cars", "v1", "vanilla", "templates"), os.path.join(current_dir, "data", "cars", "v1", "with_hook", "templates"), - ], car.config_paths) - self.assertEqual(os.path.join(current_dir, "data", "cars", "v1", "with_hook"), car.root_path) - self.assertDictEqual({ + ] + assert car.root_path == os.path.join(current_dir, "data", "cars", "v1", "with_hook") + assert car.variables == { "heap_size": "1g", "clean_command": "./gradlew clean", "data_paths": ["/mnt/disk0", "/mnt/disk1"] - }, car.variables) + } def test_load_car_with_multiple_bases_referring_same_install_hook(self): car = team.load_car(self.team_dir, ["with_hook", "another_with_hook"]) - self.assertEqual("with_hook+another_with_hook", car.name) - self.assertEqual([ + assert car.name == "with_hook+another_with_hook" + assert car.config_paths == [ os.path.join(current_dir, "data", "cars", "v1", "vanilla", "templates"), os.path.join(current_dir, "data", "cars", "v1", "with_hook", "templates"), os.path.join(current_dir, "data", "cars", "v1", "verbose_logging", "templates") - ], car.config_paths) - self.assertEqual(os.path.join(current_dir, "data", "cars", "v1", "with_hook"), car.root_path) - self.assertDictEqual({ + ] + assert car.root_path == os.path.join(current_dir, "data", "cars", "v1", "with_hook") + assert car.variables == { "heap_size": "16g", "clean_command": "./gradlew clean", "verbose_logging": "true" - }, car.variables) + } 
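A note on the dict hunks above: `assertDictEqual`/`assertEqual` collapse to a plain `==` because pytest's assertion rewriting already prints a key-by-key diff on failure. A minimal runnable sketch, assuming a hypothetical `Car` stand-in rather than the real `team.Car`:

```python
# Illustrative only: this Car is a hypothetical stand-in, not esrally's team.Car.
import dataclasses

@dataclasses.dataclass
class Car:
    name: str
    variables: dict

def test_variables_compare_as_plain_dicts():
    car = Car(name="default", variables={"heap_size": "1g", "clean_command": "./gradlew clean"})
    # On mismatch, pytest reports the differing keys, matching what
    # assertDictEqual used to provide.
    assert car.variables == {"heap_size": "1g", "clean_command": "./gradlew clean"}
```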
def test_raises_error_on_unknown_car(self): - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: team.load_car(self.team_dir, ["don_t-know-you"]) - self.assertRegex(ctx.exception.args[0], r"Unknown car \[don_t-know-you\]. List the available cars with [^\s]+ list cars.") + assert re.search(r"Unknown car \[don_t-know-you\]. List the available cars with [^\s]+ list cars.", ctx.value.args[0]) def test_raises_error_on_empty_config_base(self): - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: team.load_car(self.team_dir, ["empty_cfg_base"]) - self.assertEqual("At least one config base is required for car ['empty_cfg_base']", ctx.exception.args[0]) + assert ctx.value.args[0] == "At least one config base is required for car ['empty_cfg_base']" def test_raises_error_on_missing_config_base(self): - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: team.load_car(self.team_dir, ["missing_cfg_base"]) - self.assertEqual("At least one config base is required for car ['missing_cfg_base']", ctx.exception.args[0]) + assert ctx.value.args[0] == "At least one config base is required for car ['missing_cfg_base']" def test_raises_error_if_more_than_one_different_install_hook(self): - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: team.load_car(self.team_dir, ["multi_hook"]) - self.assertEqual("Invalid car: ['multi_hook']. Multiple bootstrap hooks are forbidden.", ctx.exception.args[0]) + assert ctx.value.args[0] == "Invalid car: ['multi_hook']. Multiple bootstrap hooks are forbidden." class PluginLoaderTests(TestCase): @@ -149,63 +152,62 @@ def test_lists_plugins(self): ], self.loader.plugins()) def test_loads_core_plugin(self): - self.assertEqual(team.PluginDescriptor(name="my-analysis-plugin", core_plugin=True, variables={"dbg": True}), - self.loader.load_plugin("my-analysis-plugin", config_names=None, plugin_params={"dbg": True})) + assert self.loader.load_plugin("my-analysis-plugin", config_names=None, plugin_params={"dbg": True}) == team.PluginDescriptor(name="my-analysis-plugin", core_plugin=True, variables={"dbg": True}) def test_loads_core_plugin_with_config(self): plugin = self.loader.load_plugin("my-core-plugin-with-config", config_names=None, plugin_params={"dbg": True}) - self.assertEqual("my-core-plugin-with-config", plugin.name) - self.assertTrue(plugin.core_plugin) + assert plugin.name == "my-core-plugin-with-config" + assert plugin.core_plugin expected_root_path = os.path.join(current_dir, "data", "plugins", "v1", "my_core_plugin_with_config") - self.assertEqual(expected_root_path, plugin.root_path) - self.assertEqual(0, len(plugin.config_paths)) + assert plugin.root_path == expected_root_path + assert len(plugin.config_paths) == 0 - self.assertEqual({ + assert plugin.variables == { # from plugin params "dbg": True - }, plugin.variables) + } def test_cannot_load_plugin_with_missing_config(self): - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: self.loader.load_plugin("my-analysis-plugin", ["missing-config"]) - self.assertRegex(ctx.exception.args[0], r"Plugin \[my-analysis-plugin\] does not provide configuration \[missing-config\]. 
List the" - r" available plugins and configurations with [^\s]+ list elasticsearch-plugins " - r"--distribution-version=VERSION.") + assert re.search(r"Plugin \[my-analysis-plugin\] does not provide configuration \[missing-config\]. List the" \ + r" available plugins and configurations with [^\s]+ list elasticsearch-plugins " \ + r"--distribution-version=VERSION.", ctx.value.args[0]) def test_loads_community_plugin_without_configuration(self): - self.assertEqual(team.PluginDescriptor("my-community-plugin"), self.loader.load_plugin("my-community-plugin", None)) + assert self.loader.load_plugin("my-community-plugin", None) == team.PluginDescriptor("my-community-plugin") def test_cannot_load_community_plugin_with_missing_config(self): - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: self.loader.load_plugin("my-community-plugin", "some-configuration") - self.assertRegex(ctx.exception.args[0], r"Unknown plugin \[my-community-plugin\]. List the available plugins with [^\s]+ list " - r"elasticsearch-plugins --distribution-version=VERSION.") + assert re.search(r"Unknown plugin \[my-community-plugin\]. List the available plugins with [^\s]+ list " \ + r"elasticsearch-plugins --distribution-version=VERSION.", ctx.value.args[0]) def test_loads_configured_plugin(self): plugin = self.loader.load_plugin("complex-plugin", ["config-a", "config-b"], plugin_params={"dbg": True}) - self.assertEqual("complex-plugin", plugin.name) - self.assertFalse(plugin.core_plugin) + assert plugin.name == "complex-plugin" + assert not plugin.core_plugin self.assertCountEqual(["config-a", "config-b"], plugin.config) expected_root_path = os.path.join(current_dir, "data", "plugins", "v1", "complex_plugin") - self.assertEqual(expected_root_path, plugin.root_path) + assert plugin.root_path == expected_root_path # order does matter here! We should not swap it - self.assertListEqual([ + assert plugin.config_paths == [ os.path.join(expected_root_path, "default", "templates"), os.path.join(expected_root_path, "special", "templates"), - ], plugin.config_paths) + ] - self.assertEqual({ + assert plugin.variables == { "foo": "bar", "baz": "foo", "var": "0", "hello": "true", # from plugin params "dbg": True - }, plugin.variables) + } class BootstrapHookHandlerTests(TestCase): @@ -243,7 +245,7 @@ def test_loads_module(self): handler.invoke("post_install", variables={"increment": 4}) # we registered our hook twice. Check that it has been called twice. - self.assertEqual(hook.call_counter, 2 * 4) + assert 2 * 4 == hook.call_counter def test_cannot_register_for_unknown_phase(self): plugin = team.PluginDescriptor("unittest-plugin") @@ -251,7 +253,6 @@ def test_cannot_register_for_unknown_phase(self): handler = team.BootstrapHookHandler(plugin, loader_class=BootstrapHookHandlerTests.UnitTestComponentLoader) handler.loader.registration_function = hook - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: handler.load() - self.assertEqual("Unknown bootstrap phase [this_is_an_unknown_install_phase]. Valid phases are: ['post_install'].", - ctx.exception.args[0]) + assert ctx.value.args[0] == "Unknown bootstrap phase [this_is_an_unknown_install_phase]. Valid phases are: ['post_install']." 
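Before the metrics_test.py hunks, a recap of the exception idiom used throughout this patch: `self.assertRaises(...)` becomes `pytest.raises(...)`, and the context manager yields an `ExceptionInfo`, so the exception is reached via `ctx.value`, not `ctx.exception`. A self-contained sketch with an invented `boom()` helper:

```python
# boom() is invented for illustration; it does not exist in esrally.
import pytest

def boom():
    raise ValueError("Unknown bootstrap phase [x]. Valid phases are: ['post_install'].")

def test_exception_idiom():
    with pytest.raises(ValueError) as ctx:
        boom()
    # pytest.raises yields an ExceptionInfo; the raised exception is .value
    assert ctx.value.args[0].startswith("Unknown bootstrap phase")
```

For regex assertions, `pytest.raises(..., match=r"...")` replaces `assertRaisesRegex`, as the telemetry hunks further down show.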
diff --git a/tests/metrics_test.py b/tests/metrics_test.py index 9ca821f8e..5a16b87c4 100644 --- a/tests/metrics_test.py +++ b/tests/metrics_test.py @@ -28,6 +28,7 @@ from unittest import TestCase import elasticsearch.exceptions +import pytest from esrally import config, metrics, track, exceptions, paths from esrally.metrics import GlobalStatsCalculator @@ -114,26 +115,26 @@ def side_effects(self): class ExtractUserTagsTests(TestCase): def test_no_tags_returns_empty_dict(self): cfg = config.Config() - self.assertEqual(0, len(metrics.extract_user_tags_from_config(cfg))) + assert len(metrics.extract_user_tags_from_config(cfg)) == 0 def test_missing_comma_raises_error(self): cfg = config.Config() cfg.add(config.Scope.application, "race", "user.tag", "invalid") - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: metrics.extract_user_tags_from_config(cfg) - self.assertEqual("User tag keys and values have to separated by a ':'. Invalid value [invalid]", ctx.exception.args[0]) + assert ctx.value.args[0] == "User tag keys and values have to separated by a ':'. Invalid value [invalid]" def test_missing_value_raises_error(self): cfg = config.Config() cfg.add(config.Scope.application, "race", "user.tag", "invalid1,invalid2") - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: metrics.extract_user_tags_from_config(cfg) - self.assertEqual("User tag keys and values have to separated by a ':'. Invalid value [invalid1,invalid2]", ctx.exception.args[0]) + assert ctx.value.args[0] == "User tag keys and values have to separated by a ':'. Invalid value [invalid1,invalid2]" def test_extracts_proper_user_tags(self): cfg = config.Config() cfg.add(config.Scope.application, "race", "user.tag", "os:Linux,cpu:ARM") - self.assertDictEqual({"os": "Linux", "cpu": "ARM"}, metrics.extract_user_tags_from_config(cfg)) + assert metrics.extract_user_tags_from_config(cfg) == {"os": "Linux", "cpu": "ARM"} class EsClientTests(TestCase): @@ -184,11 +185,10 @@ def raise_connection_error(): client = metrics.EsClient(EsClientTests.ClientMock([{"host": "127.0.0.1", "port": "9200"}])) - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: client.guarded(raise_connection_error) - self.assertEqual("Could not connect to your Elasticsearch metrics store. Please check that it is running on host [127.0.0.1] at " - "port [9200] or fix the configuration in [%s/rally.ini]." % paths.rally_confdir(), - ctx.exception.args[0]) + assert ctx.value.args[0] == "Could not connect to your Elasticsearch metrics store. Please check that it is running on host [127.0.0.1] at " \ + "port [9200] or fix the configuration in [%s/rally.ini]." % paths.rally_confdir() def test_raises_sytem_setup_error_on_authentication_problems(self): def raise_authentication_error(): @@ -196,11 +196,11 @@ def raise_authentication_error(): client = metrics.EsClient(EsClientTests.ClientMock([{"host": "127.0.0.1", "port": "9243"}])) - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: client.guarded(raise_authentication_error) - self.assertEqual("The configured user could not authenticate against your Elasticsearch metrics store running on host [127.0.0.1] " - "at port [9243] (wrong password?). Please fix the configuration in [%s/rally.ini]." 
- % paths.rally_confdir(), ctx.exception.args[0]) + assert ctx.value.args[0] == "The configured user could not authenticate against your Elasticsearch metrics store running on host [127.0.0.1] " \ + "at port [9243] (wrong password?). Please fix the configuration in [%s/rally.ini]." \ + % paths.rally_confdir() def test_raises_sytem_setup_error_on_authorization_problems(self): def raise_authorization_error(): @@ -208,12 +208,12 @@ def raise_authorization_error(): client = metrics.EsClient(EsClientTests.ClientMock([{"host": "127.0.0.1", "port": "9243"}])) - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: client.guarded(raise_authorization_error) - self.assertEqual("The configured user does not have enough privileges to run the operation [raise_authorization_error] against " - "your Elasticsearch metrics store running on host [127.0.0.1] at port [9243]. Please adjust your x-pack " - "configuration or specify a user with enough privileges in the configuration in [%s/rally.ini]." - % paths.rally_confdir(), ctx.exception.args[0]) + assert ctx.value.args[0] == "The configured user does not have enough privileges to run the operation [raise_authorization_error] against " \ + "your Elasticsearch metrics store running on host [127.0.0.1] at port [9243]. Please adjust your x-pack " \ + "configuration or specify a user with enough privileges in the configuration in [%s/rally.ini]." \ + % paths.rally_confdir() def test_raises_rally_error_on_unknown_problems(self): def raise_unknown_error(): @@ -221,10 +221,10 @@ def raise_unknown_error(): client = metrics.EsClient(EsClientTests.ClientMock([{"host": "127.0.0.1", "port": "9243"}])) - with self.assertRaises(exceptions.RallyError) as ctx: + with pytest.raises(exceptions.RallyError) as ctx: client.guarded(raise_unknown_error) - self.assertEqual("An unknown error occurred while running the operation [raise_unknown_error] against your Elasticsearch metrics " - "store on host [127.0.0.1] at port [9243].", ctx.exception.args[0]) + assert ctx.value.args[0] == "An unknown error occurred while running the operation [raise_unknown_error] against your Elasticsearch metrics " \ + "store on host [127.0.0.1] at port [9243]." def test_retries_on_various_transport_errors(self): @mock.patch("random.random") @@ -246,7 +246,7 @@ def test_transport_error_retries(side_effect, expected_logging_calls, expected_s expected_logging_calls, any_order=True ) - self.assertEqual("success", test_result) + assert test_result == "success" max_retry = 10 all_err_codes = TransportErrors.err_return_codes @@ -280,13 +280,12 @@ def random_transport_error(rnd_resp_code): client = metrics.EsClient(EsClientTests.ClientMock([{"host": "127.0.0.1", "port": "9243"}])) rnd_code = random.choice(list(TransportErrors.err_return_codes)) - with self.assertRaises(exceptions.RallyError) as ctx: + with pytest.raises(exceptions.RallyError) as ctx: client.guarded(random_transport_error, rnd_code) - self.assertEqual("A transport error occurred while running the operation " - "[random_transport_error] against your Elasticsearch metrics " - "store on host [127.0.0.1] at port [9243].", - ctx.exception.args[0]) + assert ctx.value.args[0] == "A transport error occurred while running the operation " \ + "[random_transport_error] against your Elasticsearch metrics " \ + "store on host [127.0.0.1] at port [9243]." 
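The EsClient hunks above compare long `%`-formatted messages via backslash continuations. When only a fragment of the message matters, the `match=` parameter (a regex applied with `re.search`) is shorter; literal brackets must be escaped. An illustrative sketch, not esrally code:

```python
# ConnectionError and the message below are stand-ins for the esrally
# exceptions exercised above.
import re
import pytest

def guarded():
    raise ConnectionError("Could not connect to your Elasticsearch metrics store "
                          "on host [127.0.0.1] at port [9200].")

def test_match_is_a_regex():
    # match= applies re.search to str(exc), so escape literal brackets.
    with pytest.raises(ConnectionError, match=re.escape("at port [9200]")):
        guarded()
```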
class EsMetricsTests(TestCase): @@ -548,7 +547,7 @@ def test_get_one(self): self.es_mock.search.assert_called_with(index="rally-metrics-2016-01", body=expected_query) - self.assertEqual(duration, actual_duration) + assert actual_duration == duration def test_get_one_no_hits(self): duration = None @@ -595,7 +594,7 @@ def test_get_one_no_hits(self): self.es_mock.search.assert_called_with(index="rally-metrics-2016-01", body=expected_query) - self.assertEqual(duration, actual_duration) + assert actual_duration == duration def test_get_value(self): throughput = 5000 @@ -640,7 +639,7 @@ def test_get_value(self): self.es_mock.search.assert_called_with(index="rally-metrics-2016-01", body=expected_query) - self.assertEqual(throughput, actual_throughput) + assert actual_throughput == throughput def test_get_per_node_value(self): index_size = 5000 @@ -690,7 +689,7 @@ def test_get_per_node_value(self): self.es_mock.search.assert_called_with(index="rally-metrics-2016-01", body=expected_query) - self.assertEqual(index_size, actual_index_size) + assert actual_index_size == index_size def test_get_mean(self): mean_throughput = 1734 @@ -748,7 +747,7 @@ def test_get_mean(self): self.es_mock.search.assert_called_with(index="rally-metrics-2016-01", body=expected_query) - self.assertEqual(mean_throughput, actual_mean_throughput) + assert actual_mean_throughput == mean_throughput def test_get_median(self): median_throughput = 30535 @@ -805,20 +804,20 @@ def test_get_median(self): self.es_mock.search.assert_called_with(index="rally-metrics-2016-01", body=expected_query) - self.assertEqual(median_throughput, actual_median_throughput) + assert actual_median_throughput == median_throughput def test_get_error_rate_implicit_zero(self): - self.assertEqual(0.0, self._get_error_rate(buckets=[ + assert self._get_error_rate(buckets=[ { "key": 1, "key_as_string": "true", "doc_count": 0 } - ])) + ]) == 0.0 def test_get_error_rate_explicit_zero(self): - self.assertEqual(0.0, self._get_error_rate(buckets=[ + assert self._get_error_rate(buckets=[ { "key": 0, "key_as_string": "false", @@ -829,19 +828,19 @@ def test_get_error_rate_explicit_zero(self): "key_as_string": "true", "doc_count": 500 } - ])) + ]) == 0.0 def test_get_error_rate_implicit_one(self): - self.assertEqual(1.0, self._get_error_rate(buckets=[ + assert self._get_error_rate(buckets=[ { "key": 0, "key_as_string": "false", "doc_count": 123 } - ])) + ]) == 1.0 def test_get_error_rate_explicit_one(self): - self.assertEqual(1.0, self._get_error_rate(buckets=[ + assert self._get_error_rate(buckets=[ { "key": 0, "key_as_string": "false", @@ -852,10 +851,10 @@ def test_get_error_rate_explicit_one(self): "key_as_string": "true", "doc_count": 0 } - ])) + ]) == 1.0 def test_get_error_rate_mixed(self): - self.assertEqual(0.5, self._get_error_rate(buckets=[ + assert self._get_error_rate(buckets=[ { "key": 0, "key_as_string": "false", @@ -866,10 +865,10 @@ def test_get_error_rate_mixed(self): "key_as_string": "true", "doc_count": 500 } - ])) + ]) == 0.5 def test_get_error_rate_additional_unknown_key(self): - self.assertEqual(0.25, self._get_error_rate(buckets=[ + assert self._get_error_rate(buckets=[ { "key": 0, "key_as_string": "false", @@ -885,7 +884,7 @@ def test_get_error_rate_additional_unknown_key(self): "key_as_string": "undefined_for_test", "doc_count": 13700 } - ])) + ]) == 0.25 def _get_error_rate(self, buckets): search_result = { @@ -992,7 +991,7 @@ def test_find_existing_race_by_race_id(self): } race = 
self.race_store.find_by_race_id(race_id=EsRaceStoreTests.RACE_ID) - self.assertEqual(race.race_id, EsRaceStoreTests.RACE_ID) + assert EsRaceStoreTests.RACE_ID == race.race_id def test_does_not_find_missing_race_by_race_id(self): self.es_mock.search.return_value = { @@ -1005,7 +1004,7 @@ def test_does_not_find_missing_race_by_race_id(self): } } - with self.assertRaisesRegex(exceptions.NotFound, r"No race with race id \[.*\]"): + with pytest.raises(exceptions.NotFound, match=r"No race with race id \[.*\]"): self.race_store.find_by_race_id(race_id="some invalid race id") def test_store_race(self): @@ -1413,7 +1412,7 @@ def test_get_one(self): actual_duration = self.metrics_store.get_one("service_time", task="task1", mapper=lambda doc: doc["relative-time-ms"], sort_key="relative-time-ms", sort_reverse=True) - self.assertEqual(duration * 1000, actual_duration) + assert actual_duration == duration * 1000 def test_get_one_no_hits(self): duration = StaticClock.NOW @@ -1429,7 +1428,7 @@ def test_get_one_no_hits(self): actual_duration = self.metrics_store.get_one("service_time", task="task1", mapper=lambda doc: doc["relative-time-ms"], sort_key="relative-time-ms", sort_reverse=True) - self.assertIsNone(actual_duration) + assert actual_duration is None def test_get_value(self): throughput = 5000 @@ -1444,8 +1443,8 @@ def test_get_value(self): self.metrics_store.open(InMemoryMetricsStoreTests.RACE_ID, InMemoryMetricsStoreTests.RACE_TIMESTAMP, "test", "append-no-conflicts", "defaults") - self.assertEqual(1, self.metrics_store.get_one("indexing_throughput", sample_type=metrics.SampleType.Warmup)) - self.assertEqual(throughput, self.metrics_store.get_one("indexing_throughput", sample_type=metrics.SampleType.Normal)) + assert self.metrics_store.get_one("indexing_throughput", sample_type=metrics.SampleType.Warmup) == 1 + assert self.metrics_store.get_one("indexing_throughput", sample_type=metrics.SampleType.Normal) == throughput def test_get_percentile(self): self.metrics_store.open(InMemoryMetricsStoreTests.RACE_ID, InMemoryMetricsStoreTests.RACE_TIMESTAMP, @@ -1476,7 +1475,7 @@ def test_get_mean(self): self.metrics_store.open(InMemoryMetricsStoreTests.RACE_ID, InMemoryMetricsStoreTests.RACE_TIMESTAMP, "test", "append-no-conflicts", "defaults") - self.assertAlmostEqual(50, self.metrics_store.get_mean("query_latency")) + assert round(abs(50-self.metrics_store.get_mean("query_latency")), 7) == 0 def test_get_median(self): self.metrics_store.open(InMemoryMetricsStoreTests.RACE_ID, InMemoryMetricsStoreTests.RACE_TIMESTAMP, @@ -1489,32 +1488,32 @@ def test_get_median(self): self.metrics_store.open(InMemoryMetricsStoreTests.RACE_ID, InMemoryMetricsStoreTests.RACE_TIMESTAMP, "test", "append-no-conflicts", "defaults") - self.assertAlmostEqual(500.5, self.metrics_store.get_median("query_latency")) + assert round(abs(500.5-self.metrics_store.get_median("query_latency")), 7) == 0 def assert_equal_percentiles(self, name, percentiles, expected_percentiles): actual_percentiles = self.metrics_store.get_percentiles(name, percentiles=percentiles) - self.assertEqual(len(expected_percentiles), len(actual_percentiles)) + assert len(actual_percentiles) == len(expected_percentiles) for percentile, actual_percentile_value in actual_percentiles.items(): - self.assertAlmostEqual(expected_percentiles[percentile], actual_percentile_value, places=1, - msg=str(percentile) + "th percentile differs") + assert round(abs(expected_percentiles[percentile]-actual_percentile_value), 1) \ + == 0.0, str(percentile) + "th percentile differs" 
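The `round(abs(expected - actual), places) == 0` pattern above is the mechanical translation of `assertAlmostEqual`; `pytest.approx` expresses the same tolerance more directly. A standalone sketch with arbitrary numbers:

```python
import pytest

def test_approx_replaces_assert_almost_equal():
    # unittest's places=7 is roughly an absolute tolerance of 0.5e-7;
    # pytest.approx makes the tolerance explicit.
    assert 500.5 == pytest.approx(500.50000004, abs=1e-7)
    # Without arguments, approx defaults to a relative tolerance of 1e-6.
    assert sum([0, 100]) / 2 == pytest.approx(50)
```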
def test_externalize_and_bulk_add(self): self.metrics_store.open(InMemoryMetricsStoreTests.RACE_ID, InMemoryMetricsStoreTests.RACE_TIMESTAMP, "test", "append-no-conflicts", "defaults", create=True) self.metrics_store.put_value_cluster_level("final_index_size", 1000, "GB") - self.assertEqual(1, len(self.metrics_store.docs)) + assert len(self.metrics_store.docs) == 1 memento = self.metrics_store.to_externalizable() self.metrics_store.close() del self.metrics_store self.metrics_store = metrics.InMemoryMetricsStore(self.cfg, clock=StaticClock) - self.assertEqual(0, len(self.metrics_store.docs)) + assert len(self.metrics_store.docs) == 0 self.metrics_store.bulk_add(memento) - self.assertEqual(1, len(self.metrics_store.docs)) - self.assertEqual(1000, self.metrics_store.get_one("final_index_size")) + assert len(self.metrics_store.docs) == 1 + assert self.metrics_store.get_one("final_index_size") == 1000 def test_meta_data_per_document(self): self.metrics_store.open(InMemoryMetricsStoreTests.RACE_ID, InMemoryMetricsStoreTests.RACE_TIMESTAMP, @@ -1528,16 +1527,16 @@ def test_meta_data_per_document(self): "io-batch-size-kb": 4 }) - self.assertEqual(2, len(self.metrics_store.docs)) - self.assertEqual({ + assert len(self.metrics_store.docs) == 2 + assert self.metrics_store.docs[0]["meta"] == { "cluster-name": "test", "fs-block-size-bytes": 512 - }, self.metrics_store.docs[0]["meta"]) + } - self.assertEqual({ + assert self.metrics_store.docs[1]["meta"] == { "cluster-name": "test", "io-batch-size-kb": 4 - }, self.metrics_store.docs[1]["meta"]) + } def test_get_error_rate_zero_without_samples(self): self.metrics_store.open(InMemoryMetricsStoreTests.RACE_ID, InMemoryMetricsStoreTests.RACE_TIMESTAMP, @@ -1547,7 +1546,7 @@ def test_get_error_rate_zero_without_samples(self): self.metrics_store.open(InMemoryMetricsStoreTests.RACE_ID, InMemoryMetricsStoreTests.RACE_TIMESTAMP, "test", "append-no-conflicts", "defaults") - self.assertEqual(0.0, self.metrics_store.get_error_rate("term-query", sample_type=metrics.SampleType.Normal)) + assert self.metrics_store.get_error_rate("term-query", sample_type=metrics.SampleType.Normal) == 0.0 def test_get_error_rate_by_sample_type(self): self.metrics_store.open(InMemoryMetricsStoreTests.RACE_ID, InMemoryMetricsStoreTests.RACE_TIMESTAMP, @@ -1562,8 +1561,8 @@ def test_get_error_rate_by_sample_type(self): self.metrics_store.open(InMemoryMetricsStoreTests.RACE_ID, InMemoryMetricsStoreTests.RACE_TIMESTAMP, "test", "append-no-conflicts", "defaults") - self.assertEqual(1.0, self.metrics_store.get_error_rate("term-query", sample_type=metrics.SampleType.Warmup)) - self.assertEqual(0.0, self.metrics_store.get_error_rate("term-query", sample_type=metrics.SampleType.Normal)) + assert self.metrics_store.get_error_rate("term-query", sample_type=metrics.SampleType.Warmup) == 1.0 + assert self.metrics_store.get_error_rate("term-query", sample_type=metrics.SampleType.Normal) == 0.0 def test_get_error_rate_mixed(self): self.metrics_store.open(InMemoryMetricsStoreTests.RACE_ID, InMemoryMetricsStoreTests.RACE_TIMESTAMP, @@ -1584,8 +1583,8 @@ def test_get_error_rate_mixed(self): self.metrics_store.open(InMemoryMetricsStoreTests.RACE_ID, InMemoryMetricsStoreTests.RACE_TIMESTAMP, "test", "append-no-conflicts", "defaults") - self.assertEqual(0.0, self.metrics_store.get_error_rate("term-query", sample_type=metrics.SampleType.Warmup)) - self.assertEqual(0.2, self.metrics_store.get_error_rate("term-query", sample_type=metrics.SampleType.Normal)) + assert 
self.metrics_store.get_error_rate("term-query", sample_type=metrics.SampleType.Warmup) == 0.0 + assert self.metrics_store.get_error_rate("term-query", sample_type=metrics.SampleType.Normal) == 0.2 class FileRaceStoreTests(TestCase): @@ -1609,7 +1608,7 @@ def setUp(self): self.race_store = metrics.FileRaceStore(self.cfg) def test_race_not_found(self): - with self.assertRaisesRegex(exceptions.NotFound, r"No race with race id \[.*\]"): + with pytest.raises(exceptions.NotFound, match=r"No race with race id \[.*\]"): # did not store anything yet self.race_store.find_by_race_id(FileRaceStoreTests.RACE_ID) @@ -1650,9 +1649,9 @@ def test_store_race(self): self.race_store.store_race(race) retrieved_race = self.race_store.find_by_race_id(race_id=FileRaceStoreTests.RACE_ID) - self.assertEqual(race.race_id, retrieved_race.race_id) - self.assertEqual(race.race_timestamp, retrieved_race.race_timestamp) - self.assertEqual(1, len(self.race_store.list())) + assert retrieved_race.race_id == race.race_id + assert retrieved_race.race_timestamp == race.race_timestamp + assert len(self.race_store.list()) == 1 class StatsCalculatorTests(TestCase): @@ -1719,27 +1718,27 @@ def test_calculate_global_stats(self): del store opm = stats.metrics("index #1") - self.assertEqual(collections.OrderedDict( - [("min", 500), ("mean", 1125), ("median", 1000), ("max", 2000), ("unit", "docs/s")]), opm["throughput"]) - self.assertEqual(collections.OrderedDict( - [("50_0", 220), ("100_0", 225), ("mean", 215), ("unit", "ms")]), opm["latency"]) - self.assertEqual(collections.OrderedDict( - [("50_0", 200), ("100_0", 210), ("mean", 200), ("unit", "ms")]), opm["service_time"]) - self.assertAlmostEqual(0.3333333333333333, opm["error_rate"]) - self.assertEqual(709*1000, opm["duration"]) + assert opm["throughput"] == collections.OrderedDict( + [("min", 500), ("mean", 1125), ("median", 1000), ("max", 2000), ("unit", "docs/s")]) + assert opm["latency"] == collections.OrderedDict( + [("50_0", 220), ("100_0", 225), ("mean", 215), ("unit", "ms")]) + assert opm["service_time"] == collections.OrderedDict( + [("50_0", 200), ("100_0", 210), ("mean", 200), ("unit", "ms")]) + assert round(abs(0.3333333333333333-opm["error_rate"]), 7) == 0 + assert opm["duration"] == 709*1000 opm2 = stats.metrics("index #2") - self.assertEqual(collections.OrderedDict( - [("min", None), ("mean", None), ("median", None), ("max", None), ("unit", "docs/s")]), opm2["throughput"]) - - self.assertEqual(1, len(stats.ml_processing_time)) - self.assertEqual("benchmark_ml_job_1", stats.ml_processing_time[0]["job"]) - self.assertEqual(2.2, stats.ml_processing_time[0]["min"]) - self.assertEqual(12.3, stats.ml_processing_time[0]["mean"]) - self.assertEqual(17.2, stats.ml_processing_time[0]["median"]) - self.assertEqual(36.0, stats.ml_processing_time[0]["max"]) - self.assertEqual("ms", stats.ml_processing_time[0]["unit"]) - self.assertEqual(600*1000, opm2["duration"]) + assert opm2["throughput"] == collections.OrderedDict( + [("min", None), ("mean", None), ("median", None), ("max", None), ("unit", "docs/s")]) + + assert len(stats.ml_processing_time) == 1 + assert stats.ml_processing_time[0]["job"] == "benchmark_ml_job_1" + assert stats.ml_processing_time[0]["min"] == 2.2 + assert stats.ml_processing_time[0]["mean"] == 12.3 + assert stats.ml_processing_time[0]["median"] == 17.2 + assert stats.ml_processing_time[0]["max"] == 36.0 + assert stats.ml_processing_time[0]["unit"] == "ms" + assert opm2["duration"] == 600*1000 def test_calculate_system_stats(self): cfg = config.Config() @@ 
-1769,14 +1768,14 @@ def test_calculate_system_stats(self): del store - self.assertEqual([ + assert stats.node_metrics == [ { "node": "rally-node-0", "name": "index_size", "value": 2048, "unit": "bytes" } - ], stats.node_metrics) + ] def select(l, name, operation=None, job=None, node=None): @@ -1916,7 +1915,7 @@ def test_as_flat_list(self): s = metrics.GlobalStats(d) metric_list = s.as_flat_list() - self.assertEqual({ + assert select(metric_list, "throughput", operation="index") == { "name": "throughput", "task": "index #1", "operation": "index", @@ -1931,9 +1930,9 @@ def test_as_flat_list(self): "clients": 8, "phase": "idx" } - }, select(metric_list, "throughput", operation="index")) + } - self.assertEqual({ + assert select(metric_list, "service_time", operation="index") == { "name": "service_time", "task": "index #1", "operation": "index", @@ -1945,9 +1944,9 @@ def test_as_flat_list(self): "clients": 8, "phase": "idx" } - }, select(metric_list, "service_time", operation="index")) + } - self.assertEqual({ + assert select(metric_list, "latency", operation="index") == { "name": "latency", "task": "index #1", "operation": "index", @@ -1959,9 +1958,9 @@ def test_as_flat_list(self): "clients": 8, "phase": "idx" } - }, select(metric_list, "latency", operation="index")) + } - self.assertEqual({ + assert select(metric_list, "error_rate", operation="index") == { "name": "error_rate", "task": "index #1", "operation": "index", @@ -1972,9 +1971,9 @@ def test_as_flat_list(self): "clients": 8, "phase": "idx" } - }, select(metric_list, "error_rate", operation="index")) + } - self.assertEqual({ + assert select(metric_list, "throughput", operation="search") == { "name": "throughput", "task": "search #2", "operation": "search", @@ -1985,9 +1984,9 @@ def test_as_flat_list(self): "max": 12, "unit": "ops/s" } - }, select(metric_list, "throughput", operation="search")) + } - self.assertEqual({ + assert select(metric_list, "service_time", operation="search") == { "name": "service_time", "task": "search #2", "operation": "search", @@ -1995,9 +1994,9 @@ def test_as_flat_list(self): "50": 98, "100": 110 } - }, select(metric_list, "service_time", operation="search")) + } - self.assertEqual({ + assert select(metric_list, "latency", operation="search") == { "name": "latency", "task": "search #2", "operation": "search", @@ -2005,18 +2004,18 @@ def test_as_flat_list(self): "50": 99, "100": 111 } - }, select(metric_list, "latency", operation="search")) + } - self.assertEqual({ + assert select(metric_list, "error_rate", operation="search") == { "name": "error_rate", "task": "search #2", "operation": "search", "value": { "single": 0.1 } - }, select(metric_list, "error_rate", operation="search")) + } - self.assertEqual({ + assert select(metric_list, "ml_processing_time", job="job_1") == { "name": "ml_processing_time", "job": "job_1", "value": { @@ -2025,9 +2024,9 @@ def test_as_flat_list(self): "median": 5.8, "max": 12.34 } - }, select(metric_list, "ml_processing_time", job="job_1")) + } - self.assertEqual({ + assert select(metric_list, "ml_processing_time", job="job_2") == { "name": "ml_processing_time", "job": "job_2", "value": { @@ -2036,42 +2035,42 @@ def test_as_flat_list(self): "median": 4.9, "max": 9.4 } - }, select(metric_list, "ml_processing_time", job="job_2")) + } - self.assertEqual({ + assert select(metric_list, "young_gc_time") == { "name": "young_gc_time", "value": { "single": 68 } - }, select(metric_list, "young_gc_time")) - self.assertEqual({ + } + assert select(metric_list, "young_gc_count") == { "name": 
"young_gc_count", "value": { "single": 7 } - }, select(metric_list, "young_gc_count")) + } - self.assertEqual({ + assert select(metric_list, "old_gc_time") == { "name": "old_gc_time", "value": { "single": 0 } - }, select(metric_list, "old_gc_time")) - self.assertEqual({ + } + assert select(metric_list, "old_gc_count") == { "name": "old_gc_count", "value": { "single": 0 } - }, select(metric_list, "old_gc_count")) + } - self.assertEqual({ + assert select(metric_list, "merge_time") == { "name": "merge_time", "value": { "single": 3702 } - }, select(metric_list, "merge_time")) + } - self.assertEqual({ + assert select(metric_list, "merge_time_per_shard") == { "name": "merge_time_per_shard", "value": { "min": 40, @@ -2079,23 +2078,23 @@ def test_as_flat_list(self): "max": 3900, "unit": "ms" } - }, select(metric_list, "merge_time_per_shard")) + } - self.assertEqual({ + assert select(metric_list, "merge_count") == { "name": "merge_count", "value": { "single": 2 } - }, select(metric_list, "merge_count")) + } - self.assertEqual({ + assert select(metric_list, "refresh_time") == { "name": "refresh_time", "value": { "single": 596 } - }, select(metric_list, "refresh_time")) + } - self.assertEqual({ + assert select(metric_list, "refresh_time_per_shard") == { "name": "refresh_time_per_shard", "value": { "min": 48, @@ -2103,23 +2102,23 @@ def test_as_flat_list(self): "max": 204, "unit": "ms" } - }, select(metric_list, "refresh_time_per_shard")) + } - self.assertEqual({ + assert select(metric_list, "refresh_count") == { "name": "refresh_count", "value": { "single": 10 } - }, select(metric_list, "refresh_count")) + } - self.assertIsNone(select(metric_list, "flush_time")) - self.assertIsNone(select(metric_list, "flush_time_per_shard")) - self.assertEqual({ + assert select(metric_list, "flush_time") is None + assert select(metric_list, "flush_time_per_shard") is None + assert select(metric_list, "flush_count") == { "name": "flush_count", "value": { "single": 0 } - }, select(metric_list, "flush_count")) + } class SystemStatsTests(TestCase): @@ -2162,50 +2161,50 @@ def test_as_flat_list(self): s = metrics.SystemStats(d) metric_list = s.as_flat_list() - self.assertEqual({ + assert select(metric_list, "startup_time", node="rally-node-0") == { "node": "rally-node-0", "name": "startup_time", "value": { "single": 3.4 } - }, select(metric_list, "startup_time", node="rally-node-0")) + } - self.assertEqual({ + assert select(metric_list, "startup_time", node="rally-node-1") == { "node": "rally-node-1", "name": "startup_time", "value": { "single": 4.2 } - }, select(metric_list, "startup_time", node="rally-node-1")) + } - self.assertEqual({ + assert select(metric_list, "index_size", node="rally-node-0") == { "node": "rally-node-0", "name": "index_size", "value": { "single": 300 * 1024 * 1024 } - }, select(metric_list, "index_size", node="rally-node-0")) + } - self.assertEqual({ + assert select(metric_list, "index_size", node="rally-node-1") == { "node": "rally-node-1", "name": "index_size", "value": { "single": 302 * 1024 * 1024 } - }, select(metric_list, "index_size", node="rally-node-1")) + } - self.assertEqual({ + assert select(metric_list, "bytes_written", node="rally-node-0") == { "node": "rally-node-0", "name": "bytes_written", "value": { "single": 817 * 1024 * 1024 } - }, select(metric_list, "bytes_written", node="rally-node-0")) + } - self.assertEqual({ + assert select(metric_list, "bytes_written", node="rally-node-1") == { "node": "rally-node-1", "name": "bytes_written", "value": { "single": 833 * 1024 * 1024 } - 
}, select(metric_list, "bytes_written", node="rally-node-1")) + } diff --git a/tests/reporter_test.py b/tests/reporter_test.py index 4025c4245..ae1474040 100644 --- a/tests/reporter_test.py +++ b/tests/reporter_test.py @@ -36,17 +36,17 @@ def setUp(self): def test_formats_as_markdown(self): formatted = reporter.format_as_markdown(self.empty_header, self.empty_data, self.numbers_align) # 1 header line, 1 separation line + 0 data lines - self.assertEqual(1 + 1 + 0, len(formatted.splitlines())) + assert len(formatted.splitlines()) == 1 + 1 + 0 formatted = reporter.format_as_markdown(self.metrics_header, self.metrics_data, self.numbers_align) # 1 header line, 1 separation line + 3 data lines - self.assertEqual(1 + 1 + 3, len(formatted.splitlines())) + assert len(formatted.splitlines()) == 1 + 1 + 3 def test_formats_as_csv(self): formatted = reporter.format_as_csv(self.empty_header, self.empty_data) # 1 header line, no separation line + 0 data lines - self.assertEqual(1 + 0, len(formatted.splitlines())) + assert len(formatted.splitlines()) == 1 + 0 formatted = reporter.format_as_csv(self.metrics_header, self.metrics_data) # 1 header line, no separation line + 3 data lines - self.assertEqual(1 + 3, len(formatted.splitlines())) + assert len(formatted.splitlines()) == 1 + 3 diff --git a/tests/telemetry_test.py b/tests/telemetry_test.py index f2ce41aff..4565a59e6 100644 --- a/tests/telemetry_test.py +++ b/tests/telemetry_test.py @@ -78,9 +78,9 @@ def test_merges_options_set_by_different_devices(self): opts = t.instrument_candidate_java_opts() - self.assertIsNotNone(opts) - self.assertEqual(len(opts), 3) - self.assertEqual(["-Xms256M", "-Xmx512M", "-Des.network.host=127.0.0.1"], opts) + assert opts is not None + assert len(opts) == 3 + assert opts == ["-Xms256M", "-Xmx512M", "-Des.network.host=127.0.0.1"] class StartupTimeTests(TestCase): @@ -172,79 +172,77 @@ class JfrTests(TestCase): def test_sets_options_for_pre_java_9_default_recording_template(self): jfr = telemetry.FlightRecorder(telemetry_params={}, log_root="/var/log", java_major_version=random.randint(0, 8)) java_opts = jfr.java_opts("/var/log/test-recording.jfr") - self.assertEqual(["-XX:+UnlockDiagnosticVMOptions", "-XX:+DebugNonSafepoints", "-XX:+UnlockCommercialFeatures", + assert java_opts == ["-XX:+UnlockDiagnosticVMOptions", "-XX:+DebugNonSafepoints", "-XX:+UnlockCommercialFeatures", "-XX:+FlightRecorder", "-XX:FlightRecorderOptions=disk=true,maxage=0s,maxsize=0,dumponexit=true," - "dumponexitpath=/var/log/test-recording.jfr", "-XX:StartFlightRecording=defaultrecording=true"], java_opts) + "dumponexitpath=/var/log/test-recording.jfr", "-XX:StartFlightRecording=defaultrecording=true"] def test_sets_options_for_java_9_or_10_default_recording_template(self): jfr = telemetry.FlightRecorder(telemetry_params={}, log_root="/var/log", java_major_version=random.randint(9, 10)) java_opts = jfr.java_opts("/var/log/test-recording.jfr") - self.assertEqual(["-XX:+UnlockDiagnosticVMOptions", "-XX:+DebugNonSafepoints", "-XX:+UnlockCommercialFeatures", + assert java_opts == ["-XX:+UnlockDiagnosticVMOptions", "-XX:+DebugNonSafepoints", "-XX:+UnlockCommercialFeatures", "-XX:StartFlightRecording=maxsize=0,maxage=0s,disk=true," - "dumponexit=true,filename=/var/log/test-recording.jfr"], java_opts) + "dumponexit=true,filename=/var/log/test-recording.jfr"] def test_sets_options_for_java_11_or_above_default_recording_template(self): jfr = telemetry.FlightRecorder(telemetry_params={}, log_root="/var/log", java_major_version=random.randint(11, 999)) java_opts 
= jfr.java_opts("/var/log/test-recording.jfr") - self.assertEqual(["-XX:+UnlockDiagnosticVMOptions", "-XX:+DebugNonSafepoints", + assert java_opts == ["-XX:+UnlockDiagnosticVMOptions", "-XX:+DebugNonSafepoints", "-XX:StartFlightRecording=maxsize=0,maxage=0s,disk=true," - "dumponexit=true,filename=/var/log/test-recording.jfr"], java_opts) + "dumponexit=true,filename=/var/log/test-recording.jfr"] def test_sets_options_for_pre_java_9_custom_recording_template(self): jfr = telemetry.FlightRecorder(telemetry_params={"recording-template": "profile"}, log_root="/var/log", java_major_version=random.randint(0, 8)) java_opts = jfr.java_opts("/var/log/test-recording.jfr") - self.assertEqual(["-XX:+UnlockDiagnosticVMOptions", "-XX:+DebugNonSafepoints", "-XX:+UnlockCommercialFeatures", + assert java_opts == ["-XX:+UnlockDiagnosticVMOptions", "-XX:+DebugNonSafepoints", "-XX:+UnlockCommercialFeatures", "-XX:+FlightRecorder", "-XX:FlightRecorderOptions=disk=true,maxage=0s,maxsize=0,dumponexit=true," "dumponexitpath=/var/log/test-recording.jfr", - "-XX:StartFlightRecording=defaultrecording=true,settings=profile"], java_opts) + "-XX:StartFlightRecording=defaultrecording=true,settings=profile"] def test_sets_options_for_java_9_or_10_custom_recording_template(self): jfr = telemetry.FlightRecorder(telemetry_params={"recording-template": "profile"}, log_root="/var/log", java_major_version=random.randint(9, 10)) java_opts = jfr.java_opts("/var/log/test-recording.jfr") - self.assertEqual(["-XX:+UnlockDiagnosticVMOptions", "-XX:+DebugNonSafepoints", "-XX:+UnlockCommercialFeatures", + assert java_opts == ["-XX:+UnlockDiagnosticVMOptions", "-XX:+DebugNonSafepoints", "-XX:+UnlockCommercialFeatures", "-XX:StartFlightRecording=maxsize=0,maxage=0s,disk=true,dumponexit=true," - "filename=/var/log/test-recording.jfr,settings=profile"], java_opts) + "filename=/var/log/test-recording.jfr,settings=profile"] def test_sets_options_for_java_11_or_above_custom_recording_template(self): jfr = telemetry.FlightRecorder(telemetry_params={"recording-template": "profile"}, log_root="/var/log", java_major_version=random.randint(11, 999)) java_opts = jfr.java_opts("/var/log/test-recording.jfr") - self.assertEqual(["-XX:+UnlockDiagnosticVMOptions", "-XX:+DebugNonSafepoints", + assert java_opts == ["-XX:+UnlockDiagnosticVMOptions", "-XX:+DebugNonSafepoints", "-XX:StartFlightRecording=maxsize=0,maxage=0s,disk=true,dumponexit=true," - "filename=/var/log/test-recording.jfr,settings=profile"], java_opts) + "filename=/var/log/test-recording.jfr,settings=profile"] class GcTests(TestCase): def test_sets_options_for_pre_java_9(self): gc = telemetry.Gc(telemetry_params={}, log_root="/var/log", java_major_version=random.randint(0, 8)) gc_java_opts = gc.java_opts("/var/log/defaults-node-0.gc.log") - self.assertEqual(7, len(gc_java_opts)) - self.assertEqual(["-Xloggc:/var/log/defaults-node-0.gc.log", "-XX:+PrintGCDetails", "-XX:+PrintGCDateStamps", + assert len(gc_java_opts) == 7 + assert gc_java_opts == ["-Xloggc:/var/log/defaults-node-0.gc.log", "-XX:+PrintGCDetails", "-XX:+PrintGCDateStamps", "-XX:+PrintGCTimeStamps", "-XX:+PrintGCApplicationStoppedTime", "-XX:+PrintGCApplicationConcurrentTime", - "-XX:+PrintTenuringDistribution"], gc_java_opts) + "-XX:+PrintTenuringDistribution"] def test_sets_options_for_java_9_or_above(self): gc = telemetry.Gc(telemetry_params={}, log_root="/var/log", java_major_version=random.randint(9, 999)) gc_java_opts = gc.java_opts("/var/log/defaults-node-0.gc.log") - self.assertEqual(1, len(gc_java_opts)) - 
self.assertEqual( - ["-Xlog:gc*=info,safepoint=info,age*=trace:file=/var/log/defaults-node-0.gc.log:utctime,uptimemillis,level,tags:filecount=0"], - gc_java_opts) + assert len(gc_java_opts) == 1 + assert gc_java_opts == \ + ["-Xlog:gc*=info,safepoint=info,age*=trace:file=/var/log/defaults-node-0.gc.log:utctime,uptimemillis,level,tags:filecount=0"] def test_can_override_options_for_java_9_or_above(self): gc = telemetry.Gc(telemetry_params={"gc-log-config": "gc,safepoint"}, log_root="/var/log", java_major_version=random.randint(9, 999)) gc_java_opts = gc.java_opts("/var/log/defaults-node-0.gc.log") - self.assertEqual(1, len(gc_java_opts)) - self.assertEqual( - ["-Xlog:gc,safepoint:file=/var/log/defaults-node-0.gc.log:utctime,uptimemillis,level,tags:filecount=0"], - gc_java_opts) + assert len(gc_java_opts) == 1 + assert gc_java_opts == \ + ["-Xlog:gc,safepoint:file=/var/log/defaults-node-0.gc.log:utctime,uptimemillis,level,tags:filecount=0"] class HeapdumpTests(TestCase): @@ -290,8 +288,7 @@ def test_negative_sample_interval_forbidden(self): telemetry_params = { "ccr-stats-sample-interval": -1 * random.random() } - with self.assertRaisesRegex(exceptions.SystemSetupError, - r"The telemetry parameter 'ccr-stats-sample-interval' must be greater than zero but was .*\."): + with pytest.raises(exceptions.SystemSetupError, match=r"The telemetry parameter 'ccr-stats-sample-interval' must be greater than zero but was .*\."): telemetry.CcrStats(telemetry_params, clients, metrics_store) def test_wrong_cluster_name_in_ccr_stats_indices_forbidden(self): @@ -304,11 +301,9 @@ def test_wrong_cluster_name_in_ccr_stats_indices_forbidden(self): "wrong_cluster_name": ["follower"] } } - with self.assertRaisesRegex(exceptions.SystemSetupError, - r"The telemetry parameter 'ccr-stats-indices' must be a JSON Object with keys matching " + with pytest.raises(exceptions.SystemSetupError, match=r"The telemetry parameter 'ccr-stats-indices' must be a JSON Object with keys matching " r"the cluster names \[{}] specified in --target-hosts " - r"but it had \[wrong_cluster_name\].".format(",".join(sorted(clients.keys()))) - ): + r"but it had \[wrong_cluster_name\].".format(",".join(sorted(clients.keys())))): telemetry.CcrStats(telemetry_params, clients, metrics_store) @@ -319,8 +314,7 @@ def test_raises_exception_on_transport_error(self): client = Client(transport_client=TransportClient(response={}, force_error=True)) cfg = create_config() metrics_store = metrics.EsMetricsStore(cfg) - with self.assertRaisesRegex(exceptions.RallyError, - r"A transport error occurred while collecting CCR stats from the endpoint " + with pytest.raises(exceptions.RallyError, match=r"A transport error occurred while collecting CCR stats from the endpoint " r"\[/_ccr/stats\?filter_path=follow_stats\] on " r"cluster \[remote\]"): telemetry.CcrStatsRecorder(cluster_name="remote", client=client, metrics_store=metrics_store, sample_interval=1).record() @@ -683,7 +677,7 @@ def test_no_metrics_if_no_pending_recoveries(self, metrics_store_put_doc): indices=["index1"]) recorder.record() - self.assertEqual(0, metrics_store_put_doc.call_count) + assert metrics_store_put_doc.call_count == 0 @mock.patch("esrally.metrics.EsMetricsStore.put_doc") def test_stores_single_shard_stats(self, metrics_store_put_doc): @@ -1878,8 +1872,7 @@ def test_negative_sample_interval_forbidden(self): telemetry_params = { "node-stats-sample-interval": -1 * random.random() } - with self.assertRaisesRegex(exceptions.SystemSetupError, - r"The telemetry parameter 
'node-stats-sample-interval' must be greater than zero but was .*\."): + with pytest.raises(exceptions.SystemSetupError, match=r"The telemetry parameter 'node-stats-sample-interval' must be greater than zero but was .*\."): telemetry.NodeStatsRecorder(telemetry_params, cluster_name="default", client=client, metrics_store=metrics_store) def test_flatten_indices_fields(self): @@ -1892,7 +1885,7 @@ def test_flatten_indices_fields(self): prefix="indices", stats=NodeStatsRecorderTests.node_stats_response["nodes"]["Zbl_e8EyRXmiR47gbHgPfg"]["indices"] ) - self.assertDictEqual(NodeStatsRecorderTests.indices_stats_response_flattened, flattened_fields) + assert flattened_fields == NodeStatsRecorderTests.indices_stats_response_flattened @mock.patch("esrally.metrics.EsMetricsStore.put_doc") def test_stores_default_nodes_stats(self, metrics_store_put_doc): @@ -2522,8 +2515,7 @@ def test_exception_when_include_indices_metrics_not_valid(self): telemetry_params = { "node-stats-include-indices-metrics": {"bad": "input"} } - with self.assertRaisesRegex(exceptions.SystemSetupError, - "The telemetry parameter 'node-stats-include-indices-metrics' must be " + with pytest.raises(exceptions.SystemSetupError, match="The telemetry parameter 'node-stats-include-indices-metrics' must be " "a comma-separated string but was "): telemetry.NodeStatsRecorder(telemetry_params, cluster_name="remote", client=client, metrics_store=metrics_store) @@ -2536,8 +2528,7 @@ def test_negative_sample_interval_forbidden(self): telemetry_params = { "transform-stats-sample-interval": -1 * random.random() } - with self.assertRaisesRegex(exceptions.SystemSetupError, - r"The telemetry parameter 'transform-stats-sample-interval' must be greater than zero but was .*\."): + with pytest.raises(exceptions.SystemSetupError, match=r"The telemetry parameter 'transform-stats-sample-interval' must be greater than zero but was .*\."): telemetry.TransformStats(telemetry_params, clients, metrics_store) def test_wrong_cluster_name_in_transform_stats_indices_forbidden(self): @@ -2550,11 +2541,9 @@ def test_wrong_cluster_name_in_transform_stats_indices_forbidden(self): "wrong_cluster_name": ["follower"] } } - with self.assertRaisesRegex(exceptions.SystemSetupError, - r"The telemetry parameter 'transform-stats-transforms' must be a JSON Object with keys matching " + with pytest.raises(exceptions.SystemSetupError, match=r"The telemetry parameter 'transform-stats-transforms' must be a JSON Object with keys matching " r"the cluster names \[{}] specified in --target-hosts " - r"but it had \[wrong_cluster_name\].".format(",".join(sorted(clients.keys()))) - ): + r"but it had \[wrong_cluster_name\].".format(",".join(sorted(clients.keys())))): telemetry.TransformStats(telemetry_params, clients, metrics_store) @@ -2730,7 +2719,7 @@ def test_resilient_if_error_response(self, metrics_store_add_meta_info): t = telemetry.Telemetry(cfg, devices=[env_device]) t.on_benchmark_start() - self.assertEqual(0, metrics_store_add_meta_info.call_count) + assert metrics_store_add_meta_info.call_count == 0 class NodeEnvironmentInfoTests(TestCase): @@ -2899,7 +2888,7 @@ def test_resilient_if_error_response(self, metrics_store_add_meta_info): t = telemetry.Telemetry(self.cfg, devices=[env_device]) t.on_benchmark_start() - self.assertEqual(0, metrics_store_add_meta_info.call_count) + assert metrics_store_add_meta_info.call_count == 0 class DiskIoTests(TestCase): @@ -3374,7 +3363,7 @@ def test_error_on_retrieval_does_not_store_metrics(self, es, metrics_store_put_d t = 
telemetry.Telemetry(cfg, devices=[device]) t.on_benchmark_stop() - self.assertEqual(0, metrics_store_put_doc.call_count) + assert metrics_store_put_doc.call_count == 0 @mock.patch("esrally.metrics.EsMetricsStore.put_doc") @mock.patch("elasticsearch.Elasticsearch") @@ -3392,7 +3381,7 @@ def test_empty_result_does_not_store_metrics(self, es, metrics_store_put_doc): t = telemetry.Telemetry(cfg, devices=[device]) t.on_benchmark_stop() - self.assertEqual(0, metrics_store_put_doc.call_count) + assert metrics_store_put_doc.call_count == 0 @mock.patch("esrally.metrics.EsMetricsStore.put_doc") @mock.patch("elasticsearch.Elasticsearch") @@ -3511,6 +3500,6 @@ def test_stores_nothing_if_no_data_path(self, run_subprocess, metrics_store_clus t.detach_from_node(node, running=False) t.store_system_metrics(node, metrics_store) - self.assertEqual(0, run_subprocess.call_count) - self.assertEqual(0, metrics_store_cluster_value.call_count) - self.assertEqual(0, get_size.call_count) + assert run_subprocess.call_count == 0 + assert metrics_store_cluster_value.call_count == 0 + assert get_size.call_count == 0 diff --git a/tests/test_async_connection.py b/tests/test_async_connection.py index 13fec9a58..a29041ac9 100644 --- a/tests/test_async_connection.py +++ b/tests/test_async_connection.py @@ -52,4 +52,4 @@ def test_matches(self): def assert_response_type(self, matcher, path, expected_response_type): response = json.loads(matcher.response(path)) - self.assertEqual(response["response-type"], expected_response_type) + assert expected_response_type == response["response-type"] diff --git a/tests/time_test.py b/tests/time_test.py index 9f24bc76f..55bff8062 100644 --- a/tests/time_test.py +++ b/tests/time_test.py @@ -31,11 +31,11 @@ def test_split_time_increases(self): for _ in range(3): time.sleep(wait_period_seconds) split_time = stop_watch.split_time() - self.assertLess(prev_split_time, split_time) + assert prev_split_time < split_time prev_split_time = split_time stop_watch.stop() total_time = stop_watch.total_time() - self.assertLessEqual(prev_split_time, total_time) + assert prev_split_time <= total_time def test_total_time_roughly_in_expected_range(self): wait_period_seconds = 0.05 @@ -48,8 +48,8 @@ def test_total_time_roughly_in_expected_range(self): interval = stop_watch.total_time() # depending on scheduling accuracy we should end up somewhere in that range - self.assertGreaterEqual(interval, wait_period_seconds - acceptable_delta_seconds) - self.assertLessEqual(interval, wait_period_seconds + acceptable_delta_seconds) + assert interval >= wait_period_seconds - acceptable_delta_seconds + assert interval <= wait_period_seconds + acceptable_delta_seconds def test_millis_conversion_roughly_in_expected_range(self): wait_period_millis = 50 @@ -62,5 +62,5 @@ def test_millis_conversion_roughly_in_expected_range(self): interval_millis = end - start # depending on scheduling accuracy we should end up somewhere in that range - self.assertGreaterEqual(interval_millis, wait_period_millis - acceptable_delta_millis) - self.assertLessEqual(interval_millis, wait_period_millis + acceptable_delta_millis) + assert interval_millis >= wait_period_millis - acceptable_delta_millis + assert interval_millis <= wait_period_millis + acceptable_delta_millis diff --git a/tests/track/loader_test.py b/tests/track/loader_test.py index e8c3d6342..2283d8a8a 100644 --- a/tests/track/loader_test.py +++ b/tests/track/loader_test.py @@ -23,6 +23,8 @@ import urllib.error from unittest import TestCase +import pytest + from esrally import 
exceptions, config from esrally.track import loader, track from esrally.utils import io @@ -52,10 +54,10 @@ def test_track_from_directory(self, is_dir, path_exists): path_exists.return_value = True repo = loader.SimpleTrackRepository("/path/to/track/unit-test") - self.assertEqual("unit-test", repo.track_name) - self.assertEqual(["unit-test"], repo.track_names) - self.assertEqual("/path/to/track/unit-test", repo.track_dir("unit-test")) - self.assertEqual("/path/to/track/unit-test/track.json", repo.track_file("unit-test")) + assert repo.track_name == "unit-test" + assert repo.track_names == ["unit-test"] + assert repo.track_dir("unit-test") == "/path/to/track/unit-test" + assert repo.track_file("unit-test") == "/path/to/track/unit-test/track.json" @mock.patch("os.path.exists") @mock.patch("os.path.isdir") @@ -66,10 +68,10 @@ def test_track_from_file(self, is_file, is_dir, path_exists): path_exists.return_value = True repo = loader.SimpleTrackRepository("/path/to/track/unit-test/my-track.json") - self.assertEqual("my-track", repo.track_name) - self.assertEqual(["my-track"], repo.track_names) - self.assertEqual("/path/to/track/unit-test", repo.track_dir("my-track")) - self.assertEqual("/path/to/track/unit-test/my-track.json", repo.track_file("my-track")) + assert repo.track_name == "my-track" + assert repo.track_names == ["my-track"] + assert repo.track_dir("my-track") == "/path/to/track/unit-test" + assert repo.track_file("my-track") == "/path/to/track/unit-test/my-track.json" @mock.patch("os.path.exists") @mock.patch("os.path.isdir") @@ -79,16 +81,16 @@ def test_track_from_named_pipe(self, is_file, is_dir, path_exists): is_dir.return_value = False path_exists.return_value = True - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: loader.SimpleTrackRepository("a named pipe cannot point to a track") - self.assertEqual("a named pipe cannot point to a track is neither a file nor a directory", ctx.exception.args[0]) + assert ctx.value.args[0] == "a named pipe cannot point to a track is neither a file nor a directory" @mock.patch("os.path.exists") def test_track_from_non_existing_path(self, path_exists): path_exists.return_value = False - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: loader.SimpleTrackRepository("/path/does/not/exist") - self.assertEqual("Track path /path/does/not/exist does not exist", ctx.exception.args[0]) + assert ctx.value.args[0] == "Track path /path/does/not/exist does not exist" @mock.patch("os.path.isdir") @mock.patch("os.path.exists") @@ -96,9 +98,9 @@ def test_track_from_directory_without_track(self, path_exists, is_dir): # directory exists, but not the file path_exists.side_effect = [True, False] is_dir.return_value = True - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: loader.SimpleTrackRepository("/path/to/not/a/track") - self.assertEqual("Could not find track.json in /path/to/not/a/track", ctx.exception.args[0]) + assert ctx.value.args[0] == "Could not find track.json in /path/to/not/a/track" @mock.patch("os.path.exists") @mock.patch("os.path.isdir") @@ -108,9 +110,9 @@ def test_track_from_file_but_not_json(self, is_file, is_dir, path_exists): is_dir.return_value = False path_exists.return_value = True - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: 
loader.SimpleTrackRepository("/path/to/track/unit-test/my-track.xml") - self.assertEqual("/path/to/track/unit-test/my-track.xml has to be a JSON file", ctx.exception.args[0]) + assert ctx.value.args[0] == "/path/to/track/unit-test/my-track.xml has to be a JSON file" class GitRepositoryTests(TestCase): @@ -132,10 +134,10 @@ def test_track_from_existing_repo(self, walk, exists): repo = loader.GitTrackRepository(cfg, fetch=False, update=False, repo_class=GitRepositoryTests.MockGitRepo) - self.assertEqual("unittest", repo.track_name) - self.assertEqual(["unittest", "unittest2", "unittest3"], list(repo.track_names)) - self.assertEqual("/tmp/tracks/default/unittest", repo.track_dir("unittest")) - self.assertEqual("/tmp/tracks/default/unittest/track.json", repo.track_file("unittest")) + assert repo.track_name == "unittest" + assert list(repo.track_names) == ["unittest", "unittest2", "unittest3"] + assert repo.track_dir("unittest") == "/tmp/tracks/default/unittest" + assert repo.track_file("unittest") == "/tmp/tracks/default/unittest/track.json" class TrackPreparationTests(TestCase): @@ -199,7 +201,7 @@ def test_raise_error_on_wrong_uncompressed_file_size(self, is_file, get_size, de downloader=loader.Downloader(offline=False, test_mode=False), decompressor=loader.Decompressor()) - with self.assertRaises(exceptions.DataError) as ctx: + with pytest.raises(exceptions.DataError) as ctx: p.prepare_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK, document_file="docs.json", document_archive="docs.json.bz2", @@ -207,7 +209,7 @@ def test_raise_error_on_wrong_uncompressed_file_size(self, is_file, get_size, de compressed_size_in_bytes=200, uncompressed_size_in_bytes=2000), data_root="/tmp") - self.assertEqual("[/tmp/docs.json] is corrupt. Extracted [1] bytes but [2000] bytes are expected.", ctx.exception.args[0]) + assert ctx.value.args[0] == "[/tmp/docs.json] is corrupt. Extracted [1] bytes but [2000] bytes are expected." decompress.assert_called_with("/tmp/docs.json.bz2", "/tmp") @@ -226,7 +228,7 @@ def test_raise_error_if_compressed_does_not_contain_expected_document_file(self, downloader=loader.Downloader(offline=False, test_mode=False), decompressor=loader.Decompressor()) - with self.assertRaises(exceptions.DataError) as ctx: + with pytest.raises(exceptions.DataError) as ctx: p.prepare_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK, base_url="http://benchmarks.elasticsearch.org/corpora/unit-test", document_file="docs.json", @@ -235,8 +237,8 @@ def test_raise_error_if_compressed_does_not_contain_expected_document_file(self, compressed_size_in_bytes=200, uncompressed_size_in_bytes=2000), data_root="/tmp") - self.assertEqual("Decompressing [/tmp/docs.json.bz2] did not create [/tmp/docs.json]. Please check with the track author if the " - "compressed archive has been created correctly.", ctx.exception.args[0]) + assert ctx.value.args[0] == "Decompressing [/tmp/docs.json.bz2] did not create [/tmp/docs.json]. Please check with the track author if the " \ + "compressed archive has been created correctly." 
decompress.assert_called_with("/tmp/docs.json.bz2", "/tmp") @@ -364,7 +366,7 @@ def test_raise_download_error_if_offline(self, is_file, ensure_dir, download): downloader=loader.Downloader(offline=True, test_mode=False), decompressor=loader.Decompressor()) - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: p.prepare_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK, base_url="http://benchmarks.elasticsearch.org/corpora/unit-test", document_file="docs.json", @@ -372,10 +374,10 @@ def test_raise_download_error_if_offline(self, is_file, ensure_dir, download): uncompressed_size_in_bytes=2000), data_root="/tmp") - self.assertEqual("Cannot find [/tmp/docs.json]. Please disable offline mode and retry.", ctx.exception.args[0]) + assert ctx.value.args[0] == "Cannot find [/tmp/docs.json]. Please disable offline mode and retry." - self.assertEqual(0, ensure_dir.call_count) - self.assertEqual(0, download.call_count) + assert ensure_dir.call_count == 0 + assert download.call_count == 0 @mock.patch("esrally.utils.net.download") @mock.patch("esrally.utils.io.ensure_dir") @@ -388,7 +390,7 @@ def test_raise_download_error_if_no_url_provided_and_file_missing(self, is_file, downloader=loader.Downloader(offline=False, test_mode=False), decompressor=loader.Decompressor()) - with self.assertRaises(exceptions.DataError) as ctx: + with pytest.raises(exceptions.DataError) as ctx: p.prepare_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK, base_url=None, document_file="docs.json", @@ -397,10 +399,10 @@ def test_raise_download_error_if_no_url_provided_and_file_missing(self, is_file, uncompressed_size_in_bytes=2000), data_root="/tmp") - self.assertEqual("Cannot download data because no base URL is provided.", ctx.exception.args[0]) + assert ctx.value.args[0] == "Cannot download data because no base URL is provided." - self.assertEqual(0, ensure_dir.call_count) - self.assertEqual(0, download.call_count) + assert ensure_dir.call_count == 0 + assert download.call_count == 0 @mock.patch("esrally.utils.net.download") @mock.patch("esrally.utils.io.ensure_dir") @@ -416,18 +418,18 @@ def test_raise_download_error_if_no_url_provided_and_wrong_file_size(self, is_fi downloader=loader.Downloader(offline=False, test_mode=False), decompressor=loader.Decompressor()) - with self.assertRaises(exceptions.DataError) as ctx: + with pytest.raises(exceptions.DataError) as ctx: p.prepare_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK, document_file="docs.json", number_of_documents=5, uncompressed_size_in_bytes=2000), data_root="/tmp") - self.assertEqual("[/tmp/docs.json] is present but does not have the expected size of [2000] bytes and it " - "cannot be downloaded because no base URL is provided.", ctx.exception.args[0]) + assert ctx.value.args[0] == "[/tmp/docs.json] is present but does not have the expected size of [2000] bytes and it " \ + "cannot be downloaded because no base URL is provided." 
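A note on the call-count conversions in the surrounding hunks: asserting call_count == 0 is a faithful translation of assertEqual(0, mock.call_count), and unittest.mock also offers assert_not_called() with a slightly friendlier failure message. A minimal sketch, with a hypothetical mock standing in for esrally.utils.net.download:

import unittest.mock as mock

def test_offline_mode_skips_download():
    download = mock.Mock()  # hypothetical stand-in for esrally.utils.net.download
    # ... exercise code under test that must not download anything ...
    assert download.call_count == 0  # the pattern used throughout this diff
    download.assert_not_called()     # equivalent built-in mock assertion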
- self.assertEqual(0, ensure_dir.call_count) - self.assertEqual(0, download.call_count) + assert ensure_dir.call_count == 0 + assert download.call_count == 0 @mock.patch("esrally.utils.net.download") @mock.patch("esrally.utils.io.ensure_dir") @@ -443,7 +445,7 @@ def test_raise_download_error_no_test_mode_file(self, is_file, ensure_dir, downl downloader=loader.Downloader(offline=False, test_mode=True), decompressor=loader.Decompressor()) - with self.assertRaises(exceptions.DataError) as ctx: + with pytest.raises(exceptions.DataError) as ctx: p.prepare_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK, base_url="http://benchmarks.elasticsearch.org/corpora/unit-test", document_file="docs-1k.json", @@ -451,8 +453,8 @@ def test_raise_download_error_no_test_mode_file(self, is_file, ensure_dir, downl uncompressed_size_in_bytes=None), data_root="/tmp") - self.assertEqual("This track does not support test mode. Ask the track author to add it or disable " - "test mode and retry.", ctx.exception.args[0]) + assert ctx.value.args[0] == "This track does not support test mode. Ask the track author to add it or disable " \ + "test mode and retry." ensure_dir.assert_called_with("/tmp") download.assert_called_with("http://benchmarks.elasticsearch.org/corpora/unit-test/docs-1k.json", @@ -472,7 +474,7 @@ def test_raise_download_error_on_connection_problems(self, is_file, ensure_dir, downloader=loader.Downloader(offline=False, test_mode=False), decompressor=loader.Decompressor()) - with self.assertRaises(exceptions.DataError) as ctx: + with pytest.raises(exceptions.DataError) as ctx: p.prepare_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK, base_url="http://benchmarks.elasticsearch.org/corpora/unit-test", document_file="docs.json", @@ -480,8 +482,8 @@ def test_raise_download_error_on_connection_problems(self, is_file, ensure_dir, uncompressed_size_in_bytes=2000), data_root="/tmp") - self.assertEqual("Could not download [http://benchmarks.elasticsearch.org/corpora/unit-test/docs.json] " - "to [/tmp/docs.json] (HTTP status: 500, reason: Internal Server Error)", ctx.exception.args[0]) + assert ctx.value.args[0] == "Could not download [http://benchmarks.elasticsearch.org/corpora/unit-test/docs.json] " \ + "to [/tmp/docs.json] (HTTP status: 500, reason: Internal Server Error)" ensure_dir.assert_called_with("/tmp") download.assert_called_with("http://benchmarks.elasticsearch.org/corpora/unit-test/docs.json", @@ -501,13 +503,13 @@ def test_prepare_bundled_document_set_if_document_file_available(self, is_file, downloader=loader.Downloader(offline=False, test_mode=False), decompressor=loader.Decompressor()) - self.assertTrue(p.prepare_bundled_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK, + assert p.prepare_bundled_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK, document_file="docs.json", document_archive="docs.json.bz2", number_of_documents=5, compressed_size_in_bytes=200, uncompressed_size_in_bytes=2000), - data_root=".")) + data_root=".") prepare_file_offset_table.assert_called_with("./docs.json") @@ -523,16 +525,16 @@ def test_prepare_bundled_document_set_does_nothing_if_no_document_files(self, is downloader=loader.Downloader(offline=False, test_mode=False), decompressor=loader.Decompressor()) - self.assertFalse(p.prepare_bundled_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK, + assert not 
p.prepare_bundled_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK, document_file="docs.json", document_archive="docs.json.bz2", number_of_documents=5, compressed_size_in_bytes=200, uncompressed_size_in_bytes=2000), - data_root=".")) + data_root=".") - self.assertEqual(0, decompress.call_count) - self.assertEqual(0, prepare_file_offset_table.call_count) + assert decompress.call_count == 0 + assert prepare_file_offset_table.call_count == 0 def test_used_corpora(self): track_specification = { @@ -657,14 +659,13 @@ def test_used_corpora(self): reader = loader.TrackSpecificationReader(selected_challenge="default-challenge") full_track = reader("unittest", track_specification, "/mappings") used_corpora = sorted(loader.used_corpora(full_track), key=lambda c: c.name) - self.assertEqual(2, len(used_corpora)) - self.assertEqual("http_logs", used_corpora[0].name) + assert len(used_corpora) == 2 + assert used_corpora[0].name == "http_logs" # each bulk operation requires a different data file but they should have been merged properly. - self.assertEqual({"documents-181998.json.bz2", "documents-191998.json.bz2"}, - {d.document_archive for d in used_corpora[0].documents}) + assert {d.document_archive for d in used_corpora[0].documents} == {"documents-181998.json.bz2", "documents-191998.json.bz2"} - self.assertEqual("http_logs_unparsed", used_corpora[1].name) - self.assertEqual({"documents-201998.unparsed.json.bz2"}, {d.document_archive for d in used_corpora[1].documents}) + assert used_corpora[1].name == "http_logs_unparsed" + assert {d.document_archive for d in used_corpora[1].documents} == {"documents-201998.unparsed.json.bz2"} @mock.patch("esrally.utils.io.prepare_file_offset_table") @mock.patch("esrally.utils.io.decompress") @@ -686,13 +687,13 @@ def test_prepare_bundled_document_set_decompresses_compressed_docs(self, is_file downloader=loader.Downloader(offline=False, test_mode=False), decompressor=loader.Decompressor()) - self.assertTrue(p.prepare_bundled_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK, + assert p.prepare_bundled_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK, document_file="docs.json", document_archive="docs.json.bz2", number_of_documents=5, compressed_size_in_bytes=200, uncompressed_size_in_bytes=2000), - data_root=".")) + data_root=".") prepare_file_offset_table.assert_called_with("./docs.json") @@ -709,7 +710,7 @@ def test_prepare_bundled_document_set_error_compressed_docs_wrong_size(self, is_ downloader=loader.Downloader(offline=False, test_mode=False), decompressor=loader.Decompressor()) - with self.assertRaises(exceptions.DataError) as ctx: + with pytest.raises(exceptions.DataError) as ctx: p.prepare_bundled_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK, document_file="docs.json", document_archive="docs.json.bz2", @@ -718,8 +719,7 @@ def test_prepare_bundled_document_set_error_compressed_docs_wrong_size(self, is_ uncompressed_size_in_bytes=2000), data_root=".") - self.assertEqual("[./docs.json.bz2] is present but does not have the expected size of [200] bytes.", - ctx.exception.args[0]) + assert ctx.value.args[0] == "[./docs.json.bz2] is present but does not have the expected size of [200] bytes." 
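Two pytest details behind these conversions: pytest.raises(..., match=...) applies the pattern with re.search against str(excinfo.value), which is why the regex variants in this diff escape literal brackets and dots, and the raises context exposes the exception instance as ctx.value where unittest used ctx.exception. A minimal sketch, with a hypothetical loader function:

import re
import pytest

def load_corpus():
    # hypothetical stand-in for the loader calls under test
    raise ValueError("[./docs.json.bz2] is present but does not have the expected size of [200] bytes.")

def test_corrupt_corpus_message():
    # match is searched as a regex, so escape literal text
    with pytest.raises(ValueError, match=re.escape("expected size of [200] bytes")) as ctx:
        load_corpus()
    # ctx.value is the raised exception instance (unittest's ctx.exception)
    assert ctx.value.args[0].startswith("[./docs.json.bz2]")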
@mock.patch("esrally.utils.io.prepare_file_offset_table") @mock.patch("esrally.utils.io.decompress") @@ -735,7 +735,7 @@ def test_prepare_bundled_document_set_uncompressed_docs_wrong_size(self, is_file downloader=loader.Downloader(offline=False, test_mode=False), decompressor=loader.Decompressor()) - with self.assertRaises(exceptions.DataError) as ctx: + with pytest.raises(exceptions.DataError) as ctx: p.prepare_bundled_document_set(document_set=track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK, document_file="docs.json", document_archive="docs.json.bz2", @@ -743,10 +743,9 @@ def test_prepare_bundled_document_set_uncompressed_docs_wrong_size(self, is_file compressed_size_in_bytes=200, uncompressed_size_in_bytes=2000), data_root=".") - self.assertEqual("[./docs.json] is present but does not have the expected size of [2000] bytes.", - ctx.exception.args[0]) + assert ctx.value.args[0] == "[./docs.json] is present but does not have the expected size of [2000] bytes." - self.assertEqual(0, prepare_file_offset_table.call_count) + assert prepare_file_offset_table.call_count == 0 class TemplateSource(TestCase): @@ -832,10 +831,8 @@ def dummy_read_glob(c): } """) - self.assertEqual( - expected_response, - tmpl_src.replace_includes(base_path, track) - ) + assert tmpl_src.replace_includes(base_path, track) == \ + expected_response def test_read_glob_files(self): tmpl_obj = loader.TemplateSource( @@ -849,7 +846,7 @@ def test_read_glob_files(self): response = tmpl_obj.read_glob_files("*track_fragment_*.json") expected_response = '{\n "item1": "value1"\n}\n,\n{\n "item2": "value2"\n}\n' - self.assertEqual(expected_response, response) + assert response == expected_response class TemplateRenderTests(TestCase): @@ -871,7 +868,7 @@ def test_render_simple_template(self): "key2": "static value" } """ - self.assertEqual(expected, rendered) + assert rendered == expected def test_render_template_with_external_variables(self): template = """ @@ -890,7 +887,7 @@ def test_render_template_with_external_variables(self): "name": "stranger" } """ - self.assertEqual(expected, rendered) + assert rendered == expected def test_render_template_with_globbing(self): def key_globber(e): @@ -968,7 +965,7 @@ def test_render_template_with_variables(self): self.assertEqualIgnoreWhitespace(expected, rendered) def assertEqualIgnoreWhitespace(self, expected, actual): - self.assertEqual(strip_ws(expected), strip_ws(actual)) + assert strip_ws(actual) == strip_ws(expected) class CompleteTrackParamsTests(TestCase): @@ -983,19 +980,15 @@ def test_check_complete_track_params_contains_all_track_params(self): complete_track_params = loader.CompleteTrackParams() loader.register_all_params_in_track(CompleteTrackParamsTests.assembled_source, complete_track_params) - self.assertEqual( - ["value2", "value3"], - complete_track_params.sorted_track_defined_params - ) + assert complete_track_params.sorted_track_defined_params == \ + ["value2", "value3"] def test_check_complete_track_params_does_not_fail_with_no_track_params(self): complete_track_params = loader.CompleteTrackParams() loader.register_all_params_in_track('{}', complete_track_params) - self.assertEqual( - [], - complete_track_params.sorted_track_defined_params - ) + assert complete_track_params.sorted_track_defined_params == \ + [] def test_unused_user_defined_track_params(self): track_params = { @@ -1014,10 +1007,8 @@ def test_unused_user_defined_track_params(self): "number_of_shards"] ) - self.assertEqual( - ["enable_source", "number_of_repliacs"], - 
sorted(complete_track_params.unused_user_defined_track_params()) - ) + assert sorted(complete_track_params.unused_user_defined_track_params()) == \ + ["enable_source", "number_of_repliacs"] def test_unused_user_defined_track_params_doesnt_fail_with_detaults(self): complete_track_params = loader.CompleteTrackParams() @@ -1030,10 +1021,8 @@ def test_unused_user_defined_track_params_doesnt_fail_with_detaults(self): "number_of_shards"] ) - self.assertEqual( - [], - sorted(complete_track_params.unused_user_defined_track_params()) - ) + assert sorted(complete_track_params.unused_user_defined_track_params()) == \ + [] class TrackPostProcessingTests(TestCase): @@ -1269,17 +1258,13 @@ def test_post_processes_track_spec(self): cfg = config.Config() cfg.add(config.Scope.application, "track", "test.mode.enabled", True) - self.assertEqual( - self.as_track(expected_post_processed, complete_track_params=complete_track_params, index_body=index_body), - loader.TestModeTrackProcessor(cfg).on_after_load_track( + assert loader.TestModeTrackProcessor(cfg).on_after_load_track( self.as_track(track_specification, complete_track_params=complete_track_params, index_body=index_body) - ) - ) + ) == \ + self.as_track(expected_post_processed, complete_track_params=complete_track_params, index_body=index_body) - self.assertEqual( - ["number_of_replicas", "number_of_shards"], - complete_track_params.sorted_track_defined_params - ) + assert complete_track_params.sorted_track_defined_params == \ + ["number_of_replicas", "number_of_shards"] def as_track(self, track_specification, track_params=None, complete_track_params=None, index_body=None): reader = loader.TrackSpecificationReader( @@ -1316,8 +1301,8 @@ def test_sets_absolute_path(self, path_exists): loader.set_absolute_data_path(cfg, t) - self.assertEqual("/data/unittest/docs/documents.json", t.corpora[0].documents[0].document_file) - self.assertEqual("/data/unittest/docs/documents.json.bz2", t.corpora[0].documents[0].document_archive) + assert t.corpora[0].documents[0].document_file == "/data/unittest/docs/documents.json" + assert t.corpora[0].documents[0].document_archive == "/data/unittest/docs/documents.json.bz2" class TrackFilterTests(TestCase): @@ -1330,15 +1315,14 @@ def filter(self, track_specification, include_tasks=None, exclude_tasks=None): return processor.on_after_load_track(track_specification) def test_rejects_invalid_syntax(self): - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: self.filter(track_specification=None, include_tasks=["valid", "a:b:c"]) - self.assertEqual("Invalid format for filtered tasks: [a:b:c]", ctx.exception.args[0]) + assert ctx.value.args[0] == "Invalid format for filtered tasks: [a:b:c]" def test_rejects_unknown_filter_type(self): - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: self.filter(track_specification=None, include_tasks=["valid", "op-type:index"]) - self.assertEqual("Invalid format for filtered tasks: [op-type:index]. Expected [type] but got [op-type].", - ctx.exception.args[0]) + assert ctx.value.args[0] == "Invalid format for filtered tasks: [op-type:index]. Expected [type] but got [op-type]." 
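These hunks also normalize unittest's assertEqual(expected, actual) argument order into assert actual == expected, the convention this migration follows. pytest's assertion rewriting makes that enough: a failing comparison prints both operands and, for lists like the task schedules below, an element-wise diff. A small illustration with hypothetical data:

def test_filtered_schedule_names():
    schedule = ["index-3", "match-all-serial", "cluster-stats"]
    # actual on the left, expected on the right; a failure shows both
    # lists plus the first differing element
    assert schedule == ["index-3", "match-all-serial", "cluster-stats"]
    assert schedule[1] == "match-all-serial"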
def test_filters_tasks(self): track_specification = { @@ -1441,7 +1425,7 @@ def test_filters_tasks(self): } reader = loader.TrackSpecificationReader() full_track = reader("unittest", track_specification, "/mappings") - self.assertEqual(7, len(full_track.challenges[0].schedule)) + assert len(full_track.challenges[0].schedule) == 7 filtered = self.filter(full_track, include_tasks=["index-3", "type:search", @@ -1450,12 +1434,12 @@ def test_filters_tasks(self): "tag:include-me"]) schedule = filtered.challenges[0].schedule - self.assertEqual(5, len(schedule)) - self.assertEqual(["index-3", "match-all-parallel"], [t.name for t in schedule[0].tasks]) - self.assertEqual("match-all-serial", schedule[1].name) - self.assertEqual("cluster-stats", schedule[2].name) - self.assertEqual(["query-filtered", "index-4"], [t.name for t in schedule[3].tasks]) - self.assertEqual("final-cluster-stats", schedule[4].name) + assert len(schedule) == 5 + assert [t.name for t in schedule[0].tasks] == ["index-3", "match-all-parallel"] + assert schedule[1].name == "match-all-serial" + assert schedule[2].name == "cluster-stats" + assert [t.name for t in schedule[3].tasks] == ["query-filtered", "index-4"] + assert schedule[4].name == "final-cluster-stats" def test_filters_exclude_tasks(self): track_specification = { @@ -1533,15 +1517,15 @@ def test_filters_exclude_tasks(self): } reader = loader.TrackSpecificationReader() full_track = reader("unittest", track_specification, "/mappings") - self.assertEqual(5, len(full_track.challenges[0].schedule)) + assert len(full_track.challenges[0].schedule) == 5 filtered = self.filter(full_track, exclude_tasks=["index-3", "type:search", "create-index"]) schedule = filtered.challenges[0].schedule - self.assertEqual(3, len(schedule)) - self.assertEqual(["index-1", "index-2"], [t.name for t in schedule[0].tasks]) - self.assertEqual("node-stats", schedule[1].name) - self.assertEqual("cluster-stats", schedule[2].name) + assert len(schedule) == 3 + assert [t.name for t in schedule[0].tasks] == ["index-1", "index-2"] + assert schedule[1].name == "node-stats" + assert schedule[2].name == "cluster-stats" def test_unmatched_exclude_runs_everything(self): track_specification = { @@ -1601,13 +1585,13 @@ def test_unmatched_exclude_runs_everything(self): reader = loader.TrackSpecificationReader() full_track = reader("unittest", track_specification, "/mappings") - self.assertEqual(5, len(full_track.challenges[0].schedule)) + assert len(full_track.challenges[0].schedule) == 5 expected_schedule = full_track.challenges[0].schedule.copy() filtered = self.filter(full_track, exclude_tasks=["nothing"]) schedule = filtered.challenges[0].schedule - self.assertEqual(expected_schedule, schedule) + assert schedule == expected_schedule def test_unmatched_include_runs_nothing(self): track_specification = { @@ -1667,13 +1651,13 @@ def test_unmatched_include_runs_nothing(self): reader = loader.TrackSpecificationReader() full_track = reader("unittest", track_specification, "/mappings") - self.assertEqual(5, len(full_track.challenges[0].schedule)) + assert len(full_track.challenges[0].schedule) == 5 expected_schedule = [] filtered = self.filter(full_track, include_tasks=["nothing"]) schedule = filtered.challenges[0].schedule - self.assertEqual(expected_schedule, schedule) + assert schedule == expected_schedule # pylint: disable=too-many-public-methods @@ -1686,8 +1670,8 @@ def test_description_is_optional(self): reader = loader.TrackSpecificationReader() resulting_track = reader("unittest", track_specification, 
"/mappings") - self.assertEqual("unittest", resulting_track.name) - self.assertEqual("", resulting_track.description) + assert resulting_track.name == "unittest" + assert resulting_track.description == "" def test_can_read_track_info(self): track_specification = { @@ -1700,8 +1684,8 @@ def test_can_read_track_info(self): } reader = loader.TrackSpecificationReader() resulting_track = reader("unittest", track_specification, "/mappings") - self.assertEqual("unittest", resulting_track.name) - self.assertEqual("description for unit test", resulting_track.description) + assert resulting_track.name == "unittest" + assert resulting_track.description == "description for unit test" def test_document_count_mandatory_if_file_present(self): track_specification = { @@ -1717,9 +1701,9 @@ def test_document_count_mandatory_if_file_present(self): "challenges": [] } reader = loader.TrackSpecificationReader() - with self.assertRaises(loader.TrackSyntaxError) as ctx: + with pytest.raises(loader.TrackSyntaxError) as ctx: reader("unittest", track_specification, "/mappings") - self.assertEqual("Track 'unittest' is invalid. Mandatory element 'document-count' is missing.", ctx.exception.args[0]) + assert ctx.value.args[0] == "Track 'unittest' is invalid. Mandatory element 'document-count' is missing." def test_parse_with_mixed_warmup_iterations_and_measurement(self): track_specification = { @@ -1770,11 +1754,10 @@ def test_parse_with_mixed_warmup_iterations_and_measurement(self): reader = loader.TrackSpecificationReader(source=io.DictStringFileSourceFactory({ "/mappings/index.json": ['{"mappings": {"docs": "empty-for-test"}}'], })) - with self.assertRaises(loader.TrackSyntaxError) as ctx: + with pytest.raises(loader.TrackSyntaxError) as ctx: reader("unittest", track_specification, "/mappings") - self.assertEqual("Track 'unittest' is invalid. Operation 'index-append' in challenge 'default-challenge' defines 3 warmup " - "iterations and a time period of 60 seconds but mixing time periods and iterations is not allowed.", - ctx.exception.args[0]) + assert ctx.value.args[0] == "Track 'unittest' is invalid. Operation 'index-append' in challenge 'default-challenge' defines 3 warmup " \ + "iterations and a time period of 60 seconds but mixing time periods and iterations is not allowed." def test_parse_with_mixed_iterations_and_ramp_up(self): track_specification = { @@ -1826,12 +1809,11 @@ def test_parse_with_mixed_iterations_and_ramp_up(self): reader = loader.TrackSpecificationReader(source=io.DictStringFileSourceFactory({ "/mappings/index.json": ['{"mappings": {"docs": "empty-for-test"}}'], })) - with self.assertRaises(loader.TrackSyntaxError) as ctx: + with pytest.raises(loader.TrackSyntaxError) as ctx: reader("unittest", track_specification, "/mappings") - self.assertEqual("Track 'unittest' is invalid. Operation 'index-append' in challenge 'default-challenge' " - "defines a ramp-up time period of 120 seconds as well as 3 warmup iterations and 5 iterations " - "but mixing time periods and iterations is not allowed.", - ctx.exception.args[0]) + assert ctx.value.args[0] == "Track 'unittest' is invalid. Operation 'index-append' in challenge 'default-challenge' " \ + "defines a ramp-up time period of 120 seconds as well as 3 warmup iterations and 5 iterations " \ + "but mixing time periods and iterations is not allowed." 
def test_parse_missing_challenge_or_challenges(self): track_specification = { @@ -1861,10 +1843,9 @@ def test_parse_missing_challenge_or_challenges(self): reader = loader.TrackSpecificationReader(source=io.DictStringFileSourceFactory({ "/mappings/index.json": ['{"mappings": {"docs": "empty-for-test"}}'], })) - with self.assertRaises(loader.TrackSyntaxError) as ctx: + with pytest.raises(loader.TrackSyntaxError) as ctx: reader("unittest", track_specification, "/mappings") - self.assertEqual("Track 'unittest' is invalid. You must define 'challenge', 'challenges' or 'schedule' but none is specified.", - ctx.exception.args[0]) + assert ctx.value.args[0] == "Track 'unittest' is invalid. You must define 'challenge', 'challenges' or 'schedule' but none is specified." def test_parse_challenge_and_challenges_are_defined(self): track_specification = { @@ -1896,10 +1877,10 @@ def test_parse_challenge_and_challenges_are_defined(self): reader = loader.TrackSpecificationReader(source=io.DictStringFileSourceFactory({ "/mappings/index.json": ['{"mappings": {"docs": "empty-for-test"}}'], })) - with self.assertRaises(loader.TrackSyntaxError) as ctx: + with pytest.raises(loader.TrackSyntaxError) as ctx: reader("unittest", track_specification, "/mappings") - self.assertEqual("Track 'unittest' is invalid. Multiple out of 'challenge', 'challenges' or 'schedule' are defined but only " - "one of them is allowed.", ctx.exception.args[0]) + assert ctx.value.args[0] == "Track 'unittest' is invalid. Multiple out of 'challenge', 'challenges' or 'schedule' are defined but only " \ + "one of them is allowed." def test_parse_with_mixed_warmup_time_period_and_iterations(self): track_specification = { @@ -1950,11 +1931,10 @@ def test_parse_with_mixed_warmup_time_period_and_iterations(self): reader = loader.TrackSpecificationReader(source=io.DictStringFileSourceFactory({ "/mappings/index.json": ['{"mappings": {"docs": "empty-for-test"}}'], })) - with self.assertRaises(loader.TrackSyntaxError) as ctx: + with pytest.raises(loader.TrackSyntaxError) as ctx: reader("unittest", track_specification, "/mappings") - self.assertEqual("Track 'unittest' is invalid. Operation 'index-append' in challenge 'default-challenge' defines a warmup time " - "period of 20 seconds and 1000 iterations but mixing time periods and iterations is not allowed.", - ctx.exception.args[0]) + assert ctx.value.args[0] == "Track 'unittest' is invalid. Operation 'index-append' in challenge 'default-challenge' defines a warmup time " \ + "period of 20 seconds and 1000 iterations but mixing time periods and iterations is not allowed." def test_parse_duplicate_implicit_task_names(self): track_specification = { @@ -1981,11 +1961,10 @@ def test_parse_duplicate_implicit_task_names(self): } } reader = loader.TrackSpecificationReader() - with self.assertRaises(loader.TrackSyntaxError) as ctx: + with pytest.raises(loader.TrackSyntaxError) as ctx: reader("unittest", track_specification, "/mappings") - self.assertEqual("Track 'unittest' is invalid. Challenge 'default-challenge' contains multiple tasks with the name 'search'. Please" - " use the task's name property to assign a unique name for each task.", - ctx.exception.args[0]) + assert ctx.value.args[0] == "Track 'unittest' is invalid. Challenge 'default-challenge' contains multiple tasks with the name 'search'. Please" \ + " use the task's name property to assign a unique name for each task." 
def test_parse_duplicate_explicit_task_names(self): track_specification = { @@ -2014,11 +1993,10 @@ def test_parse_duplicate_explicit_task_names(self): } } reader = loader.TrackSpecificationReader() - with self.assertRaises(loader.TrackSyntaxError) as ctx: + with pytest.raises(loader.TrackSyntaxError) as ctx: reader("unittest", track_specification, "/mappings") - self.assertEqual("Track 'unittest' is invalid. Challenge 'default-challenge' contains multiple tasks with the name " - "'duplicate-task-name'. Please use the task's name property to assign a unique name for each task.", - ctx.exception.args[0]) + assert ctx.value.args[0] == "Track 'unittest' is invalid. Challenge 'default-challenge' contains multiple tasks with the name " \ + "'duplicate-task-name'. Please use the task's name property to assign a unique name for each task." def test_load_invalid_index_body(self): track_specification = { @@ -2068,9 +2046,9 @@ def test_load_invalid_index_body(self): } """] })) - with self.assertRaises(loader.TrackSyntaxError) as ctx: + with pytest.raises(loader.TrackSyntaxError) as ctx: reader("unittest", track_specification, "/mappings") - self.assertEqual("Could not load file template for 'definition for index index-historical in body.json'", ctx.exception.args[0]) + assert ctx.value.args[0] == "Could not load file template for 'definition for index index-historical in body.json'" def test_parse_unique_task_names(self): track_specification = { @@ -2100,15 +2078,15 @@ def test_parse_unique_task_names(self): } reader = loader.TrackSpecificationReader(selected_challenge="default-challenge") resulting_track = reader("unittest", track_specification, "/mappings") - self.assertEqual("unittest", resulting_track.name) + assert resulting_track.name == "unittest" challenge = resulting_track.challenges[0] - self.assertTrue(challenge.selected) + assert challenge.selected schedule = challenge.schedule - self.assertEqual(2, len(schedule)) - self.assertEqual("search-one-client", schedule[0].name) - self.assertEqual("search", schedule[0].operation.name) - self.assertEqual("search-two-clients", schedule[1].name) - self.assertEqual("search", schedule[1].operation.name) + assert len(schedule) == 2 + assert schedule[0].name == "search-one-client" + assert schedule[0].operation.name == "search" + assert schedule[1].name == "search-two-clients" + assert schedule[1].operation.name == "search" def test_parse_indices_valid_track_specification(self): track_specification = { @@ -2213,16 +2191,14 @@ def test_parse_indices_valid_track_specification(self): })) resulting_track = reader("unittest", track_specification, "/mappings") # j2 variables defined in the track -- used for checking mismatching user track params - self.assertEqual( - ["number_of_shards"], - complete_track_params.sorted_track_defined_params - ) - self.assertEqual("unittest", resulting_track.name) - self.assertEqual("description for unit test", resulting_track.description) + assert complete_track_params.sorted_track_defined_params == \ + ["number_of_shards"] + assert resulting_track.name == "unittest" + assert resulting_track.description == "description for unit test" # indices - self.assertEqual(1, len(resulting_track.indices)) - self.assertEqual("index-historical", resulting_track.indices[0].name) - self.assertDictEqual({ + assert len(resulting_track.indices) == 1 + assert resulting_track.indices[0].name == "index-historical" + assert resulting_track.indices[0].body == { "settings": { "number_of_shards": 3 }, @@ -2231,56 +2207,56 @@ def 
test_parse_indices_valid_track_specification(self): "main": "empty-for-test", "secondary": "empty-for-test" } - }, resulting_track.indices[0].body) - self.assertEqual(2, len(resulting_track.indices[0].types)) - self.assertEqual("main", resulting_track.indices[0].types[0]) - self.assertEqual("secondary", resulting_track.indices[0].types[1]) + } + assert len(resulting_track.indices[0].types) == 2 + assert resulting_track.indices[0].types[0] == "main" + assert resulting_track.indices[0].types[1] == "secondary" # corpora - self.assertEqual(1, len(resulting_track.corpora)) - self.assertEqual("test", resulting_track.corpora[0].name) - self.assertDictEqual({"test-corpus": True}, resulting_track.corpora[0].meta_data) - self.assertEqual(2, len(resulting_track.corpora[0].documents)) + assert len(resulting_track.corpora) == 1 + assert resulting_track.corpora[0].name == "test" + assert resulting_track.corpora[0].meta_data == {"test-corpus": True} + assert len(resulting_track.corpora[0].documents) == 2 docs_primary = resulting_track.corpora[0].documents[0] - self.assertEqual(track.Documents.SOURCE_FORMAT_BULK, docs_primary.source_format) - self.assertEqual("documents-main.json", docs_primary.document_file) - self.assertEqual("documents-main.json.bz2", docs_primary.document_archive) - self.assertEqual("https://localhost/data", docs_primary.base_url) - self.assertFalse(docs_primary.includes_action_and_meta_data) - self.assertEqual(10, docs_primary.number_of_documents) - self.assertEqual(100, docs_primary.compressed_size_in_bytes) - self.assertEqual(10000, docs_primary.uncompressed_size_in_bytes) - self.assertEqual("index-historical", docs_primary.target_index) - self.assertEqual("main", docs_primary.target_type) - self.assertDictEqual({ + assert docs_primary.source_format == track.Documents.SOURCE_FORMAT_BULK + assert docs_primary.document_file == "documents-main.json" + assert docs_primary.document_archive == "documents-main.json.bz2" + assert docs_primary.base_url == "https://localhost/data" + assert not docs_primary.includes_action_and_meta_data + assert docs_primary.number_of_documents == 10 + assert docs_primary.compressed_size_in_bytes == 100 + assert docs_primary.uncompressed_size_in_bytes == 10000 + assert docs_primary.target_index == "index-historical" + assert docs_primary.target_type == "main" + assert docs_primary.meta_data == { "test-docs": True, "role": "main" - }, docs_primary.meta_data) + } docs_secondary = resulting_track.corpora[0].documents[1] - self.assertEqual(track.Documents.SOURCE_FORMAT_BULK, docs_secondary.source_format) - self.assertEqual("documents-secondary.json", docs_secondary.document_file) - self.assertEqual("documents-secondary.json.bz2", docs_secondary.document_archive) - self.assertEqual("https://localhost/data", docs_secondary.base_url) - self.assertTrue(docs_secondary.includes_action_and_meta_data) - self.assertEqual(20, docs_secondary.number_of_documents) - self.assertEqual(200, docs_secondary.compressed_size_in_bytes) - self.assertEqual(20000, docs_secondary.uncompressed_size_in_bytes) + assert docs_secondary.source_format == track.Documents.SOURCE_FORMAT_BULK + assert docs_secondary.document_file == "documents-secondary.json" + assert docs_secondary.document_archive == "documents-secondary.json.bz2" + assert docs_secondary.base_url == "https://localhost/data" + assert docs_secondary.includes_action_and_meta_data + assert docs_secondary.number_of_documents == 20 + assert docs_secondary.compressed_size_in_bytes == 200 + assert 
docs_secondary.uncompressed_size_in_bytes == 20000 # This is defined by the action-and-meta-data line! - self.assertIsNone(docs_secondary.target_index) - self.assertIsNone(docs_secondary.target_type) - self.assertDictEqual({ + assert docs_secondary.target_index is None + assert docs_secondary.target_type is None + assert docs_secondary.meta_data == { "test-docs": True, "role": "secondary" - }, docs_secondary.meta_data) + } # challenges - self.assertEqual(1, len(resulting_track.challenges)) - self.assertEqual("default-challenge", resulting_track.challenges[0].name) - self.assertEqual("Default challenge", resulting_track.challenges[0].description) - self.assertEqual({"mixed": True, "max-clients": 8}, resulting_track.challenges[0].meta_data) - self.assertEqual({"append": True}, resulting_track.challenges[0].schedule[0].operation.meta_data) - self.assertEqual({"operation-index": 0}, resulting_track.challenges[0].schedule[0].meta_data) + assert len(resulting_track.challenges) == 1 + assert resulting_track.challenges[0].name == "default-challenge" + assert resulting_track.challenges[0].description == "Default challenge" + assert resulting_track.challenges[0].meta_data == {"mixed": True, "max-clients": 8} + assert resulting_track.challenges[0].schedule[0].operation.meta_data == {"append": True} + assert resulting_track.challenges[0].schedule[0].meta_data == {"operation-index": 0} def test_parse_data_streams_valid_track_specification(self): track_specification = { @@ -2363,62 +2339,62 @@ def test_parse_data_streams_valid_track_specification(self): complete_track_params=complete_track_params) resulting_track = reader("unittest", track_specification, "/mappings") # j2 variables defined in the track -- used for checking mismatching user track params - self.assertEqual("unittest", resulting_track.name) - self.assertEqual("description for unit test", resulting_track.description) + assert resulting_track.name == "unittest" + assert resulting_track.description == "description for unit test" # data streams - self.assertEqual(1, len(resulting_track.data_streams)) - self.assertEqual("data-stream-historical", resulting_track.data_streams[0].name) + assert len(resulting_track.data_streams) == 1 + assert resulting_track.data_streams[0].name == "data-stream-historical" # corpora - self.assertEqual(1, len(resulting_track.corpora)) - self.assertEqual("test", resulting_track.corpora[0].name) - self.assertEqual(3, len(resulting_track.corpora[0].documents)) + assert len(resulting_track.corpora) == 1 + assert resulting_track.corpora[0].name == "test" + assert len(resulting_track.corpora[0].documents) == 3 docs_primary = resulting_track.corpora[0].documents[0] - self.assertEqual(track.Documents.SOURCE_FORMAT_BULK, docs_primary.source_format) - self.assertEqual("documents-main.json", docs_primary.document_file) - self.assertEqual("documents-main.json.bz2", docs_primary.document_archive) - self.assertEqual("https://localhost/data", docs_primary.base_url) - self.assertFalse(docs_primary.includes_action_and_meta_data) - self.assertEqual(10, docs_primary.number_of_documents) - self.assertEqual(100, docs_primary.compressed_size_in_bytes) - self.assertEqual(10000, docs_primary.uncompressed_size_in_bytes) - self.assertEqual("data-stream-historical", docs_primary.target_data_stream) - self.assertIsNone(docs_primary.target_index) - self.assertIsNone(docs_primary.target_type) + assert docs_primary.source_format == track.Documents.SOURCE_FORMAT_BULK + assert docs_primary.document_file == "documents-main.json" + assert 
docs_primary.document_archive == "documents-main.json.bz2" + assert docs_primary.base_url == "https://localhost/data" + assert not docs_primary.includes_action_and_meta_data + assert docs_primary.number_of_documents == 10 + assert docs_primary.compressed_size_in_bytes == 100 + assert docs_primary.uncompressed_size_in_bytes == 10000 + assert docs_primary.target_data_stream == "data-stream-historical" + assert docs_primary.target_index is None + assert docs_primary.target_type is None docs_secondary = resulting_track.corpora[0].documents[1] - self.assertEqual(track.Documents.SOURCE_FORMAT_BULK, docs_secondary.source_format) - self.assertEqual("documents-secondary.json", docs_secondary.document_file) - self.assertEqual("documents-secondary.json.bz2", docs_secondary.document_archive) - self.assertEqual("https://localhost/data", docs_secondary.base_url) - self.assertTrue(docs_secondary.includes_action_and_meta_data) - self.assertEqual(20, docs_secondary.number_of_documents) - self.assertEqual(200, docs_secondary.compressed_size_in_bytes) - self.assertEqual(20000, docs_secondary.uncompressed_size_in_bytes) + assert docs_secondary.source_format == track.Documents.SOURCE_FORMAT_BULK + assert docs_secondary.document_file == "documents-secondary.json" + assert docs_secondary.document_archive == "documents-secondary.json.bz2" + assert docs_secondary.base_url == "https://localhost/data" + assert docs_secondary.includes_action_and_meta_data + assert docs_secondary.number_of_documents == 20 + assert docs_secondary.compressed_size_in_bytes == 200 + assert docs_secondary.uncompressed_size_in_bytes == 20000 # This is defined by the action-and-meta-data line! - self.assertIsNone(docs_secondary.target_data_stream) - self.assertIsNone(docs_secondary.target_index) - self.assertIsNone(docs_secondary.target_type) + assert docs_secondary.target_data_stream is None + assert docs_secondary.target_index is None + assert docs_secondary.target_type is None docs_tertiary = resulting_track.corpora[0].documents[2] - self.assertEqual(track.Documents.SOURCE_FORMAT_BULK, docs_tertiary.source_format) - self.assertEqual("documents-main.json", docs_tertiary.document_file) - self.assertEqual("documents-main.json.bz2", docs_tertiary.document_archive) - self.assertEqual("https://localhost/data", docs_tertiary.base_url) - self.assertFalse(docs_tertiary.includes_action_and_meta_data) - self.assertEqual(10, docs_tertiary.number_of_documents) - self.assertEqual(100, docs_tertiary.compressed_size_in_bytes) - self.assertIsNone(docs_tertiary.target_index) - self.assertIsNone(docs_tertiary.target_type) - self.assertEqual("data-stream-historical", docs_tertiary.target_data_stream) + assert docs_tertiary.source_format == track.Documents.SOURCE_FORMAT_BULK + assert docs_tertiary.document_file == "documents-main.json" + assert docs_tertiary.document_archive == "documents-main.json.bz2" + assert docs_tertiary.base_url == "https://localhost/data" + assert not docs_tertiary.includes_action_and_meta_data + assert docs_tertiary.number_of_documents == 10 + assert docs_tertiary.compressed_size_in_bytes == 100 + assert docs_tertiary.target_index is None + assert docs_tertiary.target_type is None + assert docs_tertiary.target_data_stream == "data-stream-historical" # challenges - self.assertEqual(1, len(resulting_track.challenges)) - self.assertEqual("default-challenge", resulting_track.challenges[0].name) - self.assertEqual("Default challenge", resulting_track.challenges[0].description) - self.assertEqual({"mixed": True, "max-clients": 8}, 
resulting_track.challenges[0].meta_data) - self.assertEqual({"append": True}, resulting_track.challenges[0].schedule[0].operation.meta_data) - self.assertEqual({"operation-index": 0}, resulting_track.challenges[0].schedule[0].meta_data) + assert len(resulting_track.challenges) == 1 + assert resulting_track.challenges[0].name == "default-challenge" + assert resulting_track.challenges[0].description == "Default challenge" + assert resulting_track.challenges[0].meta_data == {"mixed": True, "max-clients": 8} + assert resulting_track.challenges[0].schedule[0].operation.meta_data == {"append": True} + assert resulting_track.challenges[0].schedule[0].meta_data == {"operation-index": 0} def test_parse_valid_without_types(self): track_specification = { @@ -2467,37 +2443,37 @@ def test_parse_valid_without_types(self): """] })) resulting_track = reader("unittest", track_specification, "/mappings") - self.assertEqual("unittest", resulting_track.name) - self.assertEqual("description for unit test", resulting_track.description) + assert resulting_track.name == "unittest" + assert resulting_track.description == "description for unit test" # indices - self.assertEqual(1, len(resulting_track.indices)) - self.assertEqual("index-historical", resulting_track.indices[0].name) - self.assertDictEqual({ + assert len(resulting_track.indices) == 1 + assert resulting_track.indices[0].name == "index-historical" + assert resulting_track.indices[0].body == { "settings": { "number_of_shards": 3 } - }, resulting_track.indices[0].body) - self.assertEqual(0, len(resulting_track.indices[0].types)) + } + assert len(resulting_track.indices[0].types) == 0 # corpora - self.assertEqual(1, len(resulting_track.corpora)) - self.assertEqual("test", resulting_track.corpora[0].name) - self.assertEqual(1, len(resulting_track.corpora[0].documents)) + assert len(resulting_track.corpora) == 1 + assert resulting_track.corpora[0].name == "test" + assert len(resulting_track.corpora[0].documents) == 1 docs_primary = resulting_track.corpora[0].documents[0] - self.assertEqual(track.Documents.SOURCE_FORMAT_BULK, docs_primary.source_format) - self.assertEqual("documents-main.json", docs_primary.document_file) - self.assertEqual("documents-main.json.bz2", docs_primary.document_archive) - self.assertEqual("https://localhost/data", docs_primary.base_url) - self.assertFalse(docs_primary.includes_action_and_meta_data) - self.assertEqual(10, docs_primary.number_of_documents) - self.assertEqual(100, docs_primary.compressed_size_in_bytes) - self.assertEqual(10000, docs_primary.uncompressed_size_in_bytes) - self.assertEqual("index-historical", docs_primary.target_index) - self.assertIsNone(docs_primary.target_type) - self.assertIsNone(docs_primary.target_data_stream) + assert docs_primary.source_format == track.Documents.SOURCE_FORMAT_BULK + assert docs_primary.document_file == "documents-main.json" + assert docs_primary.document_archive == "documents-main.json.bz2" + assert docs_primary.base_url == "https://localhost/data" + assert not docs_primary.includes_action_and_meta_data + assert docs_primary.number_of_documents == 10 + assert docs_primary.compressed_size_in_bytes == 100 + assert docs_primary.uncompressed_size_in_bytes == 10000 + assert docs_primary.target_index == "index-historical" + assert docs_primary.target_type is None + assert docs_primary.target_data_stream is None # challenges - self.assertEqual(1, len(resulting_track.challenges)) + assert len(resulting_track.challenges) == 1 def test_parse_invalid_data_streams_with_indices(self): 
track_specification = { @@ -2541,7 +2517,7 @@ def test_parse_invalid_data_streams_with_indices(self): complete_track_params = loader.CompleteTrackParams() reader = loader.TrackSpecificationReader( complete_track_params=complete_track_params) - with self.assertRaises(loader.TrackSyntaxError): + with pytest.raises(loader.TrackSyntaxError): reader("unittest", track_specification, "/mapping") def test_parse_invalid_data_streams_with_target_index(self): @@ -2581,7 +2557,7 @@ def test_parse_invalid_data_streams_with_target_index(self): complete_track_params = loader.CompleteTrackParams() reader = loader.TrackSpecificationReader( complete_track_params=complete_track_params) - with self.assertRaises(loader.TrackSyntaxError): + with pytest.raises(loader.TrackSyntaxError): reader("unittest", track_specification, "/mapping") def test_parse_invalid_data_streams_with_target_type(self): @@ -2621,7 +2597,7 @@ def test_parse_invalid_data_streams_with_target_type(self): complete_track_params = loader.CompleteTrackParams() reader = loader.TrackSpecificationReader( complete_track_params=complete_track_params) - with self.assertRaises(loader.TrackSyntaxError): + with pytest.raises(loader.TrackSyntaxError): reader("unittest", track_specification, "/mapping") def test_parse_invalid_no_data_stream_target(self): @@ -2663,7 +2639,7 @@ def test_parse_invalid_no_data_stream_target(self): complete_track_params = loader.CompleteTrackParams() reader = loader.TrackSpecificationReader( complete_track_params=complete_track_params) - with self.assertRaises(loader.TrackSyntaxError): + with pytest.raises(loader.TrackSyntaxError): reader("unittest", track_specification, "/mapping") def test_parse_valid_without_indices(self): @@ -2711,33 +2687,33 @@ def test_parse_valid_without_indices(self): """] })) resulting_track = reader("unittest", track_specification, "/mappings") - self.assertEqual("unittest", resulting_track.name) - self.assertEqual("description for unit test", resulting_track.description) + assert resulting_track.name == "unittest" + assert resulting_track.description == "description for unit test" # indices - self.assertEqual(0, len(resulting_track.indices)) + assert len(resulting_track.indices) == 0 # data streams - self.assertEqual(1, len(resulting_track.data_streams)) - self.assertEqual("historical-data-stream", resulting_track.data_streams[0].name) + assert len(resulting_track.data_streams) == 1 + assert resulting_track.data_streams[0].name == "historical-data-stream" # corpora - self.assertEqual(1, len(resulting_track.corpora)) - self.assertEqual("test", resulting_track.corpora[0].name) - self.assertEqual(1, len(resulting_track.corpora[0].documents)) + assert len(resulting_track.corpora) == 1 + assert resulting_track.corpora[0].name == "test" + assert len(resulting_track.corpora[0].documents) == 1 docs_primary = resulting_track.corpora[0].documents[0] - self.assertEqual(track.Documents.SOURCE_FORMAT_BULK, docs_primary.source_format) - self.assertEqual("documents-main.json", docs_primary.document_file) - self.assertEqual("documents-main.json.bz2", docs_primary.document_archive) - self.assertEqual("https://localhost/data", docs_primary.base_url) - self.assertFalse(docs_primary.includes_action_and_meta_data) - self.assertEqual(10, docs_primary.number_of_documents) - self.assertEqual(100, docs_primary.compressed_size_in_bytes) - self.assertEqual(10000, docs_primary.uncompressed_size_in_bytes) - self.assertEqual("historical-data-stream", docs_primary.target_data_stream) - self.assertIsNone(docs_primary.target_type) 
- self.assertIsNone(docs_primary.target_index) + assert docs_primary.source_format == track.Documents.SOURCE_FORMAT_BULK + assert docs_primary.document_file == "documents-main.json" + assert docs_primary.document_archive == "documents-main.json.bz2" + assert docs_primary.base_url == "https://localhost/data" + assert not docs_primary.includes_action_and_meta_data + assert docs_primary.number_of_documents == 10 + assert docs_primary.compressed_size_in_bytes == 100 + assert docs_primary.uncompressed_size_in_bytes == 10000 + assert docs_primary.target_data_stream == "historical-data-stream" + assert docs_primary.target_type is None + assert docs_primary.target_index is None # challenges - self.assertEqual(1, len(resulting_track.challenges)) + assert len(resulting_track.challenges) == 1 def test_parse_valid_track_specification_with_index_template(self): track_specification = { @@ -2767,24 +2743,22 @@ def test_parse_valid_track_specification_with_index_template(self): """], })) resulting_track = reader("unittest", track_specification, "/mappings") - self.assertEqual( - ["index_pattern", "number_of_shards"], - complete_track_params.sorted_track_defined_params - ) - self.assertEqual("unittest", resulting_track.name) - self.assertEqual("description for unit test", resulting_track.description) - self.assertEqual(0, len(resulting_track.indices)) - self.assertEqual(1, len(resulting_track.templates)) - self.assertEqual("my-index-template", resulting_track.templates[0].name) - self.assertEqual("*", resulting_track.templates[0].pattern) - self.assertDictEqual( + assert complete_track_params.sorted_track_defined_params == \ + ["index_pattern", "number_of_shards"] + assert resulting_track.name == "unittest" + assert resulting_track.description == "description for unit test" + assert len(resulting_track.indices) == 0 + assert len(resulting_track.templates) == 1 + assert resulting_track.templates[0].name == "my-index-template" + assert resulting_track.templates[0].pattern == "*" + assert resulting_track.templates[0].content == \ { "index_patterns": ["*"], "settings": { "number_of_shards": 1 } - }, resulting_track.templates[0].content) - self.assertEqual(0, len(resulting_track.challenges)) + } + assert len(resulting_track.challenges) == 0 def test_parse_valid_track_specification_with_composable_template(self): track_specification = { @@ -2852,20 +2826,18 @@ def test_parse_valid_track_specification_with_composable_template(self): """] })) resulting_track = reader("unittest", track_specification, "/mappings") - self.assertEqual( - ["index_pattern", "number_of_replicas", "number_of_shards"], - complete_track_params.sorted_track_defined_params - ) - self.assertEqual("unittest", resulting_track.name) - self.assertEqual("description for unit test", resulting_track.description) - self.assertEqual(0, len(resulting_track.indices)) - self.assertEqual(1, len(resulting_track.composable_templates)) - self.assertEqual(2, len(resulting_track.component_templates)) - self.assertEqual("my-index-template", resulting_track.composable_templates[0].name) - self.assertEqual("*", resulting_track.composable_templates[0].pattern) - self.assertEqual("my-component-template-1", resulting_track.component_templates[0].name) - self.assertEqual("my-component-template-2", resulting_track.component_templates[1].name) - self.assertDictEqual( + assert complete_track_params.sorted_track_defined_params == \ + ["index_pattern", "number_of_replicas", "number_of_shards"] + assert resulting_track.name == "unittest" + assert resulting_track.description 
== "description for unit test" + assert len(resulting_track.indices) == 0 + assert len(resulting_track.composable_templates) == 1 + assert len(resulting_track.component_templates) == 2 + assert resulting_track.composable_templates[0].name == "my-index-template" + assert resulting_track.composable_templates[0].pattern == "*" + assert resulting_track.component_templates[0].name == "my-component-template-1" + assert resulting_track.component_templates[1].name == "my-component-template-2" + assert resulting_track.composable_templates[0].content == \ { "index_patterns": ["logs-*"], "template": { @@ -2874,16 +2846,16 @@ def test_parse_valid_track_specification_with_composable_template(self): } }, "composed_of": ["my-component-template-1", "my-component-template-2"] - }, resulting_track.composable_templates[0].content) - self.assertDictEqual( + } + assert resulting_track.component_templates[0].content == \ { "template": { "settings": { "index.number_of_shards": 2 } } - }, resulting_track.component_templates[0].content) - self.assertDictEqual( + } + assert resulting_track.component_templates[1].content == \ { "template": { "settings": { @@ -2897,8 +2869,8 @@ def test_parse_valid_track_specification_with_composable_template(self): } } } - }, resulting_track.component_templates[1].content) - self.assertEqual(0, len(resulting_track.challenges)) + } + assert len(resulting_track.challenges) == 0 def test_parse_invalid_track_specification_with_composable_template(self): track_specification = { @@ -2915,10 +2887,9 @@ def test_parse_invalid_track_specification_with_composable_template(self): reader = loader.TrackSpecificationReader( track_params={"index_pattern": "logs-*", "number_of_replicas": 1}, complete_track_params=complete_track_params) - with self.assertRaises(loader.TrackSyntaxError) as ctx: + with pytest.raises(loader.TrackSyntaxError) as ctx: reader("unittest", track_specification, "/mappings") - self.assertEqual("Track 'unittest' is invalid. Mandatory element 'template' is missing.", - ctx.exception.args[0]) + assert ctx.value.args[0] == "Track 'unittest' is invalid. Mandatory element 'template' is missing." def test_unique_challenge_names(self): track_specification = { @@ -2954,9 +2925,9 @@ def test_unique_challenge_names(self): ] } reader = loader.TrackSpecificationReader() - with self.assertRaises(loader.TrackSyntaxError) as ctx: + with pytest.raises(loader.TrackSyntaxError) as ctx: reader("unittest", track_specification, "/mappings") - self.assertEqual("Track 'unittest' is invalid. Duplicate challenge with name 'test-challenge'.", ctx.exception.args[0]) + assert ctx.value.args[0] == "Track 'unittest' is invalid. Duplicate challenge with name 'test-challenge'." def test_not_more_than_one_default_challenge_possible(self): track_specification = { @@ -2993,10 +2964,10 @@ def test_not_more_than_one_default_challenge_possible(self): ] } reader = loader.TrackSpecificationReader() - with self.assertRaises(loader.TrackSyntaxError) as ctx: + with pytest.raises(loader.TrackSyntaxError) as ctx: reader("unittest", track_specification, "/mappings") - self.assertEqual("Track 'unittest' is invalid. Both 'default-challenge' and 'another-challenge' are defined as default challenges. " - "Please define only one of them as default.", ctx.exception.args[0]) + assert ctx.value.args[0] == "Track 'unittest' is invalid. Both 'default-challenge' and 'another-challenge' are defined as default challenges. " \ + "Please define only one of them as default." 
def test_at_least_one_default_challenge(self): track_specification = { @@ -3029,10 +3000,10 @@ def test_at_least_one_default_challenge(self): ] } reader = loader.TrackSpecificationReader() - with self.assertRaises(loader.TrackSyntaxError) as ctx: + with pytest.raises(loader.TrackSyntaxError) as ctx: reader("unittest", track_specification, "/mappings") - self.assertEqual("Track 'unittest' is invalid. No default challenge specified. Please edit the track and add \"default\": true " - "to one of the challenges challenge, another-challenge.", ctx.exception.args[0]) + assert ctx.value.args[0] == "Track 'unittest' is invalid. No default challenge specified. Please edit the track and add \"default\": true " \ + "to one of the challenges challenge, another-challenge." def test_exactly_one_default_challenge(self): track_specification = { @@ -3067,11 +3038,11 @@ def test_exactly_one_default_challenge(self): } reader = loader.TrackSpecificationReader(selected_challenge="another-challenge") resulting_track = reader("unittest", track_specification, "/mappings") - self.assertEqual(2, len(resulting_track.challenges)) - self.assertEqual("challenge", resulting_track.challenges[0].name) - self.assertTrue(resulting_track.challenges[0].default) - self.assertFalse(resulting_track.challenges[1].default) - self.assertTrue(resulting_track.challenges[1].selected) + assert len(resulting_track.challenges) == 2 + assert resulting_track.challenges[0].name == "challenge" + assert resulting_track.challenges[0].default + assert not resulting_track.challenges[1].default + assert resulting_track.challenges[1].selected def test_selects_sole_challenge_implicitly_as_default(self): track_specification = { @@ -3094,10 +3065,10 @@ def test_selects_sole_challenge_implicitly_as_default(self): } reader = loader.TrackSpecificationReader() resulting_track = reader("unittest", track_specification, "/mappings") - self.assertEqual(1, len(resulting_track.challenges)) - self.assertEqual("challenge", resulting_track.challenges[0].name) - self.assertTrue(resulting_track.challenges[0].default) - self.assertTrue(resulting_track.challenges[0].selected) + assert len(resulting_track.challenges) == 1 + assert resulting_track.challenges[0].name == "challenge" + assert resulting_track.challenges[0].default + assert resulting_track.challenges[0].selected def test_auto_generates_challenge_from_schedule(self): track_specification = { @@ -3117,10 +3088,10 @@ def test_auto_generates_challenge_from_schedule(self): } reader = loader.TrackSpecificationReader() resulting_track = reader("unittest", track_specification, "/mappings") - self.assertEqual(1, len(resulting_track.challenges)) - self.assertTrue(resulting_track.challenges[0].auto_generated) - self.assertTrue(resulting_track.challenges[0].default) - self.assertTrue(resulting_track.challenges[0].selected) + assert len(resulting_track.challenges) == 1 + assert resulting_track.challenges[0].auto_generated + assert resulting_track.challenges[0].default + assert resulting_track.challenges[0].selected def test_inline_operations(self): track_specification = { @@ -3147,9 +3118,9 @@ def test_inline_operations(self): resulting_track = reader("unittest", track_specification, "/mappings") challenge = resulting_track.challenges[0] - self.assertEqual(2, len(challenge.schedule)) - self.assertEqual(track.OperationType.Bulk.to_hyphenated_string(), challenge.schedule[0].operation.type) - self.assertEqual(track.OperationType.ForceMerge.to_hyphenated_string(), challenge.schedule[1].operation.type) + assert 
len(challenge.schedule) == 2 + assert challenge.schedule[0].operation.type == track.OperationType.Bulk.to_hyphenated_string() + assert challenge.schedule[1].operation.type == track.OperationType.ForceMerge.to_hyphenated_string() def test_supports_target_throughput(self): track_specification = { @@ -3177,9 +3148,9 @@ def test_supports_target_throughput(self): resulting_track = reader("unittest", track_specification, "/mappings") indexing_task = resulting_track.challenges[0].schedule[0] - self.assertEqual(10, indexing_task.params["target-throughput"]) - self.assertEqual(120, indexing_task.warmup_time_period) - self.assertEqual(60, indexing_task.ramp_up_time_period) + assert indexing_task.params["target-throughput"] == 10 + assert indexing_task.warmup_time_period == 120 + assert indexing_task.ramp_up_time_period == 60 def test_supports_target_interval(self): track_specification = { @@ -3205,7 +3176,7 @@ def test_supports_target_interval(self): } reader = loader.TrackSpecificationReader() resulting_track = reader("unittest", track_specification, "/mappings") - self.assertEqual(5, resulting_track.challenges[0].schedule[0].params["target-interval"]) + assert resulting_track.challenges[0].schedule[0].params["target-interval"] == 5 def test_ramp_up_but_no_warmup(self): track_specification = { @@ -3230,11 +3201,10 @@ def test_ramp_up_but_no_warmup(self): } reader = loader.TrackSpecificationReader() - with self.assertRaises(loader.TrackSyntaxError) as ctx: + with pytest.raises(loader.TrackSyntaxError) as ctx: reader("unittest", track_specification, "/mappings") - self.assertEqual("Track 'unittest' is invalid. Operation 'index-append' in challenge 'default-challenge' " - "defines a ramp-up time period of 60 seconds but no warmup-time-period.", - ctx.exception.args[0]) + assert ctx.value.args[0] == "Track 'unittest' is invalid. Operation 'index-append' in challenge 'default-challenge' " \ + "defines a ramp-up time period of 60 seconds but no warmup-time-period." def test_warmup_shorter_than_ramp_up(self): track_specification = { @@ -3260,11 +3230,11 @@ def test_warmup_shorter_than_ramp_up(self): } reader = loader.TrackSpecificationReader() - with self.assertRaises(loader.TrackSyntaxError) as ctx: + with pytest.raises(loader.TrackSyntaxError) as ctx: reader("unittest", track_specification, "/mappings") - self.assertEqual("Track 'unittest' is invalid. The warmup-time-period of operation 'index-append' in " - "challenge 'default-challenge' is 59 seconds but must be greater than or equal to the " - "ramp-up-time-period of 60 seconds.", ctx.exception.args[0]) + assert ctx.value.args[0] == "Track 'unittest' is invalid. The warmup-time-period of operation 'index-append' in " \ + "challenge 'default-challenge' is 59 seconds but must be greater than or equal to the " \ + "ramp-up-time-period of 60 seconds." 
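
Every assertRaises conversion in this patch follows the same shape, so it is worth spelling out once: pytest.raises yields an ExceptionInfo object whose raised exception sits on the .value attribute, which is why each unittest-style ctx.exception.args[0] becomes ctx.value.args[0]. A self-contained sketch of the pattern (the raise stands in for the call under test):

    import pytest

    from esrally import exceptions

    def test_error_message():
        with pytest.raises(exceptions.InvalidSyntax) as ctx:
            raise exceptions.InvalidSyntax("'bulk-size' must be numeric")
        # ExceptionInfo exposes the raised exception instance as .value
        assert ctx.value.args[0] == "'bulk-size' must be numeric"
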
@@ -3323,29 +3293,29 @@ def test_parallel_tasks_with_default_values(self): parallel_element = resulting_track.challenges[0].schedule[0] parallel_tasks = parallel_element.tasks - self.assertEqual(22, parallel_element.clients) - self.assertEqual(3, len(parallel_tasks)) - - self.assertEqual("index-1", parallel_tasks[0].operation.name) - self.assertEqual(300, parallel_tasks[0].ramp_up_time_period) - self.assertEqual(300, parallel_tasks[0].warmup_time_period) - self.assertEqual(36000, parallel_tasks[0].time_period) - self.assertEqual(2, parallel_tasks[0].clients) - self.assertFalse("target-throughput" in parallel_tasks[0].params) - - self.assertEqual("index-2", parallel_tasks[1].operation.name) - self.assertEqual(300, parallel_tasks[1].ramp_up_time_period) - self.assertEqual(2400, parallel_tasks[1].warmup_time_period) - self.assertEqual(3600, parallel_tasks[1].time_period) - self.assertEqual(4, parallel_tasks[1].clients) - self.assertFalse("target-throughput" in parallel_tasks[1].params) - - self.assertEqual("index-3", parallel_tasks[2].operation.name) - self.assertEqual(300, parallel_tasks[2].ramp_up_time_period) - self.assertEqual(2400, parallel_tasks[2].warmup_time_period) - self.assertEqual(36000, parallel_tasks[2].time_period) - self.assertEqual(16, parallel_tasks[2].clients) - self.assertEqual(10, parallel_tasks[2].params["target-throughput"]) + assert parallel_element.clients == 22 + assert len(parallel_tasks) == 3 + + assert parallel_tasks[0].operation.name == "index-1" + assert parallel_tasks[0].ramp_up_time_period == 300 + assert parallel_tasks[0].warmup_time_period == 300 + assert parallel_tasks[0].time_period == 36000 + assert parallel_tasks[0].clients == 2 + assert "target-throughput" not in parallel_tasks[0].params + + assert parallel_tasks[1].operation.name == "index-2" + assert parallel_tasks[1].ramp_up_time_period == 300 + assert parallel_tasks[1].warmup_time_period == 2400 + assert parallel_tasks[1].time_period == 3600 + assert parallel_tasks[1].clients == 4 + assert "target-throughput" not in parallel_tasks[1].params + + assert parallel_tasks[2].operation.name == "index-3" + assert parallel_tasks[2].ramp_up_time_period == 300 + assert parallel_tasks[2].warmup_time_period == 2400 + assert parallel_tasks[2].time_period == 36000 + assert parallel_tasks[2].clients == 16 + assert parallel_tasks[2].params["target-throughput"] == 10 def test_parallel_tasks_with_default_clients_does_not_propagate(self): track_specification = { @@ -3396,10 +3366,10 @@ def test_parallel_tasks_with_default_clients_does_not_propagate(self): parallel_tasks = parallel_element.tasks # we will only have two clients *in total* - self.assertEqual(2, parallel_element.clients) - self.assertEqual(4, len(parallel_tasks)) + assert parallel_element.clients == 2 + assert len(parallel_tasks) == 4 for task in parallel_tasks: - self.assertEqual(1, task.clients) + assert task.clients == 1 def test_parallel_tasks_with_completed_by_set(self): track_specification = { @@ -3444,14 +3414,14 @@ def test_parallel_tasks_with_completed_by_set(self): parallel_tasks = parallel_element.tasks # we will only have two clients *in total* - self.assertEqual(2, parallel_element.clients) - self.assertEqual(2, len(parallel_tasks)) + assert parallel_element.clients == 2 + assert len(parallel_tasks) == 2 - self.assertEqual("index-1", parallel_tasks[0].operation.name) - self.assertFalse(parallel_tasks[0].completes_parent) + assert parallel_tasks[0].operation.name == "index-1" + assert not parallel_tasks[0].completes_parent - 
self.assertEqual("index-2", parallel_tasks[1].operation.name) - self.assertTrue(parallel_tasks[1].completes_parent) + assert parallel_tasks[1].operation.name == "index-2" + assert parallel_tasks[1].completes_parent def test_parallel_tasks_with_named_task_completed_by_set(self): track_specification = { @@ -3498,14 +3468,14 @@ def test_parallel_tasks_with_named_task_completed_by_set(self): parallel_tasks = parallel_element.tasks # we will only have two clients *in total* - self.assertEqual(2, parallel_element.clients) - self.assertEqual(2, len(parallel_tasks)) + assert parallel_element.clients == 2 + assert len(parallel_tasks) == 2 - self.assertEqual("index-1", parallel_tasks[0].operation.name) - self.assertFalse(parallel_tasks[0].completes_parent) + assert parallel_tasks[0].operation.name == "index-1" + assert not parallel_tasks[0].completes_parent - self.assertEqual("index-2", parallel_tasks[1].operation.name) - self.assertTrue(parallel_tasks[1].completes_parent) + assert parallel_tasks[1].operation.name == "index-2" + assert parallel_tasks[1].completes_parent def test_parallel_tasks_with_completed_by_set_no_task_matches(self): track_specification = { @@ -3544,10 +3514,10 @@ def test_parallel_tasks_with_completed_by_set_no_task_matches(self): } reader = loader.TrackSpecificationReader() - with self.assertRaises(loader.TrackSyntaxError) as ctx: + with pytest.raises(loader.TrackSyntaxError) as ctx: reader("unittest", track_specification, "/mappings") - self.assertEqual("Track 'unittest' is invalid. 'parallel' element for challenge 'default-challenge' is marked with 'completed-by' " - "with task name 'non-existing-task' but no task with this name exists.", ctx.exception.args[0]) + assert ctx.value.args[0] == "Track 'unittest' is invalid. 'parallel' element for challenge 'default-challenge' is marked with 'completed-by' " \ + "with task name 'non-existing-task' but no task with this name exists." def test_parallel_tasks_with_completed_by_set_multiple_tasks_match(self): track_specification = { @@ -3582,11 +3552,10 @@ def test_parallel_tasks_with_completed_by_set_multiple_tasks_match(self): } reader = loader.TrackSpecificationReader() - with self.assertRaises(loader.TrackSyntaxError) as ctx: + with pytest.raises(loader.TrackSyntaxError) as ctx: reader("unittest", track_specification, "/mappings") - self.assertEqual("Track 'unittest' is invalid. 'parallel' element for challenge 'default-challenge' contains multiple tasks with " - "the name 'index-1' marked with 'completed-by' but only task is allowed to match.", - ctx.exception.args[0]) + assert ctx.value.args[0] == "Track 'unittest' is invalid. 'parallel' element for challenge 'default-challenge' contains multiple tasks with " \ + "the name 'index-1' marked with 'completed-by' but only task is allowed to match." def test_parallel_tasks_ramp_up_cannot_be_overridden(self): track_specification = { @@ -3630,11 +3599,10 @@ def test_parallel_tasks_ramp_up_cannot_be_overridden(self): } reader = loader.TrackSpecificationReader() - with self.assertRaises(loader.TrackSyntaxError) as ctx: + with pytest.raises(loader.TrackSyntaxError) as ctx: reader("unittest", track_specification, "/mappings") - self.assertEqual("Track 'unittest' is invalid. task 'name-index-1' specifies a different ramp-up-time-period " - "than its enclosing 'parallel' element in challenge 'default-challenge'.", - ctx.exception.args[0]) + assert ctx.value.args[0] == "Track 'unittest' is invalid. 
task 'name-index-1' specifies a different ramp-up-time-period " \ + "than its enclosing 'parallel' element in challenge 'default-challenge'." def test_parallel_tasks_ramp_up_only_on_parallel(self): track_specification = { @@ -3677,12 +3645,11 @@ def test_parallel_tasks_ramp_up_only_on_parallel(self): } reader = loader.TrackSpecificationReader() - with self.assertRaises(loader.TrackSyntaxError) as ctx: + with pytest.raises(loader.TrackSyntaxError) as ctx: reader("unittest", track_specification, "/mappings") - self.assertEqual("Track 'unittest' is invalid. task 'name-index-1' in 'parallel' element of challenge " - "'default-challenge' specifies a ramp-up-time-period but it is only allowed on the 'parallel' " - "element.", - ctx.exception.args[0]) + assert ctx.value.args[0] == "Track 'unittest' is invalid. task 'name-index-1' in 'parallel' element of challenge " \ + "'default-challenge' specifies a ramp-up-time-period but it is only allowed on the 'parallel' " \ + "element." def test_propagate_parameters_to_challenge_level(self): track_specification = { @@ -3725,21 +3692,21 @@ def test_propagate_parameters_to_challenge_level(self): } reader = loader.TrackSpecificationReader(selected_challenge="another-challenge") resulting_track = reader("unittest", track_specification, "/mappings") - self.assertEqual(2, len(resulting_track.challenges)) - self.assertEqual("challenge", resulting_track.challenges[0].name) - self.assertTrue(resulting_track.challenges[0].default) - self.assertDictEqual({ + assert len(resulting_track.challenges) == 2 + assert resulting_track.challenges[0].name == "challenge" + assert resulting_track.challenges[0].default + assert resulting_track.challenges[0].parameters == { "level": "challenge", "value": 7, "another-value": 17 - }, resulting_track.challenges[0].parameters) + } - self.assertFalse(resulting_track.challenges[1].default) - self.assertTrue(resulting_track.challenges[1].selected) - self.assertDictEqual({ + assert not resulting_track.challenges[1].default + assert resulting_track.challenges[1].selected + assert resulting_track.challenges[1].parameters == { "level": "track", "value": 7 - }, resulting_track.challenges[1].parameters) + } class MyMockTrackProcessor(loader.TrackProcessor): diff --git a/tests/track/params_test.py b/tests/track/params_test.py index 4ca01dc37..5c5db159d 100644 --- a/tests/track/params_test.py +++ b/tests/track/params_test.py @@ -19,6 +19,8 @@ import random from unittest import TestCase +import pytest + from esrally import exceptions from esrally.track import params, track from esrally.utils import io @@ -64,7 +66,7 @@ def test_slice_with_source_larger_than_slice(self): source.open(data, "r", 5) # lines are returned as a list so we have to wrap our data once more - self.assertEqual([data[2:7]], list(source)) + assert list(source) == [data[2:7]] source.close() def test_slice_with_slice_larger_than_source(self): @@ -77,17 +79,17 @@ def test_slice_with_slice_larger_than_source(self): source.open(data, "r", 5) # lines are returned as a list so we have to wrap our data once more - self.assertEqual([data], list(source)) + assert list(source) == [data] source.close() class ConflictingIdsBuilderTests(TestCase): def test_no_id_conflicts(self): - self.assertIsNone(params.build_conflicting_ids(None, 100, 0)) - self.assertIsNone(params.build_conflicting_ids(params.IndexIdConflict.NoConflicts, 100, 0)) + assert params.build_conflicting_ids(None, 100, 0) is None + assert params.build_conflicting_ids(params.IndexIdConflict.NoConflicts, 100, 0) is None def 
test_sequential_conflicts(self): - self.assertEqual( + assert params.build_conflicting_ids(params.IndexIdConflict.SequentialConflicts, 11, 0) == \ [ '0000000000', '0000000001', @@ -100,11 +102,9 @@ def test_sequential_conflicts(self): '0000000008', '0000000009', '0000000010' - ], - params.build_conflicting_ids(params.IndexIdConflict.SequentialConflicts, 11, 0) - ) + ] - self.assertEqual( + assert params.build_conflicting_ids(params.IndexIdConflict.SequentialConflicts, 11, 5) == \ [ '0000000005', '0000000006', @@ -117,46 +117,36 @@ def test_sequential_conflicts(self): '0000000013', '0000000014', '0000000015' - ], - params.build_conflicting_ids(params.IndexIdConflict.SequentialConflicts, 11, 5) - ) + ] def test_random_conflicts(self): predictable_shuffle = list.reverse - self.assertEqual( + assert params.build_conflicting_ids(params.IndexIdConflict.RandomConflicts, 3, 0, shuffle=predictable_shuffle) == \ [ '0000000002', '0000000001', '0000000000' - ], - params.build_conflicting_ids(params.IndexIdConflict.RandomConflicts, 3, 0, shuffle=predictable_shuffle) - ) + ] - self.assertEqual( + assert params.build_conflicting_ids(params.IndexIdConflict.RandomConflicts, 3, 5, shuffle=predictable_shuffle) == \ [ '0000000007', '0000000006', '0000000005' - ], - params.build_conflicting_ids(params.IndexIdConflict.RandomConflicts, 3, 5, shuffle=predictable_shuffle) - ) + ] class ActionMetaDataTests(TestCase): def test_generate_action_meta_data_without_id_conflicts(self): - self.assertEqual(("index", '{"index": {"_index": "test_index", "_type": "test_type"}}\n'), - next(params.GenerateActionMetaData("test_index", "test_type"))) + assert next(params.GenerateActionMetaData("test_index", "test_type")) == ("index", '{"index": {"_index": "test_index", "_type": "test_type"}}\n') def test_generate_action_meta_data_create(self): - self.assertEqual(("create", '{"create": {"_index": "test_index"}}\n'), - next(params.GenerateActionMetaData("test_index", None, use_create=True))) + assert next(params.GenerateActionMetaData("test_index", None, use_create=True)) == ("create", '{"create": {"_index": "test_index"}}\n') def test_generate_action_meta_data_create_with_conflicts(self): - with self.assertRaises(exceptions.RallyError) as ctx: + with pytest.raises(exceptions.RallyError) as ctx: params.GenerateActionMetaData("test_index", None, conflicting_ids=[100, 200, 300, 400], use_create=True) - self.assertEqual("Index mode '_create' cannot be used with conflicting ids", - ctx.exception.args[0]) + assert ctx.value.args[0] == "Index mode '_create' cannot be used with conflicting ids" def test_generate_action_meta_data_typeless(self): - self.assertEqual(("index", '{"index": {"_index": "test_index"}}\n'), - next(params.GenerateActionMetaData("test_index", type_name=None))) + assert next(params.GenerateActionMetaData("test_index", type_name=None)) == ("index", '{"index": {"_index": "test_index"}}\n') def test_generate_action_meta_data_with_id_conflicts(self): def idx(id): @@ -193,15 +183,15 @@ def conflict(action, id): randint=lambda x, y: next(chosen_index_of_conflicting_ids)) # first one is always *not* drawn from a random index - self.assertEqual(idx("100"), next(generator)) + assert next(generator) == idx("100") # now we start using random ids, i.e. 
look in the first line of the pseudo-random sequence - self.assertEqual(conflict(conflict_action, "200"), next(generator)) - self.assertEqual(conflict(conflict_action, "400"), next(generator)) - self.assertEqual(conflict(conflict_action, "300"), next(generator)) + assert next(generator) == conflict(conflict_action, "200") + assert next(generator) == conflict(conflict_action, "400") + assert next(generator) == conflict(conflict_action, "300") # no conflict -> we draw the next sequential one, which is 200 - self.assertEqual(idx("200"), next(generator)) + assert next(generator) == idx("200") # and we're back to random - self.assertEqual(conflict(conflict_action, "100"), next(generator)) + assert next(generator) == conflict(conflict_action, "100") def test_generate_action_meta_data_with_id_conflicts_and_recency_bias(self): def idx(type_name, id): @@ -264,18 +254,18 @@ def conflict(action, type_name, id): ) # first one is always *not* drawn from a random index - self.assertEqual(idx(type_name, "100"), next(generator)) + assert next(generator) == idx(type_name, "100") # now we start using random ids - self.assertEqual(conflict(conflict_action, type_name, "100"), next(generator)) - self.assertEqual(conflict(conflict_action, type_name, "100"), next(generator)) - self.assertEqual(conflict(conflict_action, type_name, "100"), next(generator)) + assert next(generator) == conflict(conflict_action, type_name, "100") + assert next(generator) == conflict(conflict_action, type_name, "100") + assert next(generator) == conflict(conflict_action, type_name, "100") # no conflict - self.assertEqual(idx(type_name, "200"), next(generator)) - self.assertEqual(idx(type_name, "300"), next(generator)) - self.assertEqual(idx(type_name, "400"), next(generator)) + assert next(generator) == idx(type_name, "200") + assert next(generator) == idx(type_name, "300") + assert next(generator) == idx(type_name, "400") # conflict - self.assertEqual(conflict(conflict_action, type_name, "400"), next(generator)) - self.assertEqual(conflict(conflict_action, type_name, "300"), next(generator)) + assert next(generator) == conflict(conflict_action, type_name, "400") + assert next(generator) == conflict(conflict_action, type_name, "300") def test_generate_action_meta_data_with_id_and_zero_conflict_probability(self): def idx(id): @@ -287,7 +277,7 @@ def idx(id): conflicting_ids=test_ids, conflict_probability=0) - self.assertListEqual([idx(id) for id in test_ids], list(generator)) + assert list(generator) == [idx(id) for id in test_ids] class IndexDataReaderTests(TestCase): @@ -481,7 +471,7 @@ def test_read_bulk_with_id_conflicts(self): for bulk_size, bulk in batch: bulks.append(bulk) - self.assertEqual([ + assert bulks == [ b'{"index": {"_index": "test_index", "_type": "test_type", "_id": "100"}}\n' + b'{"key": "value1"}\n' + b'{"update": {"_index": "test_index", "_type": "test_type", "_id": "200"}}\n' + @@ -492,7 +482,7 @@ def test_read_bulk_with_id_conflicts(self): b'{"doc":{"key": "value4"}}\n', b'{"index": {"_index": "test_index", "_type": "test_type", "_id": "200"}}\n' + b'{"key": "value5"}\n' - ], bulks) + ] def test_read_bulk_with_external_id_and_zero_conflict_probability(self): data = [ @@ -523,7 +513,7 @@ def test_read_bulk_with_external_id_and_zero_conflict_probability(self): for bulk_size, bulk in batch: bulks.append(bulk) - self.assertEqual([ + assert bulks == [ b'{"index": {"_index": "test_index", "_type": "test_type", "_id": "100"}}\n' + b'{"key": "value1"}\n' + b'{"index": {"_index": "test_index", "_type": "test_type", 
"_id": "200"}}\n' + @@ -533,18 +523,18 @@ def test_read_bulk_with_external_id_and_zero_conflict_probability(self): b'{"key": "value3"}\n' + b'{"index": {"_index": "test_index", "_type": "test_type", "_id": "400"}}\n' + b'{"key": "value4"}\n' - ], bulks) + ] def assert_bulks_sized(self, reader, expected_bulk_sizes, expected_line_sizes): - self.assertEqual(len(expected_bulk_sizes), len(expected_line_sizes), "Bulk sizes and line sizes must be equal") + assert len(expected_line_sizes) == len(expected_bulk_sizes), "Bulk sizes and line sizes must be equal" with reader: bulk_index = 0 for _, _, batch in reader: for bulk_size, bulk in batch: - self.assertEqual(expected_bulk_sizes[bulk_index], bulk_size, msg="bulk size") - self.assertEqual(expected_line_sizes[bulk_index], bulk.count(b"\n")) + assert bulk_size == expected_bulk_sizes[bulk_index], "bulk size" + assert bulk.count(b"\n") == expected_line_sizes[bulk_index] bulk_index += 1 - self.assertEqual(len(expected_bulk_sizes), bulk_index, "Not all bulk sizes have been checked") + assert bulk_index == len(expected_bulk_sizes), "Not all bulk sizes have been checked" class InvocationGeneratorTests(TestCase): @@ -585,107 +575,107 @@ def test_iterator_chaining_respects_context_manager(self): i0 = InvocationGeneratorTests.TestIndexReader([1, 2, 3]) i1 = InvocationGeneratorTests.TestIndexReader([4, 5, 6]) - self.assertEqual([1, 2, 3, 4, 5, 6], list(params.chain(i0, i1))) - self.assertEqual(1, i0.enter_count) - self.assertEqual(1, i0.exit_count) - self.assertEqual(1, i1.enter_count) - self.assertEqual(1, i1.exit_count) + assert list(params.chain(i0, i1)) == [1, 2, 3, 4, 5, 6] + assert i0.enter_count == 1 + assert i0.exit_count == 1 + assert i1.enter_count == 1 + assert i1.exit_count == 1 def test_calculate_bounds(self): num_docs = 1000 clients = 1 - self.assertEqual((0, 1000, 1000), params.bounds(num_docs, 0, 0, clients, includes_action_and_meta_data=False)) - self.assertEqual((0, 1000, 2000), params.bounds(num_docs, 0, 0, clients, includes_action_and_meta_data=True)) + assert params.bounds(num_docs, 0, 0, clients, includes_action_and_meta_data=False) == (0, 1000, 1000) + assert params.bounds(num_docs, 0, 0, clients, includes_action_and_meta_data=True) == (0, 1000, 2000) num_docs = 1000 clients = 2 - self.assertEqual((0, 500, 500), params.bounds(num_docs, 0, 0, clients, includes_action_and_meta_data=False)) - self.assertEqual((500, 500, 500), params.bounds(num_docs, 1, 1, clients, includes_action_and_meta_data=False)) + assert params.bounds(num_docs, 0, 0, clients, includes_action_and_meta_data=False) == (0, 500, 500) + assert params.bounds(num_docs, 1, 1, clients, includes_action_and_meta_data=False) == (500, 500, 500) num_docs = 800 clients = 4 - self.assertEqual((0, 200, 400), params.bounds(num_docs, 0, 0, clients, includes_action_and_meta_data=True)) - self.assertEqual((400, 200, 400), params.bounds(num_docs, 1, 1, clients, includes_action_and_meta_data=True)) - self.assertEqual((800, 200, 400), params.bounds(num_docs, 2, 2, clients, includes_action_and_meta_data=True)) - self.assertEqual((1200, 200, 400), params.bounds(num_docs, 3, 3, clients, includes_action_and_meta_data=True)) + assert params.bounds(num_docs, 0, 0, clients, includes_action_and_meta_data=True) == (0, 200, 400) + assert params.bounds(num_docs, 1, 1, clients, includes_action_and_meta_data=True) == (400, 200, 400) + assert params.bounds(num_docs, 2, 2, clients, includes_action_and_meta_data=True) == (800, 200, 400) + assert params.bounds(num_docs, 3, 3, clients, 
includes_action_and_meta_data=True) == (1200, 200, 400) num_docs = 2000 clients = 8 - self.assertEqual((0, 250, 250), params.bounds(num_docs, 0, 0, clients, includes_action_and_meta_data=False)) - self.assertEqual((250, 250, 250), params.bounds(num_docs, 1, 1, clients, includes_action_and_meta_data=False)) - self.assertEqual((500, 250, 250), params.bounds(num_docs, 2, 2, clients, includes_action_and_meta_data=False)) - self.assertEqual((750, 250, 250), params.bounds(num_docs, 3, 3, clients, includes_action_and_meta_data=False)) - self.assertEqual((1000, 250, 250), params.bounds(num_docs, 4, 4, clients, includes_action_and_meta_data=False)) - self.assertEqual((1250, 250, 250), params.bounds(num_docs, 5, 5, clients, includes_action_and_meta_data=False)) - self.assertEqual((1500, 250, 250), params.bounds(num_docs, 6, 6, clients, includes_action_and_meta_data=False)) - self.assertEqual((1750, 250, 250), params.bounds(num_docs, 7, 7, clients, includes_action_and_meta_data=False)) + assert params.bounds(num_docs, 0, 0, clients, includes_action_and_meta_data=False) == (0, 250, 250) + assert params.bounds(num_docs, 1, 1, clients, includes_action_and_meta_data=False) == (250, 250, 250) + assert params.bounds(num_docs, 2, 2, clients, includes_action_and_meta_data=False) == (500, 250, 250) + assert params.bounds(num_docs, 3, 3, clients, includes_action_and_meta_data=False) == (750, 250, 250) + assert params.bounds(num_docs, 4, 4, clients, includes_action_and_meta_data=False) == (1000, 250, 250) + assert params.bounds(num_docs, 5, 5, clients, includes_action_and_meta_data=False) == (1250, 250, 250) + assert params.bounds(num_docs, 6, 6, clients, includes_action_and_meta_data=False) == (1500, 250, 250) + assert params.bounds(num_docs, 7, 7, clients, includes_action_and_meta_data=False) == (1750, 250, 250) def test_calculate_non_multiple_bounds_16_clients(self): # in this test case, each client would need to read 1333.3333 lines. Instead we let most clients read 1333 # lines and every third client, one line more (1334). 
num_docs = 16000
clients = 12
- self.assertEqual((0, 1333, 1333), params.bounds(num_docs, 0, 0, clients, includes_action_and_meta_data=False))
- self.assertEqual((1333, 1334, 1334), params.bounds(num_docs, 1, 1, clients, includes_action_and_meta_data=False))
- self.assertEqual((2667, 1333, 1333), params.bounds(num_docs, 2, 2, clients, includes_action_and_meta_data=False))
- self.assertEqual((4000, 1333, 1333), params.bounds(num_docs, 3, 3, clients, includes_action_and_meta_data=False))
- self.assertEqual((5333, 1334, 1334), params.bounds(num_docs, 4, 4, clients, includes_action_and_meta_data=False))
- self.assertEqual((6667, 1333, 1333), params.bounds(num_docs, 5, 5, clients, includes_action_and_meta_data=False))
- self.assertEqual((8000, 1333, 1333), params.bounds(num_docs, 6, 6, clients, includes_action_and_meta_data=False))
- self.assertEqual((9333, 1334, 1334), params.bounds(num_docs, 7, 7, clients, includes_action_and_meta_data=False))
- self.assertEqual((10667, 1333, 1333), params.bounds(num_docs, 8, 8, clients, includes_action_and_meta_data=False))
- self.assertEqual((12000, 1333, 1333), params.bounds(num_docs, 9, 9, clients, includes_action_and_meta_data=False))
- self.assertEqual((13333, 1334, 1334), params.bounds(num_docs, 10, 10, clients, includes_action_and_meta_data=False))
- self.assertEqual((14667, 1333, 1333), params.bounds(num_docs, 11, 11, clients, includes_action_and_meta_data=False))
+ assert params.bounds(num_docs, 0, 0, clients, includes_action_and_meta_data=False) == (0, 1333, 1333)
+ assert params.bounds(num_docs, 1, 1, clients, includes_action_and_meta_data=False) == (1333, 1334, 1334)
+ assert params.bounds(num_docs, 2, 2, clients, includes_action_and_meta_data=False) == (2667, 1333, 1333)
+ assert params.bounds(num_docs, 3, 3, clients, includes_action_and_meta_data=False) == (4000, 1333, 1333)
+ assert params.bounds(num_docs, 4, 4, clients, includes_action_and_meta_data=False) == (5333, 1334, 1334)
+ assert params.bounds(num_docs, 5, 5, clients, includes_action_and_meta_data=False) == (6667, 1333, 1333)
+ assert params.bounds(num_docs, 6, 6, clients, includes_action_and_meta_data=False) == (8000, 1333, 1333)
+ assert params.bounds(num_docs, 7, 7, clients, includes_action_and_meta_data=False) == (9333, 1334, 1334)
+ assert params.bounds(num_docs, 8, 8, clients, includes_action_and_meta_data=False) == (10667, 1333, 1333)
+ assert params.bounds(num_docs, 9, 9, clients, includes_action_and_meta_data=False) == (12000, 1333, 1333)
+ assert params.bounds(num_docs, 10, 10, clients, includes_action_and_meta_data=False) == (13333, 1334, 1334)
+ assert params.bounds(num_docs, 11, 11, clients, includes_action_and_meta_data=False) == (14667, 1333, 1333)

def test_calculate_non_multiple_bounds_6_clients(self):
# With 3500 docs and 6 clients, every client needs to read 583.33 docs. We have two lines per doc, which makes it
# 2 * 583.333 docs = 1166.6666 lines per client. We let them read 1166 and 1168 lines respectively (583 and 584 docs).
num_docs = 3500 clients = 6 - self.assertEqual((0, 583, 1166), params.bounds(num_docs, 0, 0, clients, includes_action_and_meta_data=True)) - self.assertEqual((1166, 584, 1168), params.bounds(num_docs, 1, 1, clients, includes_action_and_meta_data=True)) - self.assertEqual((2334, 583, 1166), params.bounds(num_docs, 2, 2, clients, includes_action_and_meta_data=True)) - self.assertEqual((3500, 583, 1166), params.bounds(num_docs, 3, 3, clients, includes_action_and_meta_data=True)) - self.assertEqual((4666, 584, 1168), params.bounds(num_docs, 4, 4, clients, includes_action_and_meta_data=True)) - self.assertEqual((5834, 583, 1166), params.bounds(num_docs, 5, 5, clients, includes_action_and_meta_data=True)) + assert params.bounds(num_docs, 0, 0, clients, includes_action_and_meta_data=True) == (0, 583, 1166) + assert params.bounds(num_docs, 1, 1, clients, includes_action_and_meta_data=True) == (1166, 584, 1168) + assert params.bounds(num_docs, 2, 2, clients, includes_action_and_meta_data=True) == (2334, 583, 1166) + assert params.bounds(num_docs, 3, 3, clients, includes_action_and_meta_data=True) == (3500, 583, 1166) + assert params.bounds(num_docs, 4, 4, clients, includes_action_and_meta_data=True) == (4666, 584, 1168) + assert params.bounds(num_docs, 5, 5, clients, includes_action_and_meta_data=True) == (5834, 583, 1166) def test_calculate_bounds_for_multiple_clients_per_worker(self): num_docs = 2000 clients = 8 # four clients per worker, each reads 250 lines - self.assertEqual((0, 1000, 1000), params.bounds(num_docs, 0, 3, clients, includes_action_and_meta_data=False)) - self.assertEqual((1000, 1000, 1000), params.bounds(num_docs, 4, 7, clients, includes_action_and_meta_data=False)) + assert params.bounds(num_docs, 0, 3, clients, includes_action_and_meta_data=False) == (0, 1000, 1000) + assert params.bounds(num_docs, 4, 7, clients, includes_action_and_meta_data=False) == (1000, 1000, 1000) # four clients per worker, each reads 500 lines (includes action and metadata) - self.assertEqual((0, 1000, 2000), params.bounds(num_docs, 0, 3, clients, includes_action_and_meta_data=True)) - self.assertEqual((2000, 1000, 2000), params.bounds(num_docs, 4, 7, clients, includes_action_and_meta_data=True)) + assert params.bounds(num_docs, 0, 3, clients, includes_action_and_meta_data=True) == (0, 1000, 2000) + assert params.bounds(num_docs, 4, 7, clients, includes_action_and_meta_data=True) == (2000, 1000, 2000) def test_calculate_number_of_bulks(self): docs1 = self.docs(1) docs2 = self.docs(2) - self.assertEqual(1, self.number_of_bulks([self.corpus("a", [docs1])], 0, 0, 1, 1)) - self.assertEqual(1, self.number_of_bulks([self.corpus("a", [docs1])], 0, 0, 1, 2)) - self.assertEqual(20, self.number_of_bulks( + assert self.number_of_bulks([self.corpus("a", [docs1])], 0, 0, 1, 1) == 1 + assert self.number_of_bulks([self.corpus("a", [docs1])], 0, 0, 1, 2) == 1 + assert self.number_of_bulks( [self.corpus("a", [docs2, docs2, docs2, docs2, docs1]), - self.corpus("b", [docs2, docs2, docs2, docs2, docs2, docs1])], 0, 0, 1, 1)) - self.assertEqual(11, self.number_of_bulks( + self.corpus("b", [docs2, docs2, docs2, docs2, docs2, docs1])], 0, 0, 1, 1) == 20 + assert self.number_of_bulks( [self.corpus("a", [docs2, docs2, docs2, docs2, docs1]), - self.corpus("b", [docs2, docs2, docs2, docs2, docs2, docs1])], 0, 0, 1, 2)) - self.assertEqual(11, self.number_of_bulks( + self.corpus("b", [docs2, docs2, docs2, docs2, docs2, docs1])], 0, 0, 1, 2) == 11 + assert self.number_of_bulks( [self.corpus("a", [docs2, docs2, docs2, docs2, 
docs1]), - self.corpus("b", [docs2, docs2, docs2, docs2, docs2, docs1])], 0, 0, 1, 3)) - self.assertEqual(11, self.number_of_bulks( + self.corpus("b", [docs2, docs2, docs2, docs2, docs2, docs1])], 0, 0, 1, 3) == 11 + assert self.number_of_bulks( [self.corpus("a", [docs2, docs2, docs2, docs2, docs1]), - self.corpus("b", [docs2, docs2, docs2, docs2, docs2, docs1])], 0, 0, 1, 100)) + self.corpus("b", [docs2, docs2, docs2, docs2, docs2, docs1])], 0, 0, 1, 100) == 11 - self.assertEqual(2, self.number_of_bulks([self.corpus("a", [self.docs(800)])], 0, 0, 3, 250)) - self.assertEqual(1, self.number_of_bulks([self.corpus("a", [self.docs(800)])], 0, 0, 3, 267)) - self.assertEqual(1, self.number_of_bulks([self.corpus("a", [self.docs(80)])], 0, 0, 3, 267)) + assert self.number_of_bulks([self.corpus("a", [self.docs(800)])], 0, 0, 3, 250) == 2 + assert self.number_of_bulks([self.corpus("a", [self.docs(800)])], 0, 0, 3, 267) == 1 + assert self.number_of_bulks([self.corpus("a", [self.docs(80)])], 0, 0, 3, 267) == 1 # this looks odd at first but we are prioritizing number of clients above bulk size - self.assertEqual(1, self.number_of_bulks([self.corpus("a", [self.docs(80)])], 1, 1, 3, 267)) - self.assertEqual(1, self.number_of_bulks([self.corpus("a", [self.docs(80)])], 2, 2, 3, 267)) + assert self.number_of_bulks([self.corpus("a", [self.docs(80)])], 1, 1, 3, 267) == 1 + assert self.number_of_bulks([self.corpus("a", [self.docs(80)])], 2, 2, 3, 267) == 1 @staticmethod def corpus(name, docs): @@ -700,11 +690,10 @@ def number_of_bulks(corpora, first_partition_index, last_partition_index, total_ return params.number_of_bulks(corpora, first_partition_index, last_partition_index, total_partitions, bulk_size) def test_build_conflicting_ids(self): - self.assertIsNone(params.build_conflicting_ids(params.IndexIdConflict.NoConflicts, 3, 0)) - self.assertEqual(["0000000000", "0000000001", "0000000002"], - params.build_conflicting_ids(params.IndexIdConflict.SequentialConflicts, 3, 0)) + assert params.build_conflicting_ids(params.IndexIdConflict.NoConflicts, 3, 0) is None + assert params.build_conflicting_ids(params.IndexIdConflict.SequentialConflicts, 3, 0) == ["0000000000", "0000000001", "0000000002"] # we cannot tell anything specific about the contents... - self.assertEqual(3, len(params.build_conflicting_ids(params.IndexIdConflict.RandomConflicts, 3, 0))) + assert len(params.build_conflicting_ids(params.IndexIdConflict.RandomConflicts, 3, 0)) == 3 # pylint: disable=too-many-public-methods @@ -717,17 +706,17 @@ def test_create_without_params(self): target_type="test-type" )]) - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.BulkIndexParamSource(track=track.Track(name="unit-test", corpora=[corpus]), params={}) - self.assertEqual("Mandatory parameter 'bulk-size' is missing", ctx.exception.args[0]) + assert ctx.value.args[0] == "Mandatory parameter 'bulk-size' is missing" def test_create_without_corpora_definition(self): - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.BulkIndexParamSource(track=track.Track(name="unit-test"), params={}) - self.assertEqual("There is no document corpus definition for track unit-test. " - "You must add at least one before making bulk requests to Elasticsearch.", ctx.exception.args[0]) + assert ctx.value.args[0] == "There is no document corpus definition for track unit-test. 
" \ + "You must add at least one before making bulk requests to Elasticsearch." def test_create_with_non_numeric_bulk_size(self): corpus = track.DocumentCorpus(name="default", documents=[ @@ -737,12 +726,12 @@ def test_create_with_non_numeric_bulk_size(self): target_type="test-type" )]) - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.BulkIndexParamSource(track=track.Track(name="unit-test", corpora=[corpus]), params={ "bulk-size": "Three" }) - self.assertEqual("'bulk-size' must be numeric", ctx.exception.args[0]) + assert ctx.value.args[0] == "'bulk-size' must be numeric" def test_create_with_negative_bulk_size(self): corpus = track.DocumentCorpus(name="default", documents=[ @@ -752,12 +741,12 @@ def test_create_with_negative_bulk_size(self): target_type="test-type" )]) - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.BulkIndexParamSource(track=track.Track(name="unit-test", corpora=[corpus]), params={ "bulk-size": -5 }) - self.assertEqual("'bulk-size' must be positive but was -5", ctx.exception.args[0]) + assert ctx.value.args[0] == "'bulk-size' must be positive but was -5" def test_create_with_fraction_smaller_batch_size(self): corpus = track.DocumentCorpus(name="default", documents=[ @@ -767,13 +756,13 @@ def test_create_with_fraction_smaller_batch_size(self): target_type="test-type" )]) - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.BulkIndexParamSource(track=track.Track(name="unit-test", corpora=[corpus]), params={ "bulk-size": 5, "batch-size": 3 }) - self.assertEqual("'batch-size' must be greater than or equal to 'bulk-size'", ctx.exception.args[0]) + assert ctx.value.args[0] == "'batch-size' must be greater than or equal to 'bulk-size'" def test_create_with_fraction_larger_batch_size(self): corpus = track.DocumentCorpus(name="default", documents=[ @@ -783,13 +772,13 @@ def test_create_with_fraction_larger_batch_size(self): target_type="test-type" )]) - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.BulkIndexParamSource(track=track.Track(name="unit-test", corpora=[corpus]), params={ "bulk-size": 5, "batch-size": 8 }) - self.assertEqual("'batch-size' must be a multiple of 'bulk-size'", ctx.exception.args[0]) + assert ctx.value.args[0] == "'batch-size' must be a multiple of 'bulk-size'" def test_create_with_metadata_in_source_file_but_conflicts(self): corpus = track.DocumentCorpus(name="default", documents=[ @@ -800,39 +789,39 @@ def test_create_with_metadata_in_source_file_but_conflicts(self): includes_action_and_meta_data=True) ]) - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.BulkIndexParamSource(track=track.Track(name="unit-test", corpora=[corpus]), params={ "conflicts": "random" }) - self.assertEqual("Cannot generate id conflicts [random] as [docs.json.bz2] in document corpus [default] already contains " - "an action and meta-data line.", ctx.exception.args[0]) + assert ctx.value.args[0] == "Cannot generate id conflicts [random] as [docs.json.bz2] in document corpus [default] already contains " \ + "an action and meta-data line." 
def test_create_with_unknown_id_conflicts(self): - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.BulkIndexParamSource(track=track.Track(name="unit-test"), params={ "conflicts": "crazy" }) - self.assertEqual("Unknown 'conflicts' setting [crazy]", ctx.exception.args[0]) + assert ctx.value.args[0] == "Unknown 'conflicts' setting [crazy]" def test_create_with_unknown_on_conflict_setting(self): - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.BulkIndexParamSource(track=track.Track(name="unit-test"), params={ "conflicts": "sequential", "on-conflict": "delete" }) - self.assertEqual("Unknown 'on-conflict' setting [delete]", ctx.exception.args[0]) + assert ctx.value.args[0] == "Unknown 'on-conflict' setting [delete]" def test_create_with_conflicts_and_data_streams(self): - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.BulkIndexParamSource(track=track.Track(name="unit-test"), params={ "data-streams": ["test-data-stream-1", "test-data-stream-2"], "conflicts": "sequential" }) - self.assertEqual("'conflicts' cannot be used with 'data-streams'", ctx.exception.args[0]) + assert ctx.value.args[0] == "'conflicts' cannot be used with 'data-streams'" def test_create_with_ingest_percentage_too_low(self): corpus = track.DocumentCorpus(name="default", documents=[ @@ -842,13 +831,13 @@ def test_create_with_ingest_percentage_too_low(self): target_type="test-type" )]) - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.BulkIndexParamSource(track=track.Track(name="unit-test", corpora=[corpus]), params={ "bulk-size": 5000, "ingest-percentage": 0.0 }) - self.assertEqual("'ingest-percentage' must be in the range (0.0, 100.0] but was 0.0", ctx.exception.args[0]) + assert ctx.value.args[0] == "'ingest-percentage' must be in the range (0.0, 100.0] but was 0.0" def test_create_with_ingest_percentage_too_high(self): corpus = track.DocumentCorpus(name="default", documents=[ @@ -858,13 +847,13 @@ def test_create_with_ingest_percentage_too_high(self): target_type="test-type" )]) - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.BulkIndexParamSource(track=track.Track(name="unit-test", corpora=[corpus]), params={ "bulk-size": 5000, "ingest-percentage": 100.1 }) - self.assertEqual("'ingest-percentage' must be in the range (0.0, 100.0] but was 100.1", ctx.exception.args[0]) + assert ctx.value.args[0] == "'ingest-percentage' must be in the range (0.0, 100.0] but was 100.1" def test_create_with_ingest_percentage_not_numeric(self): corpus = track.DocumentCorpus(name="default", documents=[ @@ -874,13 +863,13 @@ def test_create_with_ingest_percentage_not_numeric(self): target_type="test-type" )]) - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.BulkIndexParamSource(track=track.Track(name="unit-test", corpora=[corpus]), params={ "bulk-size": 5000, "ingest-percentage": "100 percent" }) - self.assertEqual("'ingest-percentage' must be numeric", ctx.exception.args[0]) + assert ctx.value.args[0] == "'ingest-percentage' must be numeric" def test_create_valid_param_source(self): corpus = track.DocumentCorpus(name="default", documents=[ @@ -890,13 +879,13 @@ def test_create_valid_param_source(self): 
target_type="test-type" )]) - self.assertIsNotNone(params.BulkIndexParamSource(track.Track(name="unit-test", corpora=[corpus]), params={ + assert params.BulkIndexParamSource(track.Track(name="unit-test", corpora=[corpus]), params={ "conflicts": "random", "bulk-size": 5000, "batch-size": 20000, "ingest-percentage": 20.5, "pipeline": "test-pipeline" - })) + }) is not None def test_passes_all_corpora_by_default(self): corpora = [ @@ -926,7 +915,7 @@ def test_passes_all_corpora_by_default(self): }) partition = source.partition(0, 1) - self.assertEqual(partition.corpora, corpora) + assert corpora == partition.corpora def test_filters_corpora(self): corpora = [ @@ -957,7 +946,7 @@ def test_filters_corpora(self): }) partition = source.partition(0, 1) - self.assertEqual(partition.corpora, [corpora[1]]) + assert [corpora[1]] == partition.corpora def test_filters_corpora_by_data_stream(self): corpora = [ @@ -992,7 +981,7 @@ def test_filters_corpora_by_data_stream(self): }) partition = source.partition(0, 1) - self.assertEqual(partition.corpora, [corpora[0], corpora[2]]) + assert [corpora[0], corpora[2]] == partition.corpora def test_raises_exception_if_no_corpus_matches(self): corpus = track.DocumentCorpus(name="default", documents=[ @@ -1002,7 +991,7 @@ def test_raises_exception_if_no_corpus_matches(self): target_type="test-type" )]) - with self.assertRaises(exceptions.RallyAssertionError) as ctx: + with pytest.raises(exceptions.RallyAssertionError) as ctx: params.BulkIndexParamSource( track=track.Track(name="unit-test", corpora=[corpus]), params={ @@ -1013,7 +1002,7 @@ def test_raises_exception_if_no_corpus_matches(self): "pipeline": "test-pipeline" }) - self.assertEqual("The provided corpus ['does_not_exist'] does not match any of the corpora ['default'].", ctx.exception.args[0]) + assert ctx.value.args[0] == "The provided corpus ['does_not_exist'] does not match any of the corpora ['default']." 
def test_ingests_all_documents_by_default(self): corpora = [ @@ -1042,7 +1031,7 @@ def test_ingests_all_documents_by_default(self): partition = source.partition(0, 1) partition._init_internal_params() # # no ingest-percentage specified, should issue all one hundred bulk requests - self.assertEqual(100, partition.total_bulks) + assert partition.total_bulks == 100 def test_restricts_number_of_bulks_if_required(self): def create_unit_test_reader(*args): @@ -1094,8 +1083,8 @@ def schedule(param_source): partition = source.partition(0, 1) partition._init_internal_params() # should issue three bulks of size 10.000 - self.assertEqual(3, partition.total_bulks) - self.assertEqual(3, len(list(schedule(partition)))) + assert partition.total_bulks == 3 + assert len(list(schedule(partition))) == 3 def test_create_with_conflict_probability_zero(self): corpus = track.DocumentCorpus(name="default", documents=[ @@ -1112,34 +1101,34 @@ def test_create_with_conflict_probability_zero(self): }) def test_create_with_conflict_probability_too_low(self): - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.BulkIndexParamSource(track=track.Track(name="unit-test"), params={ "bulk-size": 5000, "conflicts": "sequential", "conflict-probability": -0.1 }) - self.assertEqual("'conflict-probability' must be in the range [0.0, 100.0] but was -0.1", ctx.exception.args[0]) + assert ctx.value.args[0] == "'conflict-probability' must be in the range [0.0, 100.0] but was -0.1" def test_create_with_conflict_probability_too_high(self): - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.BulkIndexParamSource(track=track.Track(name="unit-test"), params={ "bulk-size": 5000, "conflicts": "sequential", "conflict-probability": 100.1 }) - self.assertEqual("'conflict-probability' must be in the range [0.0, 100.0] but was 100.1", ctx.exception.args[0]) + assert ctx.value.args[0] == "'conflict-probability' must be in the range [0.0, 100.0] but was 100.1" def test_create_with_conflict_probability_not_numeric(self): - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.BulkIndexParamSource(track=track.Track(name="unit-test"), params={ "bulk-size": 5000, "conflicts": "sequential", "conflict-probability": "100 percent" }) - self.assertEqual("'conflict-probability' must be numeric", ctx.exception.args[0]) + assert ctx.value.args[0] == "'conflict-probability' must be numeric" class BulkDataGeneratorTests(TestCase): @@ -1170,8 +1159,8 @@ def test_generate_two_bulks(self): }, create_reader=BulkDataGeneratorTests. 
create_test_reader([["1", "2", "3", "4", "5"], ["6", "7", "8"]])) all_bulks = list(bulks) - self.assertEqual(2, len(all_bulks)) - self.assertEqual({ + assert len(all_bulks) == 2 + assert all_bulks[0] == { "action-metadata-present": True, "body": ["1", "2", "3", "4", "5"], "bulk-size": 5, @@ -1180,9 +1169,9 @@ def test_generate_two_bulks(self): "type": "test-type", "my-custom-parameter": "foo", "my-custom-parameter-2": True - }, all_bulks[0]) + } - self.assertEqual({ + assert all_bulks[1] == { "action-metadata-present": True, "body": ["6", "7", "8"], "bulk-size": 3, @@ -1191,7 +1180,7 @@ def test_generate_two_bulks(self): "type": "test-type", "my-custom-parameter": "foo", "my-custom-parameter-2": True - }, all_bulks[1]) + } def test_generate_bulks_from_multiple_corpora(self): corpora = [ @@ -1228,8 +1217,8 @@ def test_generate_bulks_from_multiple_corpora(self): }, create_reader=BulkDataGeneratorTests. create_test_reader([["1", "2", "3", "4", "5"]])) all_bulks = list(bulks) - self.assertEqual(3, len(all_bulks)) - self.assertEqual({ + assert len(all_bulks) == 3 + assert all_bulks[0] == { "action-metadata-present": True, "body": ["1", "2", "3", "4", "5"], "bulk-size": 5, @@ -1238,9 +1227,9 @@ def test_generate_bulks_from_multiple_corpora(self): "type": "docs", "my-custom-parameter": "foo", "my-custom-parameter-2": True - }, all_bulks[0]) + } - self.assertEqual({ + assert all_bulks[1] == { "action-metadata-present": True, "body": ["1", "2", "3", "4", "5"], "bulk-size": 5, @@ -1249,9 +1238,9 @@ def test_generate_bulks_from_multiple_corpora(self): "type": "docs", "my-custom-parameter": "foo", "my-custom-parameter-2": True - }, all_bulks[1]) + } - self.assertEqual({ + assert all_bulks[2] == { "action-metadata-present": True, "body": ["1", "2", "3", "4", "5"], "bulk-size": 5, @@ -1260,7 +1249,7 @@ def test_generate_bulks_from_multiple_corpora(self): "type": "docs", "my-custom-parameter": "foo", "my-custom-parameter-2": True - }, all_bulks[2]) + } def test_internal_params_take_precedence(self): corpus = track.DocumentCorpus(name="default", documents=[ @@ -1281,9 +1270,9 @@ def test_internal_params_take_precedence(self): }, create_reader=BulkDataGeneratorTests. create_test_reader([["1", "2", "3"]])) all_bulks = list(bulks) - self.assertEqual(1, len(all_bulks)) + assert len(all_bulks) == 1 # body must not contain 'foo'! 
- self.assertEqual({ + assert all_bulks[0] == { "action-metadata-present": True, "body": ["1", "2", "3"], "bulk-size": 3, @@ -1291,7 +1280,7 @@ def test_internal_params_take_precedence(self): "index": "test-idx", "type": "test-type", "custom-param": "bar" - }, all_bulks[0]) + } class ParamsRegistrationTests(TestCase): @@ -1347,7 +1336,7 @@ def test_can_register_legacy_function_as_param_source(self): params.register_param_source_for_name(source_name, ParamsRegistrationTests.param_source_legacy_function) source = params.param_source_for_name(source_name, track.Track(name="unit-test"), {"parameter": 42}) - self.assertEqual({"key": 42}, source.params()) + assert source.params() == {"key": 42} params._unregister_param_source_for_name(source_name) @@ -1356,7 +1345,7 @@ def test_can_register_function_as_param_source(self): params.register_param_source_for_name(source_name, ParamsRegistrationTests.param_source_function) source = params.param_source_for_name(source_name, track.Track(name="unit-test"), {"parameter": 42}) - self.assertEqual({"key": 42}, source.params()) + assert source.params() == {"key": 42} params._unregister_param_source_for_name(source_name) @@ -1365,7 +1354,7 @@ def test_can_register_legacy_class_as_param_source(self): params.register_param_source_for_name(source_name, ParamsRegistrationTests.ParamSourceLegacyClass) source = params.param_source_for_name(source_name, track.Track(name="unit-test"), {"parameter": 42}) - self.assertEqual({"class-key": 42}, source.params()) + assert source.params() == {"class-key": 42} params._unregister_param_source_for_name(source_name) @@ -1374,36 +1363,33 @@ def test_can_register_class_as_param_source(self): params.register_param_source_for_name(source_name, ParamsRegistrationTests.ParamSourceClass) source = params.param_source_for_name(source_name, track.Track(name="unit-test"), {"parameter": 42}) - self.assertEqual({"class-key": 42}, source.params()) + assert source.params() == {"class-key": 42} params._unregister_param_source_for_name(source_name) def test_cannot_register_an_instance_as_param_source(self): source_name = "params-test-class-param-source" # we create an instance, instead of passing the class - with self.assertRaisesRegex(exceptions.RallyAssertionError, - "Parameter source \\[test param source\\] must be either a function or a class\\."): + with pytest.raises(exceptions.RallyAssertionError, match="Parameter source \\[test param source\\] must be either a function or a class\\."): params.register_param_source_for_name(source_name, ParamsRegistrationTests.ParamSourceClass()) class SleepParamSourceTests(TestCase): def test_missing_duration_parameter(self): - with self.assertRaisesRegex(exceptions.InvalidSyntax, "parameter 'duration' is mandatory for sleep operation"): + with pytest.raises(exceptions.InvalidSyntax, match="parameter 'duration' is mandatory for sleep operation"): params.SleepParamSource(track.Track(name="unit-test"), params={}) def test_duration_parameter_wrong_type(self): - with self.assertRaisesRegex(exceptions.InvalidSyntax, - "parameter 'duration' for sleep operation must be a number"): + with pytest.raises(exceptions.InvalidSyntax, match="parameter 'duration' for sleep operation must be a number"): params.SleepParamSource(track.Track(name="unit-test"), params={"duration": "this is a string"}) def test_duration_parameter_negative_number(self): - with self.assertRaisesRegex(exceptions.InvalidSyntax, - "parameter 'duration' must be non-negative but was -1.0"): + with pytest.raises(exceptions.InvalidSyntax, 
match="parameter 'duration' must be non-negative but was -1.0"): params.SleepParamSource(track.Track(name="unit-test"), params={"duration": -1.0}) def test_param_source_passes_all_parameters(self): p = params.SleepParamSource(track.Track(name="unit-test"), params={"duration": 3.4, "additional": True}) - self.assertDictEqual({"duration": 3.4, "additional": True}, p.params()) + assert p.params() == {"duration": 3.4, "additional": True} class CreateIndexParamSourceTests(TestCase): @@ -1427,11 +1413,11 @@ def test_create_index_inline_with_body(self): }) p = source.params() - self.assertEqual(1, len(p["indices"])) + assert len(p["indices"]) == 1 index, body = p["indices"][0] - self.assertEqual("test", index) - self.assertTrue(len(body) > 0) - self.assertEqual({}, p["request-params"]) + assert index == "test" + assert len(body) > 0 + assert p["request-params"] == {} def test_create_index_inline_without_body(self): source = params.CreateIndexParamSource(track.Track(name="unit-test"), params={ @@ -1442,13 +1428,13 @@ def test_create_index_inline_without_body(self): }) p = source.params() - self.assertEqual(1, len(p["indices"])) + assert len(p["indices"]) == 1 index, body = p["indices"][0] - self.assertEqual("test", index) - self.assertIsNone(body) - self.assertDictEqual({ + assert index == "test" + assert body is None + assert p["request-params"] == { "wait_for_active_shards": True - }, p["request-params"]) + } def test_create_index_from_track_with_settings(self): index1 = track.Index(name="index1", types=["type1"]) @@ -1475,21 +1461,21 @@ def test_create_index_from_track_with_settings(self): }) p = source.params() - self.assertEqual(2, len(p["indices"])) + assert len(p["indices"]) == 2 index, body = p["indices"][0] - self.assertEqual("index1", index) + assert index == "index1" # index did not specify any body - self.assertDictEqual({ + assert body == { "settings": { "index.number_of_replicas": 1 } - }, body) + } index, body = p["indices"][1] - self.assertEqual("index2", index) + assert index == "index2" # index specified a body + we need to merge settings - self.assertDictEqual({ + assert body == { "settings": { # we have properly merged (overridden) an existing setting "index.number_of_replicas": 1, @@ -1505,7 +1491,7 @@ def test_create_index_from_track_with_settings(self): } } } - }, body) + } def test_create_index_from_track_without_settings(self): index1 = track.Index(name="index1", types=["type1"]) @@ -1528,17 +1514,17 @@ def test_create_index_from_track_without_settings(self): source = params.CreateIndexParamSource(track.Track(name="unit-test", indices=[index1, index2]), params={}) p = source.params() - self.assertEqual(2, len(p["indices"])) + assert len(p["indices"]) == 2 index, body = p["indices"][0] - self.assertEqual("index1", index) + assert index == "index1" # index did not specify any body - self.assertDictEqual({}, body) + assert body == {} index, body = p["indices"][1] - self.assertEqual("index2", index) + assert index == "index2" # index specified a body - self.assertDictEqual({ + assert body == { "settings": { "index.number_of_replicas": 0, "index.number_of_shards": 3 @@ -1552,7 +1538,7 @@ def test_create_index_from_track_without_settings(self): } } } - }, body) + } def test_filter_index(self): index1 = track.Index(name="index1", types=["type1"]) @@ -1564,10 +1550,10 @@ def test_filter_index(self): }) p = source.params() - self.assertEqual(1, len(p["indices"])) + assert len(p["indices"]) == 1 index, _ = p["indices"][0] - self.assertEqual("index2", index) + assert index == 
"index2" class CreateDataStreamParamSourceTests(TestCase): @@ -1576,10 +1562,10 @@ def test_create_data_stream(self): "data-stream": "test-data-stream" }) p = source.params() - self.assertEqual(1, len(p["data-streams"])) + assert len(p["data-streams"]) == 1 ds = p["data-streams"][0] - self.assertEqual("test-data-stream", ds) - self.assertEqual({}, p["request-params"]) + assert ds == "test-data-stream" + assert p["request-params"] == {} def test_create_data_stream_inline_without_body(self): source = params.CreateDataStreamParamSource(track.Track(name="unit-test"), params={ @@ -1590,12 +1576,12 @@ def test_create_data_stream_inline_without_body(self): }) p = source.params() - self.assertEqual(1, len(p["data-streams"])) + assert len(p["data-streams"]) == 1 ds = p["data-streams"][0] - self.assertEqual("test-data-stream", ds) - self.assertDictEqual({ + assert ds == "test-data-stream" + assert p["request-params"] == { "wait_for_active_shards": True - }, p["request-params"]) + } def test_filter_data_stream(self): source = params.CreateDataStreamParamSource( @@ -1605,10 +1591,10 @@ def test_filter_data_stream(self): params={"data-stream": "data-stream-2"}) p = source.params() - self.assertEqual(1, len(p["data-streams"])) + assert len(p["data-streams"]) == 1 ds = p["data-streams"][0] - self.assertEqual("data-stream-2", ds) + assert ds == "data-stream-2" class DeleteIndexParamSourceTests(TestCase): @@ -1621,9 +1607,9 @@ def test_delete_index_from_track(self): p = source.params() - self.assertEqual(["index1", "index2", "index3"], p["indices"]) - self.assertDictEqual({}, p["request-params"]) - self.assertTrue(p["only-if-exists"]) + assert p["indices"] == ["index1", "index2", "index3"] + assert p["request-params"] == {} + assert p["only-if-exists"] def test_filter_index_from_track(self): source = params.DeleteIndexParamSource(track.Track(name="unit-test", indices=[ @@ -1634,21 +1620,21 @@ def test_filter_index_from_track(self): p = source.params() - self.assertEqual(["index2"], p["indices"]) - self.assertDictEqual({"allow_no_indices": True}, p["request-params"]) - self.assertFalse(p["only-if-exists"]) + assert p["indices"] == ["index2"] + assert p["request-params"] == {"allow_no_indices": True} + assert not p["only-if-exists"] def test_delete_index_by_name(self): source = params.DeleteIndexParamSource(track.Track(name="unit-test"), params={"index": "index2"}) p = source.params() - self.assertEqual(["index2"], p["indices"]) + assert p["indices"] == ["index2"] def test_delete_no_index(self): - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.DeleteIndexParamSource(track.Track(name="unit-test"), params={}) - self.assertEqual("delete-index operation targets no index", ctx.exception.args[0]) + assert ctx.value.args[0] == "delete-index operation targets no index" class DeleteDataStreamParamSourceTests(TestCase): @@ -1661,9 +1647,9 @@ def test_delete_data_stream_from_track(self): p = source.params() - self.assertEqual(["data-stream-1", "data-stream-2", "data-stream-3"], p["data-streams"]) - self.assertDictEqual({}, p["request-params"]) - self.assertTrue(p["only-if-exists"]) + assert p["data-streams"] == ["data-stream-1", "data-stream-2", "data-stream-3"] + assert p["request-params"] == {} + assert p["only-if-exists"] def test_filter_data_stream_from_track(self): source = params.DeleteDataStreamParamSource(track.Track(name="unit-test", data_streams=[ @@ -1675,9 +1661,9 @@ def test_filter_data_stream_from_track(self): p = source.params() - 
self.assertEqual(["data-stream-2"], p["data-streams"]) - self.assertDictEqual({"allow_no_indices": True}, p["request-params"]) - self.assertFalse(p["only-if-exists"]) + assert p["data-streams"] == ["data-stream-2"] + assert p["request-params"] == {"allow_no_indices": True} + assert not p["only-if-exists"] def test_delete_data_stream_by_name(self): source = params.DeleteDataStreamParamSource(track.Track(name="unit-test"), @@ -1685,12 +1671,12 @@ def test_delete_data_stream_by_name(self): p = source.params() - self.assertEqual(["data-stream-2"], p["data-streams"]) + assert p["data-streams"] == ["data-stream-2"] def test_delete_no_data_stream(self): - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.DeleteDataStreamParamSource(track.Track(name="unit-test"), params={}) - self.assertEqual("delete-data-stream operation targets no data stream", ctx.exception.args[0]) + assert ctx.value.args[0] == "delete-data-stream operation targets no data stream" class CreateIndexTemplateParamSourceTests(TestCase): @@ -1714,11 +1700,11 @@ def test_create_index_template_inline(self): p = source.params() - self.assertEqual(1, len(p["templates"])) - self.assertDictEqual({}, p["request-params"]) + assert len(p["templates"]) == 1 + assert p["request-params"] == {} template, body = p["templates"][0] - self.assertEqual("test", template) - self.assertDictEqual({ + assert template == "test" + assert body == { "index_patterns": ["*"], "settings": { "index.number_of_shards": 3 @@ -1730,7 +1716,7 @@ def test_create_index_template_inline(self): } } } - }, body) + } def test_create_index_template_from_track(self): tpl = track.IndexTemplate(name="default", pattern="*", content={ @@ -1755,11 +1741,11 @@ def test_create_index_template_from_track(self): p = source.params() - self.assertEqual(1, len(p["templates"])) - self.assertDictEqual({}, p["request-params"]) + assert len(p["templates"]) == 1 + assert p["request-params"] == {} template, body = p["templates"][0] - self.assertEqual("default", template) - self.assertDictEqual({ + assert template == "default" + assert body == { "index_patterns": ["*"], "settings": { "index.number_of_shards": 3, @@ -1772,7 +1758,7 @@ def test_create_index_template_from_track(self): } } } - }, body) + } class DeleteIndexTemplateParamSourceTests(TestCase): @@ -1781,10 +1767,10 @@ def test_delete_index_template_by_name(self): p = source.params() - self.assertEqual(1, len(p["templates"])) - self.assertEqual(("default", False, None), p["templates"][0]) - self.assertTrue(p["only-if-exists"]) - self.assertDictEqual({}, p["request-params"]) + assert len(p["templates"]) == 1 + assert p["templates"][0] == ("default", False, None) + assert p["only-if-exists"] + assert p["request-params"] == {} def test_delete_index_template_by_name_and_matching_indices(self): source = params.DeleteIndexTemplateParamSource(track.Track(name="unit-test"), @@ -1796,20 +1782,19 @@ def test_delete_index_template_by_name_and_matching_indices(self): p = source.params() - self.assertEqual(1, len(p["templates"])) - self.assertEqual(("default", True, "logs-*"), p["templates"][0]) - self.assertTrue(p["only-if-exists"]) - self.assertDictEqual({}, p["request-params"]) + assert len(p["templates"]) == 1 + assert p["templates"][0] == ("default", True, "logs-*") + assert p["only-if-exists"] + assert p["request-params"] == {} def test_delete_index_template_by_name_and_matching_indices_missing_index_pattern(self): - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + 
with pytest.raises(exceptions.InvalidSyntax) as ctx: params.DeleteIndexTemplateParamSource(track.Track(name="unit-test"), params={ "template": "default", "delete-matching-indices": True }) - self.assertEqual("The property 'index-pattern' is required for delete-index-template if 'delete-matching-indices' is true.", - ctx.exception.args[0]) + assert ctx.value.args[0] == "The property 'index-pattern' is required for delete-index-template if 'delete-matching-indices' is true." def test_delete_index_template_from_track(self): tpl1 = track.IndexTemplate(name="metrics", pattern="metrics-*", delete_matching_indices=True, content={ @@ -1832,11 +1817,11 @@ def test_delete_index_template_from_track(self): p = source.params() - self.assertEqual(2, len(p["templates"])) - self.assertEqual(("metrics", True, "metrics-*"), p["templates"][0]) - self.assertEqual(("logs", False, "logs-*"), p["templates"][1]) - self.assertFalse(p["only-if-exists"]) - self.assertDictEqual({"master_timeout": 20}, p["request-params"]) + assert len(p["templates"]) == 2 + assert p["templates"][0] == ("metrics", True, "metrics-*") + assert p["templates"][1] == ("logs", False, "logs-*") + assert not p["only-if-exists"] + assert p["request-params"] == {"master_timeout": 20} class CreateComposableTemplateParamSourceTests(TestCase): @@ -1856,11 +1841,11 @@ def test_create_index_template_inline(self): p = source.params() - self.assertEqual(1, len(p["templates"])) - self.assertDictEqual({}, p["request-params"]) + assert len(p["templates"]) == 1 + assert p["request-params"] == {} template, body = p["templates"][0] - self.assertEqual("test", template) - self.assertDictEqual({ + assert template == "test" + assert body == { "index_patterns": ["my*"], "template": { "settings" : { @@ -1868,7 +1853,7 @@ def test_create_index_template_inline(self): } }, "composed_of": ["ct1", "ct2"] - }, body) + } def test_create_composable_index_template_from_track(self): tpl = track.IndexTemplate(name="default", pattern="*", content={ @@ -1889,11 +1874,11 @@ def test_create_composable_index_template_from_track(self): p = source.params() - self.assertEqual(1, len(p["templates"])) - self.assertDictEqual({}, p["request-params"]) + assert len(p["templates"]) == 1 + assert p["request-params"] == {} template, body = p["templates"][0] - self.assertEqual("default", template) - self.assertDictEqual({ + assert template == "default" + assert body == { "index_patterns": ["my*"], "template": { "settings" : { @@ -1902,7 +1887,7 @@ def test_create_composable_index_template_from_track(self): } }, "composed_of": ["ct1", "ct2"] - }, body) + } def test_create_or_merge(self): content = params.CreateComposableTemplateParamSource._create_or_merge({"parent": {}}, ["parent", "child", "grandchild"], @@ -1934,7 +1919,7 @@ def test_create_or_merge(self): assert content["parent"]["child"]["grandchild"]["name"]["last"] == "Smith" def test_no_templates_specified(self): - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.CreateComposableTemplateParamSource( track=track.Track(name="unit-test"), params={ "settings": { @@ -1943,8 +1928,8 @@ def test_no_templates_specified(self): }, "operation-type": "create-composable-template" }) - self.assertEqual("Please set the properties 'template' and 'body' for the create-composable-template operation " - "or declare composable and/or component templates in the track", ctx.exception.args[0]) + assert ctx.value.args[0] == "Please set the properties 'template' and 'body' for the 
create-composable-template operation " \ + "or declare composable and/or component templates in the track" class CreateComponentTemplateParamSourceTests(TestCase): @@ -1971,11 +1956,11 @@ def test_create_component_index_template_from_track(self): p = source.params() - self.assertEqual(1, len(p["templates"])) - self.assertDictEqual({}, p["request-params"]) + assert len(p["templates"]) == 1 + assert p["request-params"] == {} template, body = p["templates"][0] - self.assertEqual("default", template) - self.assertDictEqual({ + assert template == "default" + assert body == { "template": { "settings": { "index.number_of_shards": 1, @@ -1989,24 +1974,23 @@ def test_create_component_index_template_from_track(self): } } } - }, body) + } class DeleteComponentTemplateParamSource(TestCase): def test_delete_index_template_by_name(self): source = params.DeleteComponentTemplateParamSource(track.Track(name="unit-test"), params={"template": "default"}) p = source.params() - self.assertEqual(1, len(p["templates"])) - self.assertEqual("default", p["templates"][0]) - self.assertTrue(p["only-if-exists"]) - self.assertDictEqual({}, p["request-params"]) + assert len(p["templates"]) == 1 + assert p["templates"][0] == "default" + assert p["only-if-exists"] + assert p["request-params"] == {} def test_delete_index_template_no_name(self): - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.DeleteComponentTemplateParamSource(track.Track(name="unit-test"), params={"operation-type": "delete-component-template"}) - self.assertEqual("Please set the property 'template' for the delete-component-template operation.", - ctx.exception.args[0]) + assert ctx.value.args[0] == "Please set the property 'template' for the delete-component-template operation." def test_delete_index_template_from_track(self): tpl1 = track.ComponentTemplate(name="logs", content={ @@ -2037,11 +2021,11 @@ def test_delete_index_template_from_track(self): p = source.params() - self.assertEqual(2, len(p["templates"])) - self.assertEqual("logs", p["templates"][0]) - self.assertEqual("metrics", p["templates"][1]) - self.assertFalse(p["only-if-exists"]) - self.assertDictEqual({"master_timeout": 20}, p["request-params"]) + assert len(p["templates"]) == 2 + assert p["templates"][0] == "logs" + assert p["templates"][1] == "metrics" + assert not p["only-if-exists"] + assert p["request-params"] == {"master_timeout": 20} class SearchParamSourceTests(TestCase): @@ -2061,22 +2045,22 @@ def test_passes_cache(self): }) p = source.params() - self.assertEqual(10, len(p)) - self.assertEqual("index1", p["index"]) - self.assertIsNone(p["type"]) - self.assertIsNone(p["request-timeout"]) - self.assertIsNone(p["opaque-id"]) - self.assertDictEqual({"header1": "value1"}, p["headers"]) - self.assertEqual({}, p["request-params"]) + assert len(p) == 10 + assert p["index"] == "index1" + assert p["type"] is None + assert p["request-timeout"] is None + assert p["opaque-id"] is None + assert p["headers"] == {"header1": "value1"} + assert p["request-params"] == {} # Explicitly check in these tests for equality - assertFalse would also succeed if it is `None`. 
- self.assertEqual(True, p["cache"]) - self.assertEqual(True, p["response-compression-enabled"]) - self.assertEqual(False, p["detailed-results"]) - self.assertEqual({ + assert p["cache"] is True + assert p["response-compression-enabled"] is True + assert p["detailed-results"] is False + assert p["body"] == { "query": { "match_all": {} } - }, p["body"]) + } def test_uses_data_stream(self): ds1 = track.DataStream(name="data-stream-1") @@ -2097,27 +2081,27 @@ def test_uses_data_stream(self): }) p = source.params() - self.assertEqual(10, len(p)) - self.assertEqual("data-stream-1", p["index"]) - self.assertIsNone(p["type"]) - self.assertEqual(1.0, p["request-timeout"]) - self.assertDictEqual({ + assert len(p) == 10 + assert p["index"] == "data-stream-1" + assert p["type"] is None + assert p["request-timeout"] == 1.0 + assert p["headers"] == { "header1": "value1", "header2": "value2" - }, p["headers"]) - self.assertEqual("12345abcde", p["opaque-id"]) - self.assertEqual({}, p["request-params"]) - self.assertEqual(True, p["cache"]) - self.assertEqual(True, p["response-compression-enabled"]) - self.assertEqual(False, p["detailed-results"]) - self.assertEqual({ + } + assert p["opaque-id"] == "12345abcde" + assert p["request-params"] == {} + assert p["cache"] is True + assert p["response-compression-enabled"] is True + assert p["detailed-results"] is False + assert p["body"] == { "query": { "match_all": {} } - }, p["body"]) + } def test_create_without_index(self): - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: params.SearchParamSource(track=track.Track(name="unit-test"), params={ "type": "type1", "body": { @@ -2127,7 +2111,7 @@ def test_create_without_index(self): } }, operation_name="test_operation") - self.assertEqual("'index' or 'data-stream' is mandatory and is missing for operation 'test_operation'", ctx.exception.args[0]) + assert ctx.value.args[0] == "'index' or 'data-stream' is mandatory and is missing for operation 'test_operation'" def test_passes_request_parameters(self): index1 = track.Index(name="index1", types=["type1"]) @@ -2144,23 +2128,23 @@ def test_passes_request_parameters(self): }) p = source.params() - self.assertEqual(10, len(p)) - self.assertEqual("index1", p["index"]) - self.assertIsNone(p["type"]) - self.assertIsNone(p["request-timeout"]) - self.assertIsNone(p["headers"]) - self.assertIsNone(p["opaque-id"]) - self.assertEqual({ + assert len(p) == 10 + assert p["index"] == "index1" + assert p["type"] is None + assert p["request-timeout"] is None + assert p["headers"] is None + assert p["opaque-id"] is None + assert p["request-params"] == { "_source_include": "some_field" - }, p["request-params"]) - self.assertIsNone(p["cache"]) - self.assertEqual(True, p["response-compression-enabled"]) - self.assertEqual(False, p["detailed-results"]) - self.assertEqual({ + } + assert p["cache"] is None + assert p["response-compression-enabled"] is True + assert p["detailed-results"] is False + assert p["body"] == { "query": { "match_all": {} } - }, p["body"]) + } def test_user_specified_overrides_defaults(self): index1 = track.Index(name="index1", types=["type1"]) @@ -2180,22 +2164,22 @@ def test_user_specified_overrides_defaults(self): }) p = source.params() - self.assertEqual(10, len(p)) - self.assertEqual("_all", p["index"]) - self.assertEqual("type1", p["type"]) - self.assertDictEqual({}, p["request-params"]) - self.assertIsNone(p["request-timeout"]) - self.assertIsNone(p["headers"]) - self.assertEqual("12345abcde", 
p["opaque-id"]) + assert len(p) == 10 + assert p["index"] == "_all" + assert p["type"] == "type1" + assert p["request-params"] == {} + assert p["request-timeout"] is None + assert p["headers"] is None + assert p["opaque-id"] == "12345abcde" # Explicitly check for equality to `False` - assertFalse would also succeed if it is `None`. - self.assertEqual(False, p["cache"]) - self.assertEqual(False, p["response-compression-enabled"]) - self.assertEqual(True, p["detailed-results"]) - self.assertEqual({ + assert p["cache"] is False + assert p["response-compression-enabled"] is False + assert p["detailed-results"] is True + assert p["body"] == { "query": { "match_all": {} } - }, p["body"]) + } def test_user_specified_data_stream_overrides_defaults(self): ds1 = track.DataStream(name="data-stream-1") @@ -2213,25 +2197,25 @@ def test_user_specified_data_stream_overrides_defaults(self): }) p = source.params() - self.assertEqual(10, len(p)) - self.assertEqual("data-stream-2", p["index"]) - self.assertIsNone(p["type"]) - self.assertEqual(1.0, p["request-timeout"]) - self.assertIsNone(p["headers"]) - self.assertIsNone(p["opaque-id"]) - self.assertDictEqual({}, p["request-params"]) + assert len(p) == 10 + assert p["index"] == "data-stream-2" + assert p["type"] is None + assert p["request-timeout"] == 1.0 + assert p["headers"] is None + assert p["opaque-id"] is None + assert p["request-params"] == {} # Explicitly check for equality to `False` - assertFalse would also succeed if it is `None`. - self.assertEqual(False, p["cache"]) - self.assertEqual(False, p["response-compression-enabled"]) - self.assertEqual(False, p["detailed-results"]) - self.assertEqual({ + assert p["cache"] is False + assert p["response-compression-enabled"] is False + assert p["detailed-results"] is False + assert p["body"] == { "query": { "match_all": {} } - }, p["body"]) + } def test_invalid_data_stream_with_type(self): - with self.assertRaises(exceptions.InvalidSyntax) as ctx: + with pytest.raises(exceptions.InvalidSyntax) as ctx: ds1 = track.DataStream(name="data-stream-1") params.SearchParamSource(track=track.Track(name="unit-test", data_streams=[ds1]), params={ @@ -2246,13 +2230,11 @@ def test_invalid_data_stream_with_type(self): } }, operation_name="test_operation") - self.assertEqual("'type' not supported with 'data-stream' for operation 'test_operation'", - ctx.exception.args[0]) + assert ctx.value.args[0] == "'type' not supported with 'data-stream' for operation 'test_operation'" def test_assertions_without_detailed_results_are_invalid(self): index1 = track.Index(name="index1", types=["type1"]) - with self.assertRaisesRegex(exceptions.InvalidSyntax, - r"The property \[detailed-results\] must be \[true\] if assertions are defined"): + with pytest.raises(exceptions.InvalidSyntax, match=r"The property \[detailed-results\] must be \[true\] if assertions are defined"): params.SearchParamSource(track=track.Track(name="unit-test", indices=[index1]), params={ "index": "_all", # unset! 
@@ -2280,8 +2262,8 @@ def test_force_merge_index_from_track(self): p = source.params() - self.assertEqual("index1,index2,index3", p["index"]) - self.assertEqual("blocking", p["mode"]) + assert p["index"] == "index1,index2,index3" + assert p["mode"] == "blocking" def test_force_merge_data_stream_from_track(self): source = params.ForceMergeParamSource(track.Track(name="unit-test", data_streams=[ @@ -2292,32 +2274,32 @@ def test_force_merge_data_stream_from_track(self): p = source.params() - self.assertEqual("data-stream-1,data-stream-2,data-stream-3", p["index"]) - self.assertEqual("blocking", p["mode"]) + assert p["index"] == "data-stream-1,data-stream-2,data-stream-3" + assert p["mode"] == "blocking" def test_force_merge_index_by_name(self): source = params.ForceMergeParamSource(track.Track(name="unit-test"), params={"index": "index2"}) p = source.params() - self.assertEqual("index2", p["index"]) - self.assertEqual("blocking", p["mode"]) + assert p["index"] == "index2" + assert p["mode"] == "blocking" def test_force_merge_by_data_stream_name(self): source = params.ForceMergeParamSource(track.Track(name="unit-test"), params={"data-stream": "data-stream-2"}) p = source.params() - self.assertEqual("data-stream-2", p["index"]) - self.assertEqual("blocking", p["mode"]) + assert p["index"] == "data-stream-2" + assert p["mode"] == "blocking" def test_default_force_merge_index(self): source = params.ForceMergeParamSource(track.Track(name="unit-test"), params={}) p = source.params() - self.assertEqual("_all", p["index"]) - self.assertEqual("blocking", p["mode"]) + assert p["index"] == "_all" + assert p["mode"] == "blocking" def test_force_merge_all_params(self): source = params.ForceMergeParamSource(track.Track(name="unit-test"), params={"index": "index2", @@ -2328,7 +2310,7 @@ def test_force_merge_all_params(self): p = source.params() - self.assertEqual("index2", p["index"]) - self.assertEqual(30, p["request-timeout"]) - self.assertEqual(1, p["max-num-segments"]) - self.assertEqual("polling", p["mode"]) + assert p["index"] == "index2" + assert p["request-timeout"] == 30 + assert p["max-num-segments"] == 1 + assert p["mode"] == "polling" diff --git a/tests/track/track_test.py b/tests/track/track_test.py index 69d7f5d99..c5663cc30 100644 --- a/tests/track/track_test.py +++ b/tests/track/track_test.py @@ -17,6 +17,8 @@ from unittest import TestCase +import pytest + from esrally import exceptions from esrally.track import track @@ -26,80 +28,77 @@ def test_finds_default_challenge(self): default_challenge = track.Challenge("default", description="default challenge", default=True) another_challenge = track.Challenge("other", description="non-default challenge", default=False) - self.assertEqual(default_challenge, - track.Track(name="unittest", + assert track.Track(name="unittest", description="unittest track", - challenges=[another_challenge, default_challenge]) - .default_challenge) + challenges=[another_challenge, default_challenge]) \ + .default_challenge == default_challenge def test_default_challenge_none_if_no_challenges(self): - self.assertIsNone(track.Track(name="unittest", + assert track.Track(name="unittest", description="unittest track", - challenges=[]) - .default_challenge) + challenges=[]) \ + .default_challenge is None def test_finds_challenge_by_name(self): default_challenge = track.Challenge("default", description="default challenge", default=True) another_challenge = track.Challenge("other", description="non-default challenge", default=False) - self.assertEqual(another_challenge, - 
track.Track(name="unittest", + assert track.Track(name="unittest", description="unittest track", - challenges=[another_challenge, default_challenge]) - .find_challenge_or_default("other")) + challenges=[another_challenge, default_challenge]) \ + .find_challenge_or_default("other") == another_challenge def test_uses_default_challenge_if_no_name_given(self): default_challenge = track.Challenge("default", description="default challenge", default=True) another_challenge = track.Challenge("other", description="non-default challenge", default=False) - self.assertEqual(default_challenge, - track.Track(name="unittest", + assert track.Track(name="unittest", description="unittest track", - challenges=[another_challenge, default_challenge]) - .find_challenge_or_default("")) + challenges=[another_challenge, default_challenge]) \ + .find_challenge_or_default("") == default_challenge def test_does_not_find_unknown_challenge(self): default_challenge = track.Challenge("default", description="default challenge", default=True) another_challenge = track.Challenge("other", description="non-default challenge", default=False) - with self.assertRaises(exceptions.InvalidName) as ctx: + with pytest.raises(exceptions.InvalidName) as ctx: track.Track(name="unittest", description="unittest track", challenges=[another_challenge, default_challenge]).find_challenge_or_default("unknown-name") - self.assertEqual("Unknown challenge [unknown-name] for track [unittest]", ctx.exception.args[0]) + assert ctx.value.args[0] == "Unknown challenge [unknown-name] for track [unittest]" class IndexTests(TestCase): def test_matches_exactly(self): - self.assertTrue(track.Index("test").matches("test")) - self.assertFalse(track.Index("test").matches(" test")) + assert track.Index("test").matches("test") + assert not track.Index("test").matches(" test") def test_matches_if_no_pattern_is_defined(self): - self.assertTrue(track.Index("test").matches(pattern=None)) + assert track.Index("test").matches(pattern=None) def test_matches_if_catch_all_pattern_is_defined(self): - self.assertTrue(track.Index("test").matches(pattern="*")) - self.assertTrue(track.Index("test").matches(pattern="_all")) + assert track.Index("test").matches(pattern="*") + assert track.Index("test").matches(pattern="_all") def test_str(self): - self.assertEqual("test", str(track.Index("test"))) + assert str(track.Index("test")) == "test" class DataStreamTests(TestCase): def test_matches_exactly(self): - self.assertTrue(track.DataStream("test").matches("test")) - self.assertFalse(track.DataStream("test").matches(" test")) + assert track.DataStream("test").matches("test") + assert not track.DataStream("test").matches(" test") def test_matches_if_no_pattern_is_defined(self): - self.assertTrue(track.DataStream("test").matches(pattern=None)) + assert track.DataStream("test").matches(pattern=None) def test_matches_if_catch_all_pattern_is_defined(self): - self.assertTrue(track.DataStream("test").matches(pattern="*")) - self.assertTrue(track.DataStream("test").matches(pattern="_all")) + assert track.DataStream("test").matches(pattern="*") + assert track.DataStream("test").matches(pattern="_all") def test_str(self): - self.assertEqual("test", str(track.DataStream("test"))) + assert str(track.DataStream("test")) == "test" class DocumentCorpusTests(TestCase): @@ -115,9 +114,9 @@ def test_do_not_filter(self): filtered_corpus = corpus.filter() - self.assertEqual(corpus.name, filtered_corpus.name) - self.assertListEqual(corpus.documents, filtered_corpus.documents) - 
self.assertDictEqual(corpus.meta_data, filtered_corpus.meta_data) + assert filtered_corpus.name == corpus.name + assert filtered_corpus.documents == corpus.documents + assert filtered_corpus.meta_data == corpus.meta_data def test_filter_documents_by_format(self): corpus = track.DocumentCorpus("test", documents=[ @@ -129,10 +128,10 @@ def test_filter_documents_by_format(self): filtered_corpus = corpus.filter(source_format=track.Documents.SOURCE_FORMAT_BULK) - self.assertEqual("test", filtered_corpus.name) - self.assertEqual(2, len(filtered_corpus.documents)) - self.assertEqual("logs-01", filtered_corpus.documents[0].target_index) - self.assertEqual("logs-03", filtered_corpus.documents[1].target_index) + assert filtered_corpus.name == "test" + assert len(filtered_corpus.documents) == 2 + assert filtered_corpus.documents[0].target_index == "logs-01" + assert filtered_corpus.documents[1].target_index == "logs-03" def test_filter_documents_by_indices(self): corpus = track.DocumentCorpus("test", documents=[ @@ -144,9 +143,9 @@ def test_filter_documents_by_indices(self): filtered_corpus = corpus.filter(target_indices=["logs-02"]) - self.assertEqual("test", filtered_corpus.name) - self.assertEqual(1, len(filtered_corpus.documents)) - self.assertEqual("logs-02", filtered_corpus.documents[0].target_index) + assert filtered_corpus.name == "test" + assert len(filtered_corpus.documents) == 1 + assert filtered_corpus.documents[0].target_index == "logs-02" def test_filter_documents_by_data_streams(self): corpus = track.DocumentCorpus("test", documents=[ @@ -159,9 +158,9 @@ def test_filter_documents_by_data_streams(self): ]) filtered_corpus = corpus.filter(target_data_streams=["logs-02"]) - self.assertEqual("test", filtered_corpus.name) - self.assertEqual(1, len(filtered_corpus.documents)) - self.assertEqual("logs-02", filtered_corpus.documents[0].target_data_stream) + assert filtered_corpus.name == "test" + assert len(filtered_corpus.documents) == 1 + assert filtered_corpus.documents[0].target_data_stream == "logs-02" def test_filter_documents_by_format_and_indices(self): corpus = track.DocumentCorpus("test", documents=[ @@ -173,10 +172,10 @@ def test_filter_documents_by_format_and_indices(self): filtered_corpus = corpus.filter(source_format=track.Documents.SOURCE_FORMAT_BULK, target_indices=["logs-01", "logs-02"]) - self.assertEqual("test", filtered_corpus.name) - self.assertEqual(2, len(filtered_corpus.documents)) - self.assertEqual("logs-01", filtered_corpus.documents[0].target_index) - self.assertEqual("logs-02", filtered_corpus.documents[1].target_index) + assert filtered_corpus.name == "test" + assert len(filtered_corpus.documents) == 2 + assert filtered_corpus.documents[0].target_index == "logs-01" + assert filtered_corpus.documents[1].target_index == "logs-02" def test_union_document_corpus_is_reflexive(self): corpus = track.DocumentCorpus("test", documents=[ @@ -185,7 +184,7 @@ def test_union_document_corpus_is_reflexive(self): track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK, number_of_documents=7, target_index="logs-03"), track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK, number_of_documents=8, target_index=None) ]) - self.assertTrue(corpus.union(corpus) is corpus) + assert corpus.union(corpus) is corpus def test_union_document_corpora_is_symmetric(self): a = track.DocumentCorpus("test", documents=[ @@ -194,8 +193,8 @@ def test_union_document_corpora_is_symmetric(self): b = track.DocumentCorpus("test", documents=[ 
track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK, number_of_documents=5, target_index="logs-02"), ]) - self.assertEqual(b.union(a), a.union(b)) - self.assertEqual(2, len(a.union(b).documents)) + assert a.union(b) == b.union(a) + assert len(a.union(b).documents) == 2 def test_cannot_union_mixed_document_corpora_by_name(self): a = track.DocumentCorpus("test", documents=[ @@ -204,9 +203,9 @@ def test_cannot_union_mixed_document_corpora_by_name(self): b = track.DocumentCorpus("other", documents=[ track.Documents(source_format=track.Documents.SOURCE_FORMAT_BULK, number_of_documents=5, target_index="logs-02"), ]) - with self.assertRaises(exceptions.RallyAssertionError) as ae: + with pytest.raises(exceptions.RallyAssertionError) as ae: a.union(b) - self.assertEqual(ae.exception.message, "Corpora names differ: [test] and [other].") + assert ae.value.message == "Corpora names differ: [test] and [other]." def test_cannot_union_mixed_document_corpora_by_meta_data(self): a = track.DocumentCorpus("test", documents=[ @@ -219,16 +218,15 @@ def test_cannot_union_mixed_document_corpora_by_meta_data(self): ], meta_data={ "with-metadata": True }) - with self.assertRaises(exceptions.RallyAssertionError) as ae: + with pytest.raises(exceptions.RallyAssertionError) as ae: a.union(b) - self.assertEqual(ae.exception.message, - "Corpora meta-data differ: [{'with-metadata': False}] and [{'with-metadata': True}].") + assert ae.value.message == "Corpora meta-data differ: [{'with-metadata': False}] and [{'with-metadata': True}]." class OperationTypeTests(TestCase): def test_string_hyphenation_is_symmetric(self): for op_type in track.OperationType: - self.assertEqual(op_type, track.OperationType.from_hyphenated_string(op_type.to_hyphenated_string())) + assert track.OperationType.from_hyphenated_string(op_type.to_hyphenated_string()) == op_type class TaskFilterTests(TestCase): @@ -246,18 +244,18 @@ def search_task(self): def test_task_name_filter(self): f = track.TaskNameFilter("create-index-task") - self.assertTrue(f.matches(self.create_index_task())) - self.assertFalse(f.matches(self.search_task())) + assert f.matches(self.create_index_task()) + assert not f.matches(self.search_task()) def test_task_op_type_filter(self): f = track.TaskOpTypeFilter(track.OperationType.CreateIndex.to_hyphenated_string()) - self.assertTrue(f.matches(self.create_index_task())) - self.assertFalse(f.matches(self.search_task())) + assert f.matches(self.create_index_task()) + assert not f.matches(self.search_task()) def test_task_tag_filter(self): f = track.TaskTagFilter(tag_name="write-op") - self.assertTrue(f.matches(self.create_index_task())) - self.assertFalse(f.matches(self.search_task())) + assert f.matches(self.create_index_task()) + assert not f.matches(self.search_task()) class TaskTests(TestCase): @@ -274,61 +272,61 @@ def task(self, schedule=None, target_throughput=None, target_interval=None, igno def test_unthrottled_task(self): task = self.task() - self.assertIsNone(task.target_throughput) + assert task.target_throughput is None def test_target_interval_zero_treated_as_unthrottled(self): task = self.task(target_interval=0) - self.assertIsNone(task.target_throughput) + assert task.target_throughput is None def test_valid_throughput_with_unit(self): task = self.task(target_throughput="5 MB/s") - self.assertEqual(track.Throughput(5.0, "MB/s"), task.target_throughput) + assert task.target_throughput == track.Throughput(5.0, "MB/s") def test_valid_throughput_numeric(self): task = self.task(target_throughput=3.2) - 
self.assertEqual(track.Throughput(3.2, "ops/s"), task.target_throughput) + assert task.target_throughput == track.Throughput(3.2, "ops/s") def test_invalid_throughput_format_is_rejected(self): task = self.task(target_throughput="3.2 docs") - with self.assertRaises(exceptions.InvalidSyntax) as e: + with pytest.raises(exceptions.InvalidSyntax) as e: # pylint: disable=pointless-statement task.target_throughput - self.assertEqual("Task [test] specifies invalid target throughput [3.2 docs].", e.exception.args[0]) + assert e.value.args[0] == "Task [test] specifies invalid target throughput [3.2 docs]." def test_invalid_throughput_type_is_rejected(self): task = self.task(target_throughput=True) - with self.assertRaises(exceptions.InvalidSyntax) as e: + with pytest.raises(exceptions.InvalidSyntax) as e: # pylint: disable=pointless-statement task.target_throughput - self.assertEqual("Target throughput [True] for task [test] must be string or numeric.", e.exception.args[0]) + assert e.value.args[0] == "Target throughput [True] for task [test] must be string or numeric." def test_interval_and_throughput_is_rejected(self): task = self.task(target_throughput=1, target_interval=1) - with self.assertRaises(exceptions.InvalidSyntax) as e: + with pytest.raises(exceptions.InvalidSyntax) as e: # pylint: disable=pointless-statement task.target_throughput - self.assertEqual("Task [test] specifies target-interval [1] and target-throughput [1] but only one " - "of them is allowed.", e.exception.args[0]) + assert e.value.args[0] == "Task [test] specifies target-interval [1] and target-throughput [1] but only one " \ + "of them is allowed." def test_invalid_ignore_response_error_level_is_rejected(self): task = self.task(ignore_response_error_level="invalid-value") - with self.assertRaises(exceptions.InvalidSyntax) as e: + with pytest.raises(exceptions.InvalidSyntax) as e: # pylint: disable=pointless-statement task.ignore_response_error_level - self.assertEqual("Task [test] specifies ignore-response-error-level to [invalid-value] but " - "the only allowed values are [non-fatal].", e.exception.args[0]) + assert e.value.args[0] == "Task [test] specifies ignore-response-error-level to [invalid-value] but " \ + "the only allowed values are [non-fatal]." 
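# The three error-behavior tests that follow encode a simple precedence rule:
# a task-level "non-fatal" override beats the global default. A minimal sketch
# of that rule, assuming a simplified stand-in for Task.error_behavior (the
# helper name below is hypothetical):
#
#     def effective_error_behavior(default_error_behavior, ignore_response_error_level=None):
#         # per the tests: a task marked "non-fatal" always continues on error
#         if ignore_response_error_level == "non-fatal":
#             return "continue"
#         return default_error_behavior
#
#     assert effective_error_behavior("continue") == "continue"
#     assert effective_error_behavior("abort", "non-fatal") == "continue"
#     assert effective_error_behavior("abort") == "abort"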
def test_task_continues_with_global_continue(self): task = self.task() effective_on_error = task.error_behavior(default_error_behavior="continue") - self.assertEqual(effective_on_error, "continue") + assert effective_on_error == "continue" def test_task_continues_with_global_abort_and_task_override(self): task = self.task(ignore_response_error_level="non-fatal") effective_on_error = task.error_behavior(default_error_behavior="abort") - self.assertEqual(effective_on_error, "continue") + assert effective_on_error == "continue" def test_task_aborts_with_global_abort(self): task = self.task() effective_on_error = task.error_behavior(default_error_behavior="abort") - self.assertEqual(effective_on_error, "abort") + assert effective_on_error == "abort" diff --git a/tests/utils/collections_test.py b/tests/utils/collections_test.py index 700fc49c0..3442e2fd9 100644 --- a/tests/utils/collections_test.py +++ b/tests/utils/collections_test.py @@ -18,7 +18,7 @@ import random from typing import Any, Mapping -import pytest # type: ignore +import pytest from esrally.utils import collections diff --git a/tests/utils/console_test.py b/tests/utils/console_test.py index a059ba1ee..92e5d8fb7 100644 --- a/tests/utils/console_test.py +++ b/tests/utils/console_test.py @@ -45,17 +45,17 @@ def tearDownClass(cls): @mock.patch.dict(os.environ, {"RALLY_RUNNING_IN_DOCKER": random.choice(["false", "False", "FALSE", ""])}) def test_global_rally_running_in_docker_is_false(self): console.init() - self.assertEqual(False, console.RALLY_RUNNING_IN_DOCKER) + assert console.RALLY_RUNNING_IN_DOCKER is False @mock.patch.dict(os.environ, {"RALLY_RUNNING_IN_DOCKER": ""}) def test_global_rally_running_in_docker_is_false_if_unset(self): console.init() - self.assertEqual(False, console.RALLY_RUNNING_IN_DOCKER) + assert console.RALLY_RUNNING_IN_DOCKER is False @mock.patch.dict(os.environ, {"RALLY_RUNNING_IN_DOCKER": random.choice(["True", "true", "TRUE"])}) def test_global_rally_running_in_docker_is_true(self): console.init() - self.assertEqual(True, console.RALLY_RUNNING_IN_DOCKER) + assert console.RALLY_RUNNING_IN_DOCKER is True @mock.patch("sys.stdout.isatty") @mock.patch("builtins.print") diff --git a/tests/utils/convert_test.py b/tests/utils/convert_test.py index c35cec257..093caab0d 100644 --- a/tests/utils/convert_test.py +++ b/tests/utils/convert_test.py @@ -17,6 +17,8 @@ from unittest import TestCase +import pytest + from esrally.utils import convert @@ -24,16 +26,16 @@ class ToBoolTests(TestCase): def test_convert_to_true(self): values = ["True", "true", "Yes", "yes", "t", "y", "1", True] for value in values: - self.assertTrue(convert.to_bool(value), msg="Expect [%s] of type [%s] to be converted to True." % (str(value), type(value))) + assert convert.to_bool(value), "Expect [%s] of type [%s] to be converted to True." % (str(value), type(value)) def test_convert_to_false(self): values = ["False", "false", "No", "no", "f", "n", "0", False] for value in values: - self.assertFalse(convert.to_bool(value), msg="Expect [%s] of type [%s] to be converted to False." % (str(value), type(value))) + assert not convert.to_bool(value), "Expect [%s] of type [%s] to be converted to False." % (str(value), type(value)) def test_cannot_convert_invalid_value(self): values = ["Invalid", None, []] for value in values: - with self.assertRaises(ValueError, msg="Expect [%s] of type [%s] to fail to be converted." 
% (str(value), type(value))) as ctx: + with pytest.raises(ValueError) as ctx: convert.to_bool(value) - self.assertEqual("Cannot convert [%s] to bool." % value, ctx.exception.args[0]) + assert ctx.value.args[0] == "Cannot convert [%s] to bool." % value diff --git a/tests/utils/git_test.py b/tests/utils/git_test.py index 1b8af3409..8f8158be4 100644 --- a/tests/utils/git_test.py +++ b/tests/utils/git_test.py @@ -20,6 +20,8 @@ import unittest.mock as mock from unittest import TestCase +import pytest + from esrally import exceptions from esrally.utils import git @@ -28,8 +30,8 @@ class GitTests(TestCase): def test_is_git_working_copy(self): test_dir = os.path.dirname(os.path.dirname(__file__)) # this test is assuming that nobody stripped the git repo info in their Rally working copy - self.assertFalse(git.is_working_copy(test_dir)) - self.assertTrue(git.is_working_copy(os.path.dirname(test_dir))) + assert not git.is_working_copy(test_dir) + assert git.is_working_copy(os.path.dirname(test_dir)) @mock.patch("esrally.utils.process.run_subprocess_with_output") @mock.patch("esrally.utils.process.run_subprocess_with_logging") @@ -38,9 +40,9 @@ def test_git_version_too_old(self, run_subprocess_with_logging, run_subprocess): run_subprocess_with_logging.return_value = 64 run_subprocess.return_value = "1.0.0" - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: git.head_revision("/src") - self.assertEqual("Your git version is [1.0.0] but Rally requires at least git 1.9. Please update git.", ctx.exception.args[0]) + assert ctx.value.args[0] == "Your git version is [1.0.0] but Rally requires at least git 1.9. Please update git." run_subprocess_with_logging.assert_called_with("git -C /src --version", level=logging.DEBUG) @mock.patch("esrally.utils.io.ensure_dir") @@ -62,9 +64,9 @@ def test_clone_with_error(self, run_subprocess_with_logging, ensure_dir): src = "/src" remote = "http://github.com/some/project" - with self.assertRaises(exceptions.SupplyError) as ctx: + with pytest.raises(exceptions.SupplyError) as ctx: git.clone(src, remote) - self.assertEqual("Could not clone from [http://github.com/some/project] to [/src]", ctx.exception.args[0]) + assert ctx.value.args[0] == "Could not clone from [http://github.com/some/project] to [/src]" ensure_dir.assert_called_with(src) run_subprocess_with_logging.assert_called_with("git clone http://github.com/some/project /src") @@ -79,9 +81,9 @@ def test_fetch_successful(self, run_subprocess_with_logging): def test_fetch_with_error(self, run_subprocess_with_logging): # first call is to check the git version (0 -> succeeds), the second call is the failing fetch (1 -> fails) run_subprocess_with_logging.side_effect = [0, 1] - with self.assertRaises(exceptions.SupplyError) as ctx: + with pytest.raises(exceptions.SupplyError) as ctx: git.fetch("/src", remote="my-origin") - self.assertEqual("Could not fetch source tree from [my-origin]", ctx.exception.args[0]) + assert ctx.value.args[0] == "Could not fetch source tree from [my-origin]" run_subprocess_with_logging.assert_called_with("git -C /src fetch --prune --tags my-origin") @mock.patch("esrally.utils.process.run_subprocess_with_logging") @@ -94,9 +96,9 @@ def test_checkout_successful(self, run_subprocess_with_logging): def test_checkout_with_error(self, run_subprocess_with_logging): # first call is to check the git version (0 -> succeeds), the second call is the failing checkout (1 -> fails) run_subprocess_with_logging.side_effect = [0, 1] - with 
self.assertRaises(exceptions.SupplyError) as ctx: + with pytest.raises(exceptions.SupplyError) as ctx: git.checkout("/src", "feature-branch") - self.assertEqual("Could not checkout [feature-branch]. Do you have uncommitted changes?", ctx.exception.args[0]) + assert ctx.value.args[0] == "Could not checkout [feature-branch]. Do you have uncommitted changes?" run_subprocess_with_logging.assert_called_with("git -C /src checkout feature-branch") @mock.patch("esrally.utils.process.run_subprocess_with_logging") @@ -160,7 +162,7 @@ def test_pull_revision(self, run_subprocess_with_logging, run_subprocess): def test_head_revision(self, run_subprocess_with_logging, run_subprocess): run_subprocess_with_logging.return_value = 0 run_subprocess.return_value = ["3694a07"] - self.assertEqual("3694a07", git.head_revision("/src")) + assert git.head_revision("/src") == "3694a07" run_subprocess.assert_called_with("git -C /src rev-parse --short HEAD") @mock.patch("esrally.utils.process.run_subprocess_with_output") @@ -171,7 +173,7 @@ def test_list_remote_branches(self, run_subprocess_with_logging, run_subprocess) " origin/master", " origin/5.0.0-alpha1", " origin/5"] - self.assertEqual(["master", "5.0.0-alpha1", "5"], git.branches("/src", remote=True)) + assert git.branches("/src", remote=True) == ["master", "5.0.0-alpha1", "5"] run_subprocess.assert_called_with("git -C /src for-each-ref refs/remotes/ --format='%(refname:short)'") @mock.patch("esrally.utils.process.run_subprocess_with_output") @@ -182,7 +184,7 @@ def test_list_local_branches(self, run_subprocess_with_logging, run_subprocess): " master", " 5.0.0-alpha1", " 5"] - self.assertEqual(["master", "5.0.0-alpha1", "5"], git.branches("/src", remote=False)) + assert git.branches("/src", remote=False) == ["master", "5.0.0-alpha1", "5"] run_subprocess.assert_called_with("git -C /src for-each-ref refs/heads/ --format='%(refname:short)'") @mock.patch("esrally.utils.process.run_subprocess_with_output") @@ -191,7 +193,7 @@ def test_list_tags_with_tags_present(self, run_subprocess_with_logging, run_subp run_subprocess_with_logging.return_value = 0 run_subprocess.return_value = [" v1", " v2"] - self.assertEqual(["v1", "v2"], git.tags("/src")) + assert git.tags("/src") == ["v1", "v2"] run_subprocess.assert_called_with("git -C /src tag") @mock.patch("esrally.utils.process.run_subprocess_with_output") @@ -199,5 +201,5 @@ def test_list_tags_with_tags_present(self, run_subprocess_with_logging, run_subp def test_list_tags_no_tags_available(self, run_subprocess_with_logging, run_subprocess): run_subprocess_with_logging.return_value = 0 run_subprocess.return_value = "" - self.assertEqual([], git.tags("/src")) + assert git.tags("/src") == [] run_subprocess.assert_called_with("git -C /src tag") diff --git a/tests/utils/io_test.py b/tests/utils/io_test.py index f32a16d98..e5882a283 100644 --- a/tests/utils/io_test.py +++ b/tests/utils/io_test.py @@ -47,24 +47,24 @@ def mock_red_hat(path): class IoTests(TestCase): def test_normalize_path(self): - self.assertEqual("/already/a/normalized/path", io.normalize_path("/already/a/normalized/path")) - self.assertEqual("/not/normalized", io.normalize_path("/not/normalized/path/../")) - self.assertEqual(os.path.expanduser("~"), io.normalize_path("~/Documents/..")) + assert io.normalize_path("/already/a/normalized/path") == "/already/a/normalized/path" + assert io.normalize_path("/not/normalized/path/../") == "/not/normalized" + assert io.normalize_path("~/Documents/..") == os.path.expanduser("~") def test_archive(self): - 
self.assertTrue(io.is_archive("/tmp/some-archive.tar.gz")) - self.assertTrue(io.is_archive("/tmp/some-archive.tgz")) + assert io.is_archive("/tmp/some-archive.tar.gz") + assert io.is_archive("/tmp/some-archive.tgz") # Rally does not recognize .7z - self.assertFalse(io.is_archive("/tmp/some-archive.7z")) - self.assertFalse(io.is_archive("/tmp/some.log")) - self.assertFalse(io.is_archive("some.log")) + assert not io.is_archive("/tmp/some-archive.7z") + assert not io.is_archive("/tmp/some.log") + assert not io.is_archive("some.log") def test_has_extension(self): - self.assertTrue(io.has_extension("/tmp/some-archive.tar.gz", ".tar.gz")) - self.assertFalse(io.has_extension("/tmp/some-archive.tar.gz", ".gz")) - self.assertTrue(io.has_extension("/tmp/text.txt", ".txt")) + assert io.has_extension("/tmp/some-archive.tar.gz", ".tar.gz") + assert not io.has_extension("/tmp/some-archive.tar.gz", ".gz") + assert io.has_extension("/tmp/text.txt", ".txt") # no extension whatsoever - self.assertFalse(io.has_extension("/tmp/README", "README")) + assert not io.has_extension("/tmp/README", "README") class TestDecompression: diff --git a/tests/utils/jvm_test.py b/tests/utils/jvm_test.py index adf7af36e..2cb0ddbca 100644 --- a/tests/utils/jvm_test.py +++ b/tests/utils/jvm_test.py @@ -18,28 +18,30 @@ import unittest.mock as mock from unittest import TestCase +import pytest + from esrally import exceptions from esrally.utils import jvm class JvmTests(TestCase): def test_extract_major_version_7(self): - self.assertEqual(7, jvm.major_version("1.7", lambda x, y: x)) + assert jvm.major_version("1.7", lambda x, y: x) == 7 def test_extract_major_version_8(self): - self.assertEqual(8, jvm.major_version("1.8", lambda x, y: x)) + assert jvm.major_version("1.8", lambda x, y: x) == 8 def test_extract_major_version_9(self): - self.assertEqual(9, jvm.major_version("9", lambda x, y: x)) + assert jvm.major_version("9", lambda x, y: x) == 9 def test_extract_major_version_10(self): - self.assertEqual(10, jvm.major_version("10", lambda x, y: x)) + assert jvm.major_version("10", lambda x, y: x) == 10 def test_ea_release(self): - self.assertTrue(jvm.is_early_access_release("Oracle Corporation,9-ea", self.prop_version_reader)) + assert jvm.is_early_access_release("Oracle Corporation,9-ea", self.prop_version_reader) def test_ga_release(self): - self.assertFalse(jvm.is_early_access_release("Oracle Corporation,9", self.prop_version_reader)) + assert not jvm.is_early_access_release("Oracle Corporation,9", self.prop_version_reader) def prop_version_reader(self, java_home, prop): props = java_home.split(",") @@ -56,8 +58,8 @@ def test_resolve_path_for_one_version_via_java_home(self, getenv): getenv.side_effect = [None, "/opt/jdks/jdk/1.8"] major, resolved_path = jvm.resolve_path(majors=8, sysprop_reader=self.path_based_prop_version_reader) - self.assertEqual(8, major) - self.assertEqual("/opt/jdks/jdk/1.8", resolved_path) + assert major == 8 + assert resolved_path == "/opt/jdks/jdk/1.8" @mock.patch("os.getenv") def test_resolve_path_for_one_version_via_java_x_home(self, getenv): @@ -65,24 +67,22 @@ def test_resolve_path_for_one_version_via_java_x_home(self, getenv): getenv.side_effect = ["/opt/jdks/jdk/1.8", None] major, resolved_path = jvm.resolve_path(majors=8, sysprop_reader=self.path_based_prop_version_reader) - self.assertEqual(8, major) - self.assertEqual("/opt/jdks/jdk/1.8", resolved_path) + assert major == 8 + assert resolved_path == "/opt/jdks/jdk/1.8" @mock.patch("os.getenv") def 
test_resolve_path_for_one_version_no_matching_version(self, getenv): # JAVA8_HOME, JAVA_HOME getenv.side_effect = [None, "/opt/jdks/jdk/1.7"] - with self.assertRaisesRegex(expected_exception=exceptions.SystemSetupError, - expected_regex="JAVA_HOME points to JDK 7 but it should point to JDK 8."): + with pytest.raises(exceptions.SystemSetupError, match="JAVA_HOME points to JDK 7 but it should point to JDK 8."): jvm.resolve_path(majors=8, sysprop_reader=self.path_based_prop_version_reader) @mock.patch("os.getenv") def test_resolve_path_for_one_version_no_env_vars_defined(self, getenv): getenv.return_value = None - with self.assertRaisesRegex(expected_exception=exceptions.SystemSetupError, - expected_regex="Neither JAVA8_HOME nor JAVA_HOME point to a JDK 8 installation."): + with pytest.raises(exceptions.SystemSetupError, match="Neither JAVA8_HOME nor JAVA_HOME point to a JDK 8 installation."): jvm.resolve_path(majors=8, sysprop_reader=self.path_based_prop_version_reader) @mock.patch("os.getenv") @@ -106,5 +106,5 @@ def test_resolve_path_for_multiple_versions(self, getenv): "/opt/jdks/jdk/1.8", ] major, resolved_path = jvm.resolve_path(majors=[11, 10, 9, 8], sysprop_reader=self.path_based_prop_version_reader) - self.assertEqual(9, major) - self.assertEqual("/opt/jdks/jdk/9", resolved_path) + assert major == 9 + assert resolved_path == "/opt/jdks/jdk/9" diff --git a/tests/utils/opts_test.py b/tests/utils/opts_test.py index 67cd59830..c6044c990 100644 --- a/tests/utils/opts_test.py +++ b/tests/utils/opts_test.py @@ -23,37 +23,32 @@ class ConfigHelperFunctionTests(TestCase): def test_csv_to_list(self): - self.assertEqual([], opts.csv_to_list("")) - self.assertEqual(["a", "b", "c", "d"], opts.csv_to_list(" a,b,c , d")) - self.assertEqual(["a-;d", "b", "c", "d"], opts.csv_to_list(" a-;d ,b,c , d")) + assert opts.csv_to_list("") == [] + assert opts.csv_to_list(" a,b,c , d") == ["a", "b", "c", "d"] + assert opts.csv_to_list(" a-;d ,b,c , d") == ["a-;d", "b", "c", "d"] def test_kv_to_map(self): - self.assertEqual({}, opts.kv_to_map([])) + assert opts.kv_to_map([]) == {} # explicit treatment as string - self.assertEqual({"k": "3"}, opts.kv_to_map(["k:'3'"])) - self.assertEqual({"k": 3}, opts.kv_to_map(["k:3"])) + assert opts.kv_to_map(["k:'3'"]) == {"k": "3"} + assert opts.kv_to_map(["k:3"]) == {"k": 3} # implicit treatment as string - self.assertEqual({"k": "v"}, opts.kv_to_map(["k:v"])) - self.assertEqual({"k": "v", "size": 4, "empty": False, "temperature": 0.5}, - opts.kv_to_map(["k:'v'", "size:4", "empty:false", "temperature:0.5"])) + assert opts.kv_to_map(["k:v"]) == {"k": "v"} + assert opts.kv_to_map(["k:'v'", "size:4", "empty:false", "temperature:0.5"]) == {"k": "v", "size": 4, "empty": False, "temperature": 0.5} class GenericHelperFunctionTests(TestCase): def test_list_as_bulleted_list(self): src_list = ["param-1", "param-2", "a_longer-parameter"] - self.assertEqual( - ["- param-1", "- param-2", "- a_longer-parameter"], - opts.bulleted_list_of(src_list) - ) + assert opts.bulleted_list_of(src_list) == \ + ["- param-1", "- param-2", "- a_longer-parameter"] def test_list_as_double_quoted_list(self): src_list = ["oneitem", "_another-weird_item", "param-3"] - self.assertEqual( - opts.double_quoted_list_of(src_list), - ['"oneitem"', '"_another-weird_item"', '"param-3"'] - ) + assert ['"oneitem"', '"_another-weird_item"', '"param-3"'] == \ + opts.double_quoted_list_of(src_list) def test_make_list_of_close_matches(self): word_list = [ @@ -108,7 +103,7 @@ def test_make_list_of_close_matches(self): 
"target_throughput", "translog_sync"] - self.assertEqual( + assert opts.make_list_of_close_matches(word_list, available_word_list) == \ ['bulk_indexing_clients', 'bulk_indexing_iterations', 'target_throughput', @@ -116,64 +111,51 @@ def test_make_list_of_close_matches(self): # number_of-shards had a typo 'number_of_shards', 'number_of_replicas', - 'index_refresh_interval'], - opts.make_list_of_close_matches(word_list, available_word_list) - ) + 'index_refresh_interval'] def test_make_list_of_close_matches_returns_with_empty_word_list(self): - self.assertEqual( - [], - opts.make_list_of_close_matches([], ["number_of_shards"]) - ) + assert opts.make_list_of_close_matches([], ["number_of_shards"]) == \ + [] def test_make_list_of_close_matches_returns_empty_list_with_no_close_matches(self): - self.assertEqual( - [], - opts.make_list_of_close_matches( + assert opts.make_list_of_close_matches( ["number_of_shards", "number_of-replicas"], - []) - ) + []) == \ + [] class TestTargetHosts(TestCase): def test_empty_arg_parses_as_empty_list(self): - self.assertEqual([], opts.TargetHosts('').default) - self.assertEqual({'default': []}, opts.TargetHosts('').all_hosts) + assert opts.TargetHosts('').default == [] + assert opts.TargetHosts('').all_hosts == {'default': []} def test_csv_hosts_parses(self): target_hosts = '127.0.0.1:9200,10.17.0.5:19200' - self.assertEqual( - {'default': [{'host': '127.0.0.1', 'port': 9200},{'host': '10.17.0.5', 'port': 19200}]}, - opts.TargetHosts(target_hosts).all_hosts - ) + assert opts.TargetHosts(target_hosts).all_hosts == \ + {'default': [{'host': '127.0.0.1', 'port': 9200},{'host': '10.17.0.5', 'port': 19200}]} - self.assertEqual( - [{'host': '127.0.0.1', 'port': 9200},{'host': '10.17.0.5', 'port': 19200}], - opts.TargetHosts(target_hosts).default - ) + assert opts.TargetHosts(target_hosts).default == \ + [{'host': '127.0.0.1', 'port': 9200},{'host': '10.17.0.5', 'port': 19200}] - self.assertEqual( - [{'host': '127.0.0.1', 'port': 9200},{'host': '10.17.0.5', 'port': 19200}], - opts.TargetHosts(target_hosts).default) + assert opts.TargetHosts(target_hosts).default == \ + [{'host': '127.0.0.1', 'port': 9200},{'host': '10.17.0.5', 'port': 19200}] def test_jsonstring_parses_as_dict_of_clusters(self): target_hosts = ('{"default": ["127.0.0.1:9200","10.17.0.5:19200"],' ' "remote_1": ["88.33.22.15:19200"],' ' "remote_2": ["10.18.0.6:19200","10.18.0.7:19201"]}') - self.assertEqual( + assert opts.TargetHosts(target_hosts).all_hosts == \ {'default': ['127.0.0.1:9200','10.17.0.5:19200'], 'remote_1': ['88.33.22.15:19200'], - 'remote_2': ['10.18.0.6:19200','10.18.0.7:19201']}, - opts.TargetHosts(target_hosts).all_hosts) + 'remote_2': ['10.18.0.6:19200','10.18.0.7:19201']} def test_json_file_parameter_parses(self): - self.assertEqual( - {"default": ["127.0.0.1:9200","10.127.0.3:19200"] }, - opts.TargetHosts(os.path.join(os.path.dirname(__file__), "resources", "target_hosts_1.json")).all_hosts) + assert opts.TargetHosts(os.path.join(os.path.dirname(__file__), "resources", "target_hosts_1.json")).all_hosts == \ + {"default": ["127.0.0.1:9200","10.127.0.3:19200"] } - self.assertEqual( + assert opts.TargetHosts(os.path.join(os.path.dirname(__file__), "resources", "target_hosts_2.json")).all_hosts == \ { "default": [ {"host": "127.0.0.1", "port": 9200}, @@ -186,106 +168,87 @@ def test_json_file_parameter_parses(self): "remote_2":[ {"host": "88.33.27.15", "port": 39200} ] - }, - opts.TargetHosts(os.path.join(os.path.dirname(__file__), "resources", "target_hosts_2.json")).all_hosts) + } 

 class TestClientOptions(TestCase):
     def test_csv_client_options_parses(self):
         client_options_string = "use_ssl:true,verify_certs:true,ca_certs:'/path/to/cacert.pem'"

-        self.assertEqual(
-            {'use_ssl': True, 'verify_certs': True, 'ca_certs': '/path/to/cacert.pem'},
-            opts.ClientOptions(client_options_string).default)
+        assert opts.ClientOptions(client_options_string).default == \
+            {'use_ssl': True, 'verify_certs': True, 'ca_certs': '/path/to/cacert.pem'}

-        self.assertEqual(
-            {'use_ssl': True, 'verify_certs': True, 'ca_certs': '/path/to/cacert.pem'},
-            opts.ClientOptions(client_options_string).default
-        )
+        assert opts.ClientOptions(client_options_string).default == \
+            {'use_ssl': True, 'verify_certs': True, 'ca_certs': '/path/to/cacert.pem'}

-        self.assertEqual(
-            {'default': {'use_ssl': True, 'verify_certs': True, 'ca_certs': '/path/to/cacert.pem'}},
-            opts.ClientOptions(client_options_string).all_client_options
-        )
+        assert opts.ClientOptions(client_options_string).all_client_options == \
+            {'default': {'use_ssl': True, 'verify_certs': True, 'ca_certs': '/path/to/cacert.pem'}}

     def test_jsonstring_client_options_parses(self):
         client_options_string = '{"default": {"timeout": 60},' \
             '"remote_1": {"use_ssl":true,"verify_certs":true,"basic_auth_user": "elastic", "basic_auth_password": "changeme"},'\
             '"remote_2": {"use_ssl":true,"verify_certs":true,"ca_certs":"/path/to/cacert.pem"}}'

-        self.assertEqual(
-            {'timeout': 60},
-            opts.ClientOptions(client_options_string).default)
+        assert opts.ClientOptions(client_options_string).default == \
+            {'timeout': 60}

-        self.assertEqual(
-            {'timeout': 60},
-            opts.ClientOptions(client_options_string).default)
+        assert opts.ClientOptions(client_options_string).default == \
+            {'timeout': 60}

-        self.assertEqual(
+        assert opts.ClientOptions(client_options_string).all_client_options == \
             {'default': {'timeout':60},
             'remote_1': {'use_ssl': True,'verify_certs': True,'basic_auth_user':'elastic','basic_auth_password':'changeme'},
-            'remote_2': {'use_ssl': True,'verify_certs': True, 'ca_certs':'/path/to/cacert.pem'}},
-            opts.ClientOptions(client_options_string).all_client_options)
+            'remote_2': {'use_ssl': True,'verify_certs': True, 'ca_certs':'/path/to/cacert.pem'}}

     def test_json_file_parameter_parses(self):
-        self.assertEqual(
+        assert opts.ClientOptions(os.path.join(os.path.dirname(__file__), "resources", "client_options_1.json")).all_client_options == \
             {'default': {'timeout':60},
             'remote_1': {'use_ssl': True,'verify_certs': True,'basic_auth_user':'elastic','basic_auth_password':'changeme'},
-            'remote_2': {'use_ssl': True,'verify_certs': True, 'ca_certs':'/path/to/cacert.pem'}},
-            opts.ClientOptions(os.path.join(os.path.dirname(__file__), "resources", "client_options_1.json")).all_client_options)
+            'remote_2': {'use_ssl': True,'verify_certs': True, 'ca_certs':'/path/to/cacert.pem'}}

-        self.assertEqual(
-            {'default': {'timeout':60}},
-            opts.ClientOptions(os.path.join(os.path.dirname(__file__), "resources", "client_options_2.json")).all_client_options)
+        assert opts.ClientOptions(os.path.join(os.path.dirname(__file__), "resources", "client_options_2.json")).all_client_options == \
+            {'default': {'timeout':60}}

     def test_no_client_option_parses_to_default(self):
         client_options_string = opts.ClientOptions.DEFAULT_CLIENT_OPTIONS
         target_hosts = None

-        self.assertEqual(
-            {"timeout": 60},
-            opts.ClientOptions(client_options_string,
-                               target_hosts=target_hosts).default)
+        assert opts.ClientOptions(client_options_string,
+                                  target_hosts=target_hosts).default == \
+            {"timeout": 60}
{"timeout": 60} - self.assertEqual( - {"default": {"timeout": 60}}, - opts.ClientOptions(client_options_string, - target_hosts=target_hosts).all_client_options) + assert opts.ClientOptions(client_options_string, + target_hosts=target_hosts).all_client_options == \ + {"default": {"timeout": 60}} - self.assertEqual( - {"timeout": 60}, - opts.ClientOptions(client_options_string, - target_hosts=target_hosts).default) + assert opts.ClientOptions(client_options_string, + target_hosts=target_hosts).default == \ + {"timeout": 60} def test_no_client_option_parses_to_default_with_multicluster(self): client_options_string = opts.ClientOptions.DEFAULT_CLIENT_OPTIONS target_hosts = opts.TargetHosts('{"default": ["127.0.0.1:9200,10.17.0.5:19200"], "remote": ["88.33.22.15:19200"]}') - self.assertEqual( - {"timeout": 60}, - opts.ClientOptions(client_options_string, - target_hosts=target_hosts).default) + assert opts.ClientOptions(client_options_string, + target_hosts=target_hosts).default == \ + {"timeout": 60} - self.assertEqual( - {"default": {"timeout": 60}, "remote": {"timeout": 60}}, - opts.ClientOptions(client_options_string, - target_hosts=target_hosts).all_client_options) + assert opts.ClientOptions(client_options_string, + target_hosts=target_hosts).all_client_options == \ + {"default": {"timeout": 60}, "remote": {"timeout": 60}} - self.assertEqual( - {"timeout": 60}, - opts.ClientOptions(client_options_string, - target_hosts=target_hosts).default) + assert opts.ClientOptions(client_options_string, + target_hosts=target_hosts).default == \ + {"timeout": 60} def test_amends_with_max_connections(self): client_options_string = opts.ClientOptions.DEFAULT_CLIENT_OPTIONS target_hosts = opts.TargetHosts('{"default": ["10.17.0.5:9200"], "remote": ["88.33.22.15:9200"]}') - self.assertEqual( - {"default": {"timeout": 60, "max_connections": 128}, "remote": {"timeout": 60, "max_connections": 128}}, - opts.ClientOptions(client_options_string, target_hosts=target_hosts).with_max_connections(128)) + assert opts.ClientOptions(client_options_string, target_hosts=target_hosts).with_max_connections(128) == \ + {"default": {"timeout": 60, "max_connections": 128}, "remote": {"timeout": 60, "max_connections": 128}} def test_keeps_already_specified_max_connections(self): client_options_string = '{"default": {"timeout":60,"max_connections":5}, "remote": {"timeout":60}}' target_hosts = opts.TargetHosts('{"default": ["10.17.0.5:9200"], "remote": ["88.33.22.15:9200"]}') - self.assertEqual( - {"default": {"timeout": 60, "max_connections": 5}, "remote": {"timeout": 60, "max_connections": 32}}, - opts.ClientOptions(client_options_string, target_hosts=target_hosts).with_max_connections(32)) + assert opts.ClientOptions(client_options_string, target_hosts=target_hosts).with_max_connections(32) == \ + {"default": {"timeout": 60, "max_connections": 5}, "remote": {"timeout": 60, "max_connections": 32}} diff --git a/tests/utils/process_test.py b/tests/utils/process_test.py index 7b46f26bc..1871a4e0d 100644 --- a/tests/utils/process_test.py +++ b/tests/utils/process_test.py @@ -82,8 +82,7 @@ def test_find_other_rally_processes(self, process_iter): night_rally_process, ] - self.assertEqual([rally_process_p, rally_process_r, rally_process_e, rally_process_mac], - process.find_all_other_rally_processes()) + assert process.find_all_other_rally_processes() == [rally_process_p, rally_process_r, rally_process_e, rally_process_mac] @mock.patch("psutil.process_iter") def test_find_no_other_rally_process_running(self, process_iter): @@ -94,7 
diff --git a/tests/utils/process_test.py b/tests/utils/process_test.py
index 7b46f26bc..1871a4e0d 100644
--- a/tests/utils/process_test.py
+++ b/tests/utils/process_test.py
@@ -82,8 +82,7 @@ def test_find_other_rally_processes(self, process_iter):
             night_rally_process,
         ]

-        self.assertEqual([rally_process_p, rally_process_r, rally_process_e, rally_process_mac],
-                         process.find_all_other_rally_processes())
+        assert process.find_all_other_rally_processes() == [rally_process_p, rally_process_r, rally_process_e, rally_process_mac]

     @mock.patch("psutil.process_iter")
     def test_find_no_other_rally_process_running(self, process_iter):
@@ -94,7 +93,7 @@ def test_find_no_other_rally_process_running(self, process_iter):
         process_iter.return_value = [
             metrics_store_process,
             random_python]

-        self.assertEqual(0, len(process.find_all_other_rally_processes()))
+        assert len(process.find_all_other_rally_processes()) == 0

     @mock.patch("psutil.process_iter")
     def test_kills_only_rally_processes(self, process_iter):
@@ -133,14 +132,14 @@ def test_kills_only_rally_processes(self, process_iter):

         process.kill_running_rally_instances()

-        self.assertFalse(rally_es_5_process.killed)
-        self.assertFalse(rally_es_1_process.killed)
-        self.assertFalse(metrics_store_process.killed)
-        self.assertFalse(random_python.killed)
-        self.assertFalse(other_process.killed)
-        self.assertTrue(rally_process_p.killed)
-        self.assertTrue(rally_process_r.killed)
-        self.assertTrue(rally_process_e.killed)
-        self.assertTrue(rally_process_mac.killed)
-        self.assertFalse(own_rally_process.killed)
-        self.assertFalse(night_rally_process.killed)
+        assert not rally_es_5_process.killed
+        assert not rally_es_1_process.killed
+        assert not metrics_store_process.killed
+        assert not random_python.killed
+        assert not other_process.killed
+        assert rally_process_p.killed
+        assert rally_process_r.killed
+        assert rally_process_e.killed
+        assert rally_process_mac.killed
+        assert not own_rally_process.killed
+        assert not night_rally_process.killed
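The process tests above all follow one pattern: patch `psutil.process_iter` and feed it hand-built process stubs, then assert on what the filter returns. A minimal sketch of the degenerate case; the empty-list expectation is an assumption consistent with the `len(...) == 0` test above:

```python
import unittest.mock as mock

from esrally.utils import process


@mock.patch("psutil.process_iter")
def test_finds_nothing_when_no_processes_run(process_iter):
    # With no processes reported at all, nothing can match Rally's filters.
    process_iter.return_value = []
    assert process.find_all_other_rally_processes() == []
```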
@mock.patch("esrally.utils.git.is_working_copy", autospec=True) def test_does_nothing_if_working_copy_present(self, is_working_copy): @@ -69,7 +70,7 @@ def test_does_nothing_if_working_copy_present(self, is_working_copy): resource_name="unittest-resources", offline=True) - self.assertFalse(r.remote) + assert not r.remote @mock.patch("esrally.utils.git.is_working_copy", autospec=True) @mock.patch("esrally.utils.git.clone", autospec=True) @@ -83,7 +84,7 @@ def test_clones_initially(self, clone, is_working_copy): resource_name="unittest-resources", offline=False) - self.assertTrue(r.remote) + assert r.remote clone.assert_called_with(src="/rally-resources/unit-test", remote="git@gitrepos.example.org/rally-resources") @@ -114,9 +115,9 @@ def test_does_not_fetch_if_suppressed(self, fetch, is_working_copy): offline=False, fetch=False) - self.assertTrue(r.remote) + assert r.remote - self.assertEqual(0, fetch.call_count) + assert fetch.call_count == 0 @mock.patch("esrally.utils.git.is_working_copy", autospec=True) @mock.patch("esrally.utils.git.fetch") @@ -131,7 +132,7 @@ def test_ignores_fetch_errors(self, fetch, is_working_copy): resource_name="unittest-resources", offline=False) # no exception during the call - we reach this here - self.assertTrue(r.remote) + assert r.remote fetch.assert_called_with(src="/rally-resources/unit-test") @@ -182,7 +183,7 @@ def test_updates_locally(self, curr_branch, rebase, checkout, branches, fetch, i r.update(distribution_version="6.0.0") branches.assert_called_with("/rally-resources/unit-test", remote=False) - self.assertEqual(0, rebase.call_count) + assert rebase.call_count == 0 checkout.assert_called_with("/rally-resources/unit-test", branch="master") @mock.patch("esrally.utils.git.head_revision") @@ -210,7 +211,7 @@ def test_fallback_to_tags(self, curr_branch, rebase, checkout, branches, tags, f r.update(distribution_version="1.7.4") branches.assert_called_with("/rally-resources/unit-test", remote=False) - self.assertEqual(0, rebase.call_count) + assert rebase.call_count == 0 tags.assert_called_with("/rally-resources/unit-test") checkout.assert_called_with("/rally-resources/unit-test", branch="v1.7") @@ -232,12 +233,12 @@ def test_does_not_update_unknown_branch_remotely(self, rebase, checkout, branche resource_name="unittest-resources", offline=False) - self.assertTrue(r.remote) + assert r.remote - with self.assertRaises(exceptions.SystemSetupError) as ctx: + with pytest.raises(exceptions.SystemSetupError) as ctx: r.update(distribution_version="4.0.0") - self.assertEqual("Cannot find unittest-resources for distribution version 4.0.0", ctx.exception.args[0]) + assert ctx.value.args[0] == "Cannot find unittest-resources for distribution version 4.0.0" calls = [ # first try to find it remotely... 
@@ -248,8 +249,8 @@ def test_does_not_update_unknown_branch_remotely(self, rebase, checkout, branche
         branches.assert_has_calls(calls)
         tags.assert_called_with("/rally-resources/unit-test")
-        self.assertEqual(0, checkout.call_count)
-        self.assertEqual(0, rebase.call_count)
+        assert checkout.call_count == 0
+        assert rebase.call_count == 0

     @mock.patch("esrally.utils.git.head_revision")
     @mock.patch("esrally.utils.git.is_working_copy", autospec=True)
@@ -285,9 +286,9 @@ def test_does_not_update_unknown_branch_remotely_local_fallback(self, curr_branc
         ]
         branches.assert_has_calls(calls)
-        self.assertEqual(0, tags.call_count)
+        assert tags.call_count == 0
         checkout.assert_called_with("/rally-resources/unit-test", branch="1")
-        self.assertEqual(0, rebase.call_count)
+        assert rebase.call_count == 0

     @mock.patch("esrally.utils.git.is_working_copy", autospec=True)
     @mock.patch("esrally.utils.git.fetch", autospec=True)
@@ -307,14 +308,14 @@ def test_does_not_update_unknown_branch_locally(self, rebase, checkout, branches
             resource_name="unittest-resources",
             offline=False)

-        with self.assertRaises(exceptions.SystemSetupError) as ctx:
+        with pytest.raises(exceptions.SystemSetupError) as ctx:
             r.update(distribution_version="4.0.0")

-        self.assertEqual("Cannot find unittest-resources for distribution version 4.0.0", ctx.exception.args[0])
+        assert ctx.value.args[0] == "Cannot find unittest-resources for distribution version 4.0.0"

         branches.assert_called_with("/rally-resources/unit-test", remote=False)
-        self.assertEqual(0, checkout.call_count)
-        self.assertEqual(0, rebase.call_count)
+        assert checkout.call_count == 0
+        assert rebase.call_count == 0

     @mock.patch("esrally.utils.git.is_working_copy", autospec=True)
     @mock.patch("esrally.utils.git.fetch", autospec=True)
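Finally, the recurring `ctx.exception.args[0]` → `ctx.value.args[0]` rewrite: `pytest.raises` yields an `ExceptionInfo` object, which exposes the raised exception as `.value` (plus `.type` and `.tb`), whereas unittest's `assertRaises` context manager exposes it as `.exception`. A self-contained sketch; the bare `raise` stands in for the failing `r.update(...)` call:

```python
import pytest

from esrally import exceptions


def test_exception_info_surface():
    msg = "Cannot find unittest-resources for distribution version 4.0.0"
    with pytest.raises(exceptions.SystemSetupError) as ctx:
        raise exceptions.SystemSetupError(msg)

    # ExceptionInfo: .value is the exception instance, .type its class.
    assert ctx.value.args[0] == msg
    assert ctx.type is exceptions.SystemSetupError
```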