Skip to content

Commit

Permalink
Avoid too many public methods
Browse files Browse the repository at this point in the history
With this commit we re-enabled the pylint check `R0904` which checks for
too many public methods. We have eliminated the warnings by using
different strategies:

* We have merged too fine-grained methods into more coarse-grained ones.
* We have eliminated some methods that have been called once or never.
* We have properly marked protected methods.
* Where appropriate we have disabled the warning (in tests).

Relates elastic#838
  • Loading branch information
danielmitterdorfer committed Nov 30, 2020
1 parent 2fe4ded commit 4c6e22c
Show file tree
Hide file tree
Showing 10 changed files with 284 additions and 388 deletions.
1 change: 0 additions & 1 deletion .pylintrc
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,6 @@ disable=print-statement,
C0330,
C0415,
C4001,
R0904,
R0916,
W0201,
W0613,
Expand Down
17 changes: 8 additions & 9 deletions esrally/driver/driver.py
Original file line number Diff line number Diff line change
Expand Up @@ -224,7 +224,7 @@ def receiveMsg_StartBenchmark(self, msg, sender):
@actor.no_retry("driver") # pylint: disable=no-value-for-parameter
def receiveMsg_TrackPrepared(self, msg, sender):
self.transition_when_all_children_responded(sender, msg,
expected_status=None, new_status=None, transition=self.after_track_prepared)
expected_status=None, new_status=None, transition=self._after_track_prepared)

@actor.no_retry("driver") # pylint: disable=no-value-for-parameter
def receiveMsg_JoinPointReached(self, msg, sender):
Expand Down Expand Up @@ -265,9 +265,6 @@ def on_task_finished(self, metrics, next_task_scheduled_in):
self.coordinator.reset_relative_time()
self.send(self.start_sender, TaskFinished(metrics, next_task_scheduled_in))

def create_track_preparator(self, host):
    """Spawn a ``TrackPreparationActor`` whose placement matches the given host's requirements."""
    placement = self._requirements(host)
    return self.createActor(TrackPreparationActor, targetActorRequirements=placement)

def _requirements(self, host):
if host == "localhost":
return {"coordinator": True}
Expand All @@ -277,13 +274,16 @@ def _requirements(self, host):
def on_cluster_details_retrieved(self, cluster_details):
self.cluster_details = cluster_details

def on_prepare_track(self, preparators, cfg, track):
self.children = preparators
def prepare_track(self, hosts, cfg, track):
    """
    Spawn one track preparation actor per host and instruct each of them to prepare the given track.

    :param hosts: Host names on which track preparation should run.
    :param cfg: The current config object.
    :param track: The track to prepare.
    """
    preparators = [self._create_track_preparator(host) for host in hosts]
    self.children = preparators
    prepare_msg = PrepareTrack(cfg, track)
    for preparator in preparators:
        self.send(preparator, prepare_msg)

def after_track_prepared(self):
def _create_track_preparator(self, host):
return self.createActor(TrackPreparationActor, targetActorRequirements=self._requirements(host))

def _after_track_prepared(self):
cluster_version = self.cluster_details["version"] if self.cluster_details else {}
for child in self.children:
self.send(child, thespian.actors.ActorExitRequest())
Expand Down Expand Up @@ -452,8 +452,7 @@ def prepare_benchmark(self, t):

self.load_driver_hosts.append(host_config)

preps = [self.target.create_track_preparator(h["host"]) for h in self.load_driver_hosts]
self.target.on_prepare_track(preps, self.config, self.track)
self.target.prepare_track([h["host"] for h in self.load_driver_hosts], self.config, self.track)

def start_benchmark(self):
self.logger.info("Benchmark is about to start.")
Expand Down
100 changes: 10 additions & 90 deletions esrally/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -414,7 +414,7 @@ def close(self):
"""
self.logger.info("Closing metrics store.")
self.flush()
self.clear_meta_info()
self._clear_meta_info()
self.opened = False

def add_meta_info(self, scope, scope_key, key, value):
Expand All @@ -437,7 +437,7 @@ def add_meta_info(self, scope, scope_key, key, value):
else:
raise exceptions.SystemSetupError("Unknown meta info scope [%s]" % scope)

def clear_meta_info(self):
def _clear_meta_info(self):
"""
Clears all internally stored meta-info. This is considered Rally internal API and not intended for normal client consumption.
"""
Expand All @@ -446,28 +446,6 @@ def clear_meta_info(self):
MetaInfoScope.node: {}
}

def merge_meta_info(self, to_merge):
    """
    Merges the current meta info with another one.

    :param to_merge: A meta info representation that should be merged with the current one.
    """
    # Merge per scope; scopes absent from ``to_merge`` leave the stored meta-info untouched.
    for scope in (MetaInfoScope.cluster, MetaInfoScope.node):
        if scope in to_merge:
            self._meta_info[scope].update(to_merge[scope])

@property
def meta_info(self):
    """
    :return: All currently stored meta-info. This is considered Rally internal API and not intended for normal client consumption.
    """
    return self._meta_info

@meta_info.setter
def meta_info(self, meta_info):
    # Wholesale replacement of the internally stored meta-info.
    # NOTE(review): presumably callers pass the same two-level {scope: {key: value}} structure
    # that add_meta_info/merge_meta_info maintain — confirm against callers.
    self._meta_info = meta_info

@property
def open_context(self):
return {
Expand All @@ -478,58 +456,14 @@ def open_context(self):
"car": self._car
}

def put_count_cluster_level(self, name, count, unit=None, task=None, operation=None, operation_type=None, sample_type=SampleType.Normal,
                            absolute_time=None, relative_time=None, meta_data=None):
    """
    Adds a new cluster level counter metric.

    :param name: The name of the metric.
    :param count: The metric value. It is expected to be of type int (otherwise use put_value_*).
    :param unit: A count may or may not have unit.
    :param task: The task name to which this value applies. Optional. Defaults to None.
    :param operation: The operation name to which this value applies. Optional. Defaults to None.
    :param operation_type: The operation type to which this value applies. Optional. Defaults to None.
    :param sample_type: Whether this is a warmup or a normal measurement sample. Defaults to SampleType.Normal.
    :param absolute_time: The absolute timestamp in seconds since epoch when this metric record is stored. Defaults to None. The metrics
    store will derive the timestamp automatically.
    :param relative_time: The relative timestamp in seconds since the start of the benchmark when this metric record is stored.
    Defaults to None. The metrics store will derive the timestamp automatically.
    :param meta_data: A dict, containing additional key-value pairs. Defaults to None.
    """
    # Cluster-level metrics carry no node name, hence the ``None`` second argument.
    metric_args = (MetaInfoScope.cluster, None, name, count, unit, task, operation,
                   operation_type, sample_type, absolute_time, relative_time, meta_data)
    self._put_metric(*metric_args)

def put_count_node_level(self, node_name, name, count, unit=None, task=None, operation=None, operation_type=None,
                         sample_type=SampleType.Normal, absolute_time=None, relative_time=None, meta_data=None):
    """
    Adds a new node level counter metric.

    :param name: The name of the metric.
    :param node_name: The name of the cluster node for which this metric has been determined.
    :param count: The metric value. It is expected to be of type int (otherwise use put_value_*).
    :param unit: A count may or may not have unit.
    :param task: The task name to which this value applies. Optional. Defaults to None.
    :param operation: The operation name to which this value applies. Optional. Defaults to None.
    :param operation_type: The operation type to which this value applies. Optional. Defaults to None.
    :param sample_type: Whether this is a warmup or a normal measurement sample. Defaults to SampleType.Normal.
    :param absolute_time: The absolute timestamp in seconds since epoch when this metric record is stored. Defaults to None. The metrics
    store will derive the timestamp automatically.
    :param relative_time: The relative timestamp in seconds since the start of the benchmark when this metric record is stored.
    Defaults to None. The metrics store will derive the timestamp automatically.
    :param meta_data: A dict, containing additional key-value pairs. Defaults to None.
    """
    self._put_metric(MetaInfoScope.node, node_name, name, count, unit, task, operation, operation_type, sample_type, absolute_time,
                     relative_time, meta_data)

# should be a float
def put_value_cluster_level(self, name, value, unit, task=None, operation=None, operation_type=None, sample_type=SampleType.Normal,
def put_value_cluster_level(self, name, value, unit=None, task=None, operation=None, operation_type=None, sample_type=SampleType.Normal,
absolute_time=None, relative_time=None, meta_data=None):
"""
Adds a new cluster level value metric.
:param name: The name of the metric.
:param value: The metric value. It is expected to be of type float (otherwise use put_count_*).
:param unit: The unit of this metric value (e.g. ms, docs/s).
:param value: The metric value. It is expected to be numeric.
:param unit: The unit of this metric value (e.g. ms, docs/s). Optional. Defaults to None.
:param task: The task name to which this value applies. Optional. Defaults to None.
:param operation: The operation name to which this value applies. Optional. Defaults to None.
:param operation_type: The operation type to which this value applies. Optional. Defaults to None.
Expand All @@ -543,15 +477,15 @@ def put_value_cluster_level(self, name, value, unit, task=None, operation=None,
self._put_metric(MetaInfoScope.cluster, None, name, value, unit, task, operation, operation_type, sample_type, absolute_time,
relative_time, meta_data)

def put_value_node_level(self, node_name, name, value, unit, task=None, operation=None, operation_type=None,
def put_value_node_level(self, node_name, name, value, unit=None, task=None, operation=None, operation_type=None,
sample_type=SampleType.Normal, absolute_time=None, relative_time=None, meta_data=None):
"""
Adds a new node level value metric.
:param name: The name of the metric.
:param node_name: The name of the cluster node for which this metric has been determined.
:param value: The metric value. It is expected to be of type float (otherwise use put_count_*).
:param unit: The unit of this metric value (e.g. ms, docs/s)
:param value: The metric value. It is expected to be numeric.
:param unit: The unit of this metric value (e.g. ms, docs/s). Optional. Defaults to None.
:param task: The task name to which this value applies. Optional. Defaults to None.
:param operation: The operation name to which this value applies. Optional. Defaults to None.
:param operation_type: The operation type to which this value applies. Optional. Defaults to None.
Expand Down Expand Up @@ -738,21 +672,6 @@ def get_unit(self, name, task=None, node_name=None):
def _get(self, name, task, operation_type, sample_type, node_name, mapper):
raise NotImplementedError("abstract method")

def get_count(self, name, task=None, operation_type=None, sample_type=None):
    """
    :param name: The metric name to query.
    :param task: The task name to query. Optional.
    :param operation_type: The operation type to query. Optional.
    :param sample_type: The sample type to query. Optional. By default, all samples are considered.
    :return: The number of samples for this metric.
    """
    # A falsy stats result (no matching samples) maps to a count of zero.
    stats = self.get_stats(name, task, operation_type, sample_type)
    return stats["count"] if stats else 0

def get_error_rate(self, task, operation_type=None, sample_type=None):
"""
Gets the error rate for a specific task.
Expand Down Expand Up @@ -1767,7 +1686,8 @@ def median(self, metric_name, task_name=None, operation_type=None, sample_type=N

def single_latency(self, task, metric_name="latency"):
sample_type = SampleType.Normal
sample_size = self.store.get_count(metric_name, task=task, sample_type=sample_type)
stats = self.store.get_stats(metric_name, task=task, sample_type=sample_type)
sample_size = stats["count"] if stats else 0
if sample_size > 0:
percentiles = self.store.get_percentiles(metric_name,
task=task,
Expand Down
Loading

0 comments on commit 4c6e22c

Please sign in to comment.