From 3d027d7e3817ec265872434378306f1e92fda9bb Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 22 Jan 2025 17:43:07 +0100 Subject: [PATCH] ruff rule E501: Fix long lines in Python code (#1910) * ruff rule E501: Fix long lines in Python code * Add missing space --------- Co-authored-by: dominic <510002+dmah42@users.noreply.github.com> --- .ycm_extra_conf.py | 8 ++--- bindings/python/google_benchmark/__init__.py | 15 +++++---- bindings/python/google_benchmark/example.py | 3 +- pyproject.toml | 1 - tools/compare.py | 35 ++++++++++++++++---- tools/gbench/report.py | 16 ++++++--- tools/gbench/util.py | 8 +++-- 7 files changed, 60 insertions(+), 26 deletions(-) diff --git a/.ycm_extra_conf.py b/.ycm_extra_conf.py index caf257f054..ffef1b4daf 100644 --- a/.ycm_extra_conf.py +++ b/.ycm_extra_conf.py @@ -83,10 +83,10 @@ def IsHeaderFile(filename): def GetCompilationInfoForFile(filename): - # The compilation_commands.json file generated by CMake does not have entries - # for header files. So we do our best by asking the db for flags for a - # corresponding source file, if any. If one exists, the flags for that file - # should be good enough. + # The compilation_commands.json file generated by CMake does not have + # entries for header files. So we do our best by asking the db for flags for + # a corresponding source file, if any. If one exists, the flags for that + # file should be good enough. if IsHeaderFile(filename): basename = os.path.splitext(filename)[0] for extension in SOURCE_EXTENSIONS: diff --git a/bindings/python/google_benchmark/__init__.py b/bindings/python/google_benchmark/__init__.py index 7006352669..3685928f21 100644 --- a/bindings/python/google_benchmark/__init__.py +++ b/bindings/python/google_benchmark/__init__.py @@ -60,7 +60,8 @@ class __OptionMaker: """ class Options: - """Pure data class to store options calls, along with the benchmarked function.""" + """Pure data class to store options calls, along with the benchmarked + function.""" def __init__(self, func): self.func = func @@ -83,8 +84,8 @@ def __builder_method(*args, **kwargs): def __decorator(func_or_options): options = self.make(func_or_options) options.builder_calls.append((builder_name, args, kwargs)) - # The decorator returns Options so it is not technically a decorator - # and needs a final call to @register + # The decorator returns Options so it is not technically a + # decorator and needs a final call to @register return options return __decorator @@ -93,8 +94,8 @@ def __decorator(func_or_options): # Alias for nicer API. -# We have to instantiate an object, even if stateless, to be able to use __getattr__ -# on option.range +# We have to instantiate an object, even if stateless, to be able to use +# __getattr__ on option.range option = __OptionMaker() @@ -104,8 +105,8 @@ def register(undefined=None, *, name=None): # Decorator is called without parenthesis so we return a decorator return lambda f: register(f, name=name) - # We have either the function to benchmark (simple case) or an instance of Options - # (@option._ case). + # We have either the function to benchmark (simple case) or an instance of + # Options (@option._ case). options = __OptionMaker.make(undefined) if name is None: diff --git a/bindings/python/google_benchmark/example.py b/bindings/python/google_benchmark/example.py index 5909c0fc0e..5635c41842 100644 --- a/bindings/python/google_benchmark/example.py +++ b/bindings/python/google_benchmark/example.py @@ -13,7 +13,8 @@ # limitations under the License. 
"""Example of Python using C++ benchmark framework. -To run this example, you must first install the `google_benchmark` Python package. +To run this example, you must first install the `google_benchmark` Python +package. To install using `setup.py`, download and extract the `google_benchmark` source. In the extracted directory, execute: diff --git a/pyproject.toml b/pyproject.toml index 761473c204..4595b6dd11 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -70,7 +70,6 @@ target-version = "py311" # Enable pycodestyle (`E`, `W`), Pyflakes (`F`), and isort (`I`) codes by default. select = ["ASYNC", "B", "C4", "C90", "E", "F", "I", "PERF", "PIE", "PT018", "RUF", "SIM", "UP", "W"] ignore = [ - "E501", # line too long "PLW2901", # redefined-loop-name "UP031", # printf-string-formatting ] diff --git a/tools/compare.py b/tools/compare.py index 36cbe07569..1dd9de239f 100755 --- a/tools/compare.py +++ b/tools/compare.py @@ -85,7 +85,10 @@ def create_parser(): "-d", "--dump_to_json", dest="dump_to_json", - help="Additionally, dump benchmark comparison output to this file in JSON format.", + help=( + "Additionally, dump benchmark comparison output to this file in" + " JSON format." + ), ) utest = parser.add_argument_group() @@ -94,7 +97,16 @@ def create_parser(): dest="utest", default=True, action="store_false", - help=f"The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {report.UTEST_OPTIMAL_REPETITIONS}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {report.UTEST_MIN_REPETITIONS} repetitions were done.\nThis option can disable the U Test.", + help=( + "The tool can do a two-tailed Mann-Whitney U test with the null" + " hypothesis that it is equally likely that a randomly selected" + " value from one sample will be less than or greater than a" + " randomly selected value from a second sample.\nWARNING: requires" + f" **LARGE** (no less than {report.UTEST_OPTIMAL_REPETITIONS})" + " number of repetitions to be meaningful!\nThe test is being done" + f" by default, if at least {report.UTEST_MIN_REPETITIONS}" + " repetitions were done.\nThis option can disable the U Test." + ), ) alpha_default = 0.05 utest.add_argument( @@ -103,7 +115,9 @@ def create_parser(): default=alpha_default, type=float, help=( - "significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)" + "significance level alpha. 
if the calculated p-value is below this" + " value, then the result is said to be statistically significant" + " and the null hypothesis is rejected.\n(default: %0.4f)" ) % alpha_default, ) @@ -114,7 +128,10 @@ def create_parser(): parser_a = subparsers.add_parser( "benchmarks", - help="The most simple use-case, compare all the output of these two benchmarks", + help=( + "The most simple use-case, compare all the output of these two" + " benchmarks" + ), ) baseline = parser_a.add_argument_group("baseline", "The benchmark baseline") baseline.add_argument( @@ -178,7 +195,10 @@ def create_parser(): parser_c = subparsers.add_parser( "benchmarksfiltered", - help="Compare filter one of first benchmark with filter two of the second benchmark", + help=( + "Compare filter one of first benchmark with filter two of the" + " second benchmark" + ), ) baseline = parser_c.add_argument_group("baseline", "The benchmark baseline") baseline.add_argument( @@ -203,7 +223,10 @@ def create_parser(): metavar="test_contender", type=argparse.FileType("r"), nargs=1, - help="The second benchmark executable or JSON output file, that will be compared against the baseline", + help=( + "The second benchmark executable or JSON output file, that will be" + " compared against the baseline" + ), ) contender.add_argument( "filter_contender", diff --git a/tools/gbench/report.py b/tools/gbench/report.py index 6b58918bfc..e143e45a71 100644 --- a/tools/gbench/report.py +++ b/tools/gbench/report.py @@ -249,7 +249,10 @@ def get_utest_color(pval): # We still got some results to show but issue a warning about it. if not utest["have_optimal_repetitions"]: dsc_color = BC_WARNING - dsc += f". WARNING: Results unreliable! {UTEST_OPTIMAL_REPETITIONS}+ repetitions recommended." + dsc += ( + f". WARNING: Results unreliable! {UTEST_OPTIMAL_REPETITIONS}+" + " repetitions recommended." + ) special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}" @@ -397,12 +400,17 @@ def get_color(res): first_col_width = find_longest_name(json_diff_report) first_col_width = max(first_col_width, len("Benchmark")) first_col_width += len(UTEST_COL_NAME) - first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format( - "Benchmark", 12 + first_col_width + fmt_str = ( + "{:<{}s}Time CPU Time Old Time New CPU Old" + " CPU New" ) + first_line = fmt_str.format("Benchmark", 12 + first_col_width) output_strs = [first_line, "-" * len(first_line)] - fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}" + fmt_str = ( + "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}" + "{endc}{:14.0f}{:14.0f}" + ) for benchmark in json_diff_report: # *If* we were asked to only include aggregates, # and if it is non-aggregate, then don't print it. diff --git a/tools/gbench/util.py b/tools/gbench/util.py index 596b51a07c..2e91006be4 100644 --- a/tools/gbench/util.py +++ b/tools/gbench/util.py @@ -1,4 +1,6 @@ -"""util.py - General utilities for running, loading, and processing benchmarks""" +"""util.py - General utilities for running, loading, and processing +benchmarks +""" import json import os @@ -141,8 +143,8 @@ def benchmark_wanted(benchmark): json_schema_version = results["context"]["json_schema_version"] if json_schema_version != 1: print( - "In %s, got unnsupported JSON schema version: %i, expected 1" - % (fname, json_schema_version) + f"In {fname}, got unnsupported JSON schema version:" + f" {json_schema_version}, expected 1" ) sys.exit(1) if "benchmarks" in results:
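
The wrapping pattern applied throughout this patch relies on Python's implicit concatenation of adjacent string literals, which also works when some pieces are f-strings. A minimal sketch of that technique, using illustrative names and values that are not taken from the repository:

# Minimal sketch (illustrative names only, not from the patch): adjacent
# string literals, including f-strings, are joined at compile time, so a
# long message can be split across source lines without "+" operators or
# backslash continuations.
schema_version = 2  # hypothetical value for demonstration
message = (
    "got unsupported JSON schema version:"
    f" {schema_version}, expected 1"
)
assert message == "got unsupported JSON schema version: 2, expected 1"
print(message)

Because the pieces are joined at compile time, the wrapped form yields exactly the same string value as a single long literal, so behavior is unchanged while each source line stays within the E501 length limit.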