
Commit

Merge branch 'main' into fixsan
dmah42 authored Jan 30, 2025
2 parents 104fc3d + 4a805f9 commit 20fcc91
Showing 17 changed files with 223 additions and 124 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/build-and-test.yml
@@ -17,7 +17,7 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [ubuntu-22.04, ubuntu-20.04, macos-latest]
os: [ubuntu-22.04, ubuntu-20.04, ubuntu-22.04-arm, macos-latest]
build_type: ['Release', 'Debug']
compiler: ['g++', 'clang++']
lib: ['shared', 'static']
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -11,7 +11,7 @@ repos:
types_or: [ python, pyi ]
args: [ "--ignore-missing-imports", "--scripts-are-modules" ]
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.9.1
rev: v0.9.2
hooks:
- id: ruff
args: [ --fix, --exit-non-zero-on-fix ]
8 changes: 4 additions & 4 deletions .ycm_extra_conf.py
@@ -83,10 +83,10 @@ def IsHeaderFile(filename):


def GetCompilationInfoForFile(filename):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
# The compilation_commands.json file generated by CMake does not have
# entries for header files. So we do our best by asking the db for flags for
# a corresponding source file, if any. If one exists, the flags for that
# file should be good enough.
if IsHeaderFile(filename):
basename = os.path.splitext(filename)[0]
for extension in SOURCE_EXTENSIONS:
15 changes: 8 additions & 7 deletions bindings/python/google_benchmark/__init__.py
@@ -60,7 +60,8 @@ class __OptionMaker:
"""

class Options:
"""Pure data class to store options calls, along with the benchmarked function."""
"""Pure data class to store options calls, along with the benchmarked
function."""

def __init__(self, func):
self.func = func
@@ -83,8 +84,8 @@ def __builder_method(*args, **kwargs):
def __decorator(func_or_options):
options = self.make(func_or_options)
options.builder_calls.append((builder_name, args, kwargs))
# The decorator returns Options so it is not technically a decorator
# and needs a final call to @register
# The decorator returns Options so it is not technically a
# decorator and needs a final call to @register
return options

return __decorator
@@ -93,8 +94,8 @@ def __decorator(func_or_options):


# Alias for nicer API.
# We have to instantiate an object, even if stateless, to be able to use __getattr__
# on option.range
# We have to instantiate an object, even if stateless, to be able to use
# __getattr__ on option.range
option = __OptionMaker()


@@ -104,8 +105,8 @@ def register(undefined=None, *, name=None):
# Decorator is called without parenthesis so we return a decorator
return lambda f: register(f, name=name)

# We have either the function to benchmark (simple case) or an instance of Options
# (@option._ case).
# We have either the function to benchmark (simple case) or an instance of
# Options (@option._ case).
options = __OptionMaker.make(undefined)

if name is None:
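The comments reflowed above describe how __OptionMaker accumulates option calls and how register works both with and without parentheses. A minimal usage sketch of that API, assuming the decorator names shipped with the google_benchmark bindings (register, option.use_real_time); the exact option names are forwarded to the C++ builder and may differ:

import google_benchmark as benchmark


# Without parentheses: `register` receives the function directly.
@benchmark.register
def sum_million(state):
    while state:
        sum(range(1_000_000))


# With parentheses: `register` returns a decorator, so a custom name can be
# passed. Each `option.*` call appends to Options.builder_calls and is only
# applied once the final `register` runs.
@benchmark.register(name="sum_million_real_time")
@benchmark.option.use_real_time()
def sum_million_real(state):
    while state:
        sum(range(1_000_000))


if __name__ == "__main__":
    benchmark.main()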
6 changes: 3 additions & 3 deletions bindings/python/google_benchmark/example.py
@@ -13,7 +13,8 @@
# limitations under the License.
"""Example of Python using C++ benchmark framework.
To run this example, you must first install the `google_benchmark` Python package.
To run this example, you must first install the `google_benchmark` Python
package.
To install using `setup.py`, download and extract the `google_benchmark` source.
In the extracted directory, execute:
@@ -57,7 +58,7 @@ def skipped(state):
state.skip_with_error("some error")
return # NOTE: You must explicitly return, or benchmark will continue.

... # Benchmark code would be here.
# Benchmark code would be here.


@benchmark.register
@@ -78,7 +79,6 @@ def custom_counters(state):
num_foo = 0.0
while state:
# Benchmark some code here
pass
# Collect some custom metric named foo
num_foo += 0.13

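For context on the custom_counters hunk above (which only removes a redundant pass), a short sketch of how the collected metric is typically reported, assuming the state.counters mapping exposed by the bindings:

import google_benchmark as benchmark


@benchmark.register
def custom_counters(state):
    num_foo = 0.0
    while state:
        # Benchmark some code here, then collect a custom metric named foo.
        num_foo += 0.13
    # Report the metric; it shows up as an extra column in the console output.
    state.counters["foo"] = num_foo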
5 changes: 3 additions & 2 deletions pyproject.toml
@@ -68,9 +68,10 @@ target-version = "py311"

[tool.ruff.lint]
# Enable pycodestyle (`E`, `W`), Pyflakes (`F`), and isort (`I`) codes by default.
select = ["E", "F", "I", "W"]
select = ["ASYNC", "B", "C4", "C90", "E", "F", "I", "PERF", "PIE", "PT018", "RUF", "SIM", "UP", "W"]
ignore = [
"E501", # line too long
"PLW2901", # redefined-loop-name
"UP031", # printf-string-formatting
]

[tool.ruff.lint.isort]
14 changes: 6 additions & 8 deletions setup.py
@@ -4,8 +4,9 @@
import re
import shutil
import sys
from collections.abc import Generator
from pathlib import Path
from typing import Any, Generator
from typing import Any

import setuptools
from setuptools.command import build_ext
@@ -86,15 +87,14 @@ def copy_extensions_to_source(self):
This is done in the ``bazel_build`` method, so it's not necessary to
do again in the `build_ext` base class.
"""
pass

def bazel_build(self, ext: BazelExtension) -> None:
def bazel_build(self, ext: BazelExtension) -> None: # noqa: C901
"""Runs the bazel build to create the package."""
temp_path = Path(self.build_temp)

# We round to the minor version, which makes rules_python
# look up the latest available patch version internally.
python_version = "{0}.{1}".format(*sys.version_info[:2])
python_version = "{}.{}".format(*sys.version_info[:2])

bazel_argv = [
"bazel",
@@ -142,9 +142,7 @@ def bazel_build(self, ext: BazelExtension) -> None:
# we do not want the bare .so file included
# when building for ABI3, so we require a
# full and exact match on the file extension.
if "".join(fp.suffixes) == suffix:
should_copy = True
elif fp.suffix == ".pyi":
if "".join(fp.suffixes) == suffix or fp.suffix == ".pyi":
should_copy = True
elif Path(root) == srcdir and f == "py.typed":
# copy py.typed, but only at the package root.
@@ -155,7 +153,7 @@ def bazel_build(self, ext: BazelExtension) -> None:


setuptools.setup(
cmdclass=dict(build_ext=BuildBazelExtension),
cmdclass={"build_ext": BuildBazelExtension},
package_data={"google_benchmark": ["py.typed", "*.pyi"]},
ext_modules=[
BazelExtension(
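The setup.py hunks above swap the deprecated typing.Generator alias for collections.abc.Generator and drop the explicit positional indices from the version format string. A small standalone sketch of both idioms (function names here are illustrative, not taken from setup.py):

import sys
from collections.abc import Generator


def python_minor_version() -> str:
    # Round to the minor version, e.g. (3, 11, 4) -> "3.11", so rules_python
    # can resolve the latest patch release on its own.
    return "{}.{}".format(*sys.version_info[:2])


def chunks(items: list[str], size: int) -> Generator[list[str], None, None]:
    # Since Python 3.9 the abstract container types are importable (and
    # subscriptable) from collections.abc; the typing aliases are deprecated.
    for i in range(0, len(items), size):
        yield items[i : i + size]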
9 changes: 5 additions & 4 deletions src/benchmark_runner.cc
@@ -438,9 +438,7 @@ MemoryManager::Result* BenchmarkRunner::RunMemoryManager(
return memory_result;
}

void BenchmarkRunner::RunProfilerManager() {
// TODO: Provide a way to specify the number of iterations.
IterationCount profile_iterations = 1;
void BenchmarkRunner::RunProfilerManager(IterationCount profile_iterations) {
std::unique_ptr<internal::ThreadManager> manager;
manager.reset(new internal::ThreadManager(1));
b.Setup();
@@ -507,7 +505,10 @@ void BenchmarkRunner::DoOneRepetition() {
}

if (profiler_manager != nullptr) {
RunProfilerManager();
// We want to externally profile the benchmark for the same number of
// iterations because, for example, if we're tracing the benchmark then we
// want trace data to reasonably match PMU data.
RunProfilerManager(iters);
}

// Ok, now actually report.
3 changes: 1 addition & 2 deletions src/benchmark_runner.h
@@ -19,7 +19,6 @@
#include <vector>

#include "benchmark_api_internal.h"
#include "internal_macros.h"
#include "perf_counters.h"
#include "thread_manager.h"

@@ -109,7 +108,7 @@ class BenchmarkRunner {

MemoryManager::Result* RunMemoryManager(IterationCount memory_iterations);

void RunProfilerManager();
void RunProfilerManager(IterationCount profile_iterations);

IterationCount PredictNumItersNeeded(const IterationResults& i) const;

1 change: 0 additions & 1 deletion src/string_util.h
@@ -9,7 +9,6 @@
#include "benchmark/benchmark.h"
#include "benchmark/export.h"
#include "check.h"
#include "internal_macros.h"

namespace benchmark {

3 changes: 3 additions & 0 deletions test/CMakeLists.txt
@@ -195,6 +195,9 @@ benchmark_add_test(NAME memory_manager_test COMMAND memory_manager_test --benchm
compile_output_test(profiler_manager_test)
benchmark_add_test(NAME profiler_manager_test COMMAND profiler_manager_test --benchmark_min_time=0.01s)

compile_benchmark_test(profiler_manager_iterations_test)
benchmark_add_test(NAME profiler_manager_iterations COMMAND profiler_manager_iterations_test)

# MSVC does not allow to set the language standard to C++98/03.
if(NOT (MSVC OR CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC"))
compile_benchmark_test(cxx03_test)
61 changes: 61 additions & 0 deletions test/profiler_manager_iterations_test.cc
@@ -0,0 +1,61 @@
#include <cassert>
#include <cstdlib>
#include <memory>
#include <vector>

#include "benchmark/benchmark.h"

// Tests that we can specify the number of profiler iterations with
// --benchmark_min_time=<NUM>x.
namespace {

int iteration_count = 0;
int end_profiler_iteration_count = 0;

class TestProfilerManager : public benchmark::ProfilerManager {
void AfterSetupStart() override { iteration_count = 0; }
void BeforeTeardownStop() override {
end_profiler_iteration_count = iteration_count;
}
};

class NullReporter : public benchmark::BenchmarkReporter {
public:
bool ReportContext(const Context& /*context*/) override { return true; }
void ReportRuns(const std::vector<Run>& /* report */) override {}
};

} // end namespace

static void BM_MyBench(benchmark::State& state) {
for (auto s : state) {
++iteration_count;
}
}
BENCHMARK(BM_MyBench);

int main(int argc, char** argv) {
// Make a fake argv and append the new --benchmark_profiler_iterations=<foo>
// to it.
int fake_argc = argc + 1;
const char** fake_argv = new const char*[static_cast<size_t>(fake_argc)];
for (int i = 0; i < argc; ++i) fake_argv[i] = argv[i];
fake_argv[argc] = "--benchmark_min_time=4x";

std::unique_ptr<benchmark::ProfilerManager> pm(new TestProfilerManager());
benchmark::RegisterProfilerManager(pm.get());

benchmark::Initialize(&fake_argc, const_cast<char**>(fake_argv));

NullReporter null_reporter;
const size_t returned_count =
benchmark::RunSpecifiedBenchmarks(&null_reporter, "BM_MyBench");
assert(returned_count == 1);

// Check the executed iters.
assert(end_profiler_iteration_count == 4);

benchmark::RegisterProfilerManager(nullptr);
delete[] fake_argv;
return 0;
}
1 change: 1 addition & 0 deletions test/profiler_manager_test.cc
@@ -1,5 +1,6 @@
// FIXME: WIP

#include <cassert>
#include <memory>

#include "benchmark/benchmark.h"
35 changes: 28 additions & 7 deletions tools/compare.py
@@ -85,7 +85,10 @@ def create_parser():
"-d",
"--dump_to_json",
dest="dump_to_json",
help="Additionally, dump benchmark comparison output to this file in JSON format.",
help=(
"Additionally, dump benchmark comparison output to this file in"
" JSON format."
),
)

utest = parser.add_argument_group()
@@ -94,8 +97,15 @@ def create_parser():
dest="utest",
default=True,
action="store_false",
help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(
report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS
help=(
"The tool can do a two-tailed Mann-Whitney U test with the null"
" hypothesis that it is equally likely that a randomly selected"
" value from one sample will be less than or greater than a"
" randomly selected value from a second sample.\nWARNING: requires"
f" **LARGE** (no less than {report.UTEST_OPTIMAL_REPETITIONS})"
" number of repetitions to be meaningful!\nThe test is being done"
f" by default, if at least {report.UTEST_MIN_REPETITIONS}"
" repetitions were done.\nThis option can disable the U Test."
),
)
alpha_default = 0.05
@@ -105,7 +115,9 @@ def create_parser():
default=alpha_default,
type=float,
help=(
"significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)"
"significance level alpha. if the calculated p-value is below this"
" value, then the result is said to be statistically significant"
" and the null hypothesis is rejected.\n(default: %0.4f)"
)
% alpha_default,
)
@@ -116,7 +128,10 @@

parser_a = subparsers.add_parser(
"benchmarks",
help="The most simple use-case, compare all the output of these two benchmarks",
help=(
"The most simple use-case, compare all the output of these two"
" benchmarks"
),
)
baseline = parser_a.add_argument_group("baseline", "The benchmark baseline")
baseline.add_argument(
@@ -180,7 +195,10 @@ def create_parser():

parser_c = subparsers.add_parser(
"benchmarksfiltered",
help="Compare filter one of first benchmark with filter two of the second benchmark",
help=(
"Compare filter one of first benchmark with filter two of the"
" second benchmark"
),
)
baseline = parser_c.add_argument_group("baseline", "The benchmark baseline")
baseline.add_argument(
@@ -205,7 +223,10 @@ def create_parser():
metavar="test_contender",
type=argparse.FileType("r"),
nargs=1,
help="The second benchmark executable or JSON output file, that will be compared against the baseline",
help=(
"The second benchmark executable or JSON output file, that will be"
" compared against the baseline"
),
)
contender.add_argument(
"filter_contender",
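The reflowed --no-utest help text above describes a two-tailed Mann-Whitney U test over repeated measurements. A standalone sketch of that test using SciPy, with made-up per-repetition timings; compare.py's actual implementation lives in the report module referenced in the help text and may differ in detail:

from scipy import stats

# Illustrative per-repetition wall times (seconds) for baseline and contender.
# As the help text warns, too few repetitions make the test meaningless.
baseline = [0.101, 0.103, 0.099, 0.102, 0.100, 0.104, 0.101, 0.102, 0.103]
contender = [0.097, 0.096, 0.098, 0.095, 0.097, 0.096, 0.098, 0.097, 0.096]

# Two-tailed test: the null hypothesis is that a value drawn from one sample
# is equally likely to be less than or greater than one from the other sample.
_, p_value = stats.mannwhitneyu(baseline, contender, alternative="two-sided")

alpha = 0.05  # mirrors the --alpha default in create_parser()
if p_value < alpha:
    print(f"p={p_value:.4f} < {alpha}: statistically significant difference")
else:
    print(f"p={p_value:.4f} >= {alpha}: cannot reject the null hypothesis")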