-
Notifications
You must be signed in to change notification settings - Fork 9
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Fix stdout
/stderr
redirects
#25
Changes from all commits
84b7619
d12f6b7
3d3ec77
9c131c1
003bbe6
887ad44
35ab172
14446c1
59ea7ad
2b026dd
7f320b9
1b9a580
d3d8405
26b5d09
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1 +1,2 @@ | ||
dist_callgrind_wrapper.* | ||
build.lock |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,5 +1,6 @@ | ||
from __future__ import annotations | ||
|
||
import functools | ||
import gc | ||
import os | ||
import pkgutil | ||
|
@@ -16,11 +17,12 @@ | |
from ._wrapper import get_lib | ||
|
||
if TYPE_CHECKING: | ||
from typing import Any, Callable, TypeVar | ||
from typing import Any, Callable, Iterator, ParamSpec, TypeVar | ||
|
||
from ._wrapper import LibType | ||
|
||
T = TypeVar("T") | ||
P = ParamSpec("P") | ||
|
||
IS_PYTEST_BENCHMARK_INSTALLED = pkgutil.find_loader("pytest_benchmark") is not None | ||
SUPPORTS_PERF_TRAMPOLINE = sys.version_info >= (3, 12) | ||
|
@@ -170,88 +172,58 @@ def pytest_collection_modifyitems( | |
items[:] = selected | ||
|
||
|
||
def _run_with_instrumentation( | ||
def wrap_pyfunc_with_instrumentation( | ||
lib: LibType, | ||
nodeId: str, | ||
nodeid: str, | ||
config: pytest.Config, | ||
fn: Callable[..., Any], | ||
*args, | ||
**kwargs, | ||
): | ||
is_gc_enabled = gc.isenabled() | ||
if is_gc_enabled: | ||
gc.collect() | ||
gc.disable() | ||
|
||
result = None | ||
|
||
def __codspeed_root_frame__(): | ||
nonlocal result | ||
result = fn(*args, **kwargs) | ||
|
||
if SUPPORTS_PERF_TRAMPOLINE: | ||
# Warmup CPython performance map cache | ||
__codspeed_root_frame__() | ||
lib.zero_stats() | ||
lib.start_instrumentation() | ||
__codspeed_root_frame__() | ||
lib.stop_instrumentation() | ||
uri = get_git_relative_uri(nodeId, config.rootpath) | ||
lib.dump_stats_at(uri.encode("ascii")) | ||
if is_gc_enabled: | ||
gc.enable() | ||
|
||
return result | ||
|
||
|
||
@pytest.hookimpl(tryfirst=True) | ||
def pytest_runtest_protocol(item: pytest.Item, nextitem: pytest.Item | None): | ||
plugin = get_plugin(item.config) | ||
if not plugin.is_codspeed_enabled or not should_benchmark_item(item): | ||
return ( | ||
None # Defer to the default test protocol since no benchmarking is needed | ||
) | ||
|
||
if has_benchmark_fixture(item): | ||
return None # Instrumentation is handled by the fixture | ||
|
||
plugin.benchmark_count += 1 | ||
if not plugin.should_measure: | ||
return None # Benchmark counted but will be run in the default protocol | ||
|
||
# Setup phase | ||
reports = [] | ||
ihook = item.ihook | ||
ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location) | ||
setup_call = pytest.CallInfo.from_call( | ||
lambda: ihook.pytest_runtest_setup(item=item, nextitem=nextitem), "setup" | ||
) | ||
setup_report = ihook.pytest_runtest_makereport(item=item, call=setup_call) | ||
ihook.pytest_runtest_logreport(report=setup_report) | ||
reports.append(setup_report) | ||
# Run phase | ||
if setup_report.passed and not item.config.getoption("setuponly"): | ||
assert plugin.lib is not None | ||
runtest_call = pytest.CallInfo.from_call( | ||
lambda: _run_with_instrumentation( | ||
plugin.lib, item.nodeid, item.config, item.runtest | ||
), | ||
"call", | ||
) | ||
runtest_report = ihook.pytest_runtest_makereport(item=item, call=runtest_call) | ||
ihook.pytest_runtest_logreport(report=runtest_report) | ||
reports.append(runtest_report) | ||
|
||
# Teardown phase | ||
teardown_call = pytest.CallInfo.from_call( | ||
lambda: ihook.pytest_runtest_teardown(item=item, nextitem=nextitem), "teardown" | ||
) | ||
teardown_report = ihook.pytest_runtest_makereport(item=item, call=teardown_call) | ||
ihook.pytest_runtest_logreport(report=teardown_report) | ||
reports.append(teardown_report) | ||
ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location) | ||
|
||
return reports # Deny further protocol hooks execution | ||
testfunction: Callable[P, T], | ||
) -> Callable[P, T]: | ||
@functools.wraps(testfunction) | ||
def wrapper(*args: P.args, **kwargs: P.kwargs) -> T: | ||
def __codspeed_root_frame__(): | ||
return testfunction(*args, **kwargs) | ||
|
||
is_gc_enabled = gc.isenabled() | ||
if is_gc_enabled: | ||
gc.collect() | ||
gc.disable() | ||
try: | ||
if SUPPORTS_PERF_TRAMPOLINE: | ||
# Warmup CPython performance map cache | ||
__codspeed_root_frame__() | ||
|
||
lib.zero_stats() | ||
lib.start_instrumentation() | ||
try: | ||
return __codspeed_root_frame__() | ||
finally: | ||
lib.stop_instrumentation() | ||
uri = get_git_relative_uri(nodeid, config.rootpath) | ||
lib.dump_stats_at(uri.encode("ascii")) | ||
Comment on lines
+195
to
+202
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Since you introduced this try block, can you add a test bench raising an exception? |
||
finally: | ||
if is_gc_enabled: | ||
gc.enable() | ||
|
||
return wrapper | ||
|
||
|
||
@pytest.hookimpl(hookwrapper=True) | ||
def pytest_pyfunc_call(pyfuncitem: pytest.Function) -> Iterator[None]: | ||
plugin = get_plugin(pyfuncitem.config) | ||
if ( | ||
plugin.is_codspeed_enabled | ||
and should_benchmark_item(pyfuncitem) | ||
and not has_benchmark_fixture(pyfuncitem) | ||
): | ||
plugin.benchmark_count += 1 | ||
if plugin.lib is not None and plugin.should_measure: | ||
pyfuncitem.obj = wrap_pyfunc_with_instrumentation( | ||
plugin.lib, | ||
pyfuncitem.nodeid, | ||
pyfuncitem.config, | ||
pyfuncitem.obj, | ||
) | ||
yield | ||
Comment on lines
+210
to
+226
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Love this refactor! |
||
|
||
|
||
class BenchmarkFixture: | ||
|
@@ -266,11 +238,17 @@ def __call__(self, func: Callable[..., T], *args: Any, **kwargs: Any) -> T: | |
config = self._request.config | ||
plugin = get_plugin(config) | ||
plugin.benchmark_count += 1 | ||
if plugin.is_codspeed_enabled and plugin.should_measure: | ||
assert plugin.lib is not None | ||
return _run_with_instrumentation( | ||
plugin.lib, self._request.node.nodeid, config, func, *args, **kwargs | ||
) | ||
if ( | ||
plugin.is_codspeed_enabled | ||
and plugin.lib is not None | ||
and plugin.should_measure | ||
): | ||
return wrap_pyfunc_with_instrumentation( | ||
plugin.lib, | ||
self._request.node.nodeid, | ||
config, | ||
func, | ||
)(*args, **kwargs) | ||
Comment on lines
+241
to
+251
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. When used with the fixture, the fixture payload shouldn't be executed wrapped again within pyfunc_call since it's already wrapped by the test itself. For example, this would probably fail because of the warmup: # This doesn't have any pytest marker but defines a bench since it's using the fixture
def test_bench(benchmark):
# ... some preprocessing
called_once = False
@benchmark
def _():
nonlocal called_once
if not called_once:
called_once = True
else:
raise Exception("bench code was called twice but actual bench context only once") There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Don't hesitate to add that as an additional test. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I started to get really confused with the intention here since the above example also fails with […]. I suspect we need to do something similar to what […] |
||
else: | ||
return func(*args, **kwargs) | ||
|
||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I think the warmup shouldn't be included in the wrapper but handled at the protocol level
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Ah, this also explains the issue we ran into on our own trying to use
tmp_path
in benchmark tests, e.g.: