Fix stdout/stderr redirects #25

Closed
wants to merge 14 commits
Changes from 12 commits
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -16,4 +16,4 @@ repos:
rev: v0.3.3
hooks:
- id: ruff
- id: ruff-format
- id: ruff-format
5 changes: 4 additions & 1 deletion pyproject.toml
@@ -72,9 +72,12 @@ include = ["/src"]
python_version = "3.12"

[tool.ruff]
target-version = "py37"
line-length = 88

[tool.ruff.lint]
select = ["E", "F", "I", "C"]
select = ["E", "F", "I", "C", "TCH", "FA", "UP"]
flake8-type-checking = {exempt-modules = [], strict = true}

[tool.isort]
line_length = 88
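For context on the new lint selection: TCH (flake8-type-checking, here in strict mode), FA (flake8-future-annotations), and UP (pyupgrade) drive the import and annotation changes in the rest of this diff. A minimal sketch of the layout these rules push towards (module and names are illustrative, not from this PR):

# Illustrative module: the import/annotation style enforced by the
# TCH/FA/UP selection above.
from __future__ import annotations  # FA: annotations stay lazily evaluated

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # TCH (strict): imports used only in type hints live here, so they add
    # no runtime cost and cannot create import cycles.
    from collections.abc import Callable

    import pytest


def configure(config: pytest.Config, hook: Callable[[], None] | None = None) -> None:
    # UP/FA: built-in generics and `X | None` are fine even with
    # target-version py37, because annotations are never evaluated at runtime.
    if hook is not None:
        hook()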
1 change: 1 addition & 0 deletions src/pytest_codspeed/_wrapper/.gitignore
@@ -1 +1,2 @@
dist_callgrind_wrapper.*
build.lock
4 changes: 3 additions & 1 deletion src/pytest_codspeed/_wrapper/__init__.py
@@ -1,3 +1,5 @@
from __future__ import annotations

import os
from typing import TYPE_CHECKING

@@ -22,7 +24,7 @@ def _get_ffi():
return ffi


def get_lib() -> "LibType":
def get_lib() -> LibType:
try:
ffi = _get_ffi()
build_lock = FileLock(f"{_wrapper_dir}/build.lock")
209 changes: 90 additions & 119 deletions src/pytest_codspeed/plugin.py
@@ -1,19 +1,12 @@
from __future__ import annotations

import functools
import gc
import os
import pkgutil
import sys
from dataclasses import dataclass, field
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Tuple,
TypeVar,
Union,
)
from typing import TYPE_CHECKING

import pytest
from _pytest.fixtures import FixtureManager
@@ -24,15 +17,20 @@
from ._wrapper import get_lib

if TYPE_CHECKING:
from typing import Any, Callable, Iterator, ParamSpec, TypeVar

from ._wrapper import LibType

T = TypeVar("T")
P = ParamSpec("P")

IS_PYTEST_BENCHMARK_INSTALLED = pkgutil.find_loader("pytest_benchmark") is not None
SUPPORTS_PERF_TRAMPOLINE = sys.version_info >= (3, 12)
BEFORE_PYTEST_8_1_1 = pytest.version_tuple < (8, 1, 1)


@pytest.hookimpl(trylast=True)
def pytest_addoption(parser: "pytest.Parser"):
def pytest_addoption(parser: pytest.Parser):
group = parser.getgroup("CodSpeed benchmarking")
group.addoption(
"--codspeed",
@@ -46,20 +44,20 @@ def pytest_addoption(parser: "pytest.Parser"):
class CodSpeedPlugin:
is_codspeed_enabled: bool
should_measure: bool
lib: Optional["LibType"]
disabled_plugins: Tuple[str, ...]
lib: LibType | None
disabled_plugins: tuple[str, ...]
benchmark_count: int = field(default=0, hash=False, compare=False)


PLUGIN_NAME = "codspeed_plugin"


def get_plugin(config: "pytest.Config") -> "CodSpeedPlugin":
def get_plugin(config: pytest.Config) -> CodSpeedPlugin:
return config.pluginmanager.get_plugin(PLUGIN_NAME)


@pytest.hookimpl(tryfirst=True)
def pytest_configure(config: "pytest.Config"):
def pytest_configure(config: pytest.Config):
config.addinivalue_line(
"markers", "codspeed_benchmark: mark an entire test for codspeed benchmarking"
)
@@ -73,7 +71,7 @@ def pytest_configure(config: "pytest.Config"):
lib = get_lib() if should_measure else None
if lib is not None:
lib.dump_stats_at(f"Metadata: pytest-codspeed {__version__}".encode("ascii"))
disabled_plugins: List[str] = []
disabled_plugins: list[str] = []
# Disable pytest-benchmark if codspeed is enabled
if is_codspeed_enabled and IS_PYTEST_BENCHMARK_INSTALLED:
object.__setattr__(config.option, "benchmark_disable", True)
@@ -89,7 +87,7 @@ def pytest_configure(config: "pytest.Config"):
config.pluginmanager.register(plugin, PLUGIN_NAME)


def pytest_plugin_registered(plugin, manager: "pytest.PytestPluginManager"):
def pytest_plugin_registered(plugin, manager: pytest.PytestPluginManager):
"""Patch the benchmark fixture to use the codspeed one if codspeed is enabled"""
if IS_PYTEST_BENCHMARK_INSTALLED and isinstance(plugin, FixtureManager):
fixture_manager = plugin
@@ -111,7 +109,7 @@ def pytest_plugin_registered(plugin, manager: "pytest.PytestPluginManager"):


@pytest.hookimpl(trylast=True)
def pytest_report_header(config: "pytest.Config"):
def pytest_report_header(config: pytest.Config):
out = [
f"codspeed: {__version__} "
f"(callgraph: {'enabled' if SUPPORTS_PERF_TRAMPOLINE else 'not supported'})"
@@ -132,24 +130,24 @@ def pytest_report_header(config: "pytest.Config"):
return "\n".join(out)


def has_benchmark_fixture(item: "pytest.Item") -> bool:
def has_benchmark_fixture(item: pytest.Item) -> bool:
item_fixtures = getattr(item, "fixturenames", [])
return "benchmark" in item_fixtures or "codspeed_benchmark" in item_fixtures


def has_benchmark_marker(item: "pytest.Item") -> bool:
def has_benchmark_marker(item: pytest.Item) -> bool:
return (
item.get_closest_marker("codspeed_benchmark") is not None
or item.get_closest_marker("benchmark") is not None
)


def should_benchmark_item(item: "pytest.Item") -> bool:
def should_benchmark_item(item: pytest.Item) -> bool:
return has_benchmark_fixture(item) or has_benchmark_marker(item)


@pytest.hookimpl()
def pytest_sessionstart(session: "pytest.Session"):
def pytest_sessionstart(session: pytest.Session):
plugin = get_plugin(session.config)
if plugin.is_codspeed_enabled:
plugin.benchmark_count = 0
@@ -159,7 +157,7 @@ def pytest_sessionstart(session: "pytest.Session"):

@pytest.hookimpl(trylast=True)
def pytest_collection_modifyitems(
session: "pytest.Session", config: "pytest.Config", items: "List[pytest.Item]"
session: pytest.Session, config: pytest.Config, items: list[pytest.Item]
):
plugin = get_plugin(config)
if plugin.is_codspeed_enabled:
@@ -174,131 +172,104 @@ def pytest_collection_modifyitems(
items[:] = selected


def _run_with_instrumentation(
lib: "LibType",
nodeId: str,
config: "pytest.Config",
fn: Callable[..., Any],
*args,
**kwargs,
):
is_gc_enabled = gc.isenabled()
if is_gc_enabled:
gc.collect()
gc.disable()

result = None

def __codspeed_root_frame__():
nonlocal result
result = fn(*args, **kwargs)

if SUPPORTS_PERF_TRAMPOLINE:
# Warmup CPython performance map cache
__codspeed_root_frame__()
lib.zero_stats()
lib.start_instrumentation()
__codspeed_root_frame__()
lib.stop_instrumentation()
uri = get_git_relative_uri(nodeId, config.rootpath)
lib.dump_stats_at(uri.encode("ascii"))
if is_gc_enabled:
gc.enable()

return result


@pytest.hookimpl(tryfirst=True)
def pytest_runtest_protocol(item: "pytest.Item", nextitem: Union["pytest.Item", None]):
plugin = get_plugin(item.config)
if not plugin.is_codspeed_enabled or not should_benchmark_item(item):
return (
None # Defer to the default test protocol since no benchmarking is needed
)

if has_benchmark_fixture(item):
return None # Instrumentation is handled by the fixture

plugin.benchmark_count += 1
if not plugin.should_measure:
return None # Benchmark counted but will be run in the default protocol

# Setup phase
reports = []
ihook = item.ihook
ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
setup_call = pytest.CallInfo.from_call(
lambda: ihook.pytest_runtest_setup(item=item, nextitem=nextitem), "setup"
)
setup_report = ihook.pytest_runtest_makereport(item=item, call=setup_call)
ihook.pytest_runtest_logreport(report=setup_report)
reports.append(setup_report)
# Run phase
if setup_report.passed and not item.config.getoption("setuponly"):
assert plugin.lib is not None
runtest_call = pytest.CallInfo.from_call(
lambda: _run_with_instrumentation(
plugin.lib, item.nodeid, item.config, item.runtest
),
"call",
)
runtest_report = ihook.pytest_runtest_makereport(item=item, call=runtest_call)
ihook.pytest_runtest_logreport(report=runtest_report)
reports.append(runtest_report)

# Teardown phase
teardown_call = pytest.CallInfo.from_call(
lambda: ihook.pytest_runtest_teardown(item=item, nextitem=nextitem), "teardown"
)
teardown_report = ihook.pytest_runtest_makereport(item=item, call=teardown_call)
ihook.pytest_runtest_logreport(report=teardown_report)
reports.append(teardown_report)
ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)

return reports # Deny further protocol hooks execution


T = TypeVar("T")
def wrap_pyfunc_with_instrumentation(
lib: LibType,
nodeid: str,
config: pytest.Config,
testfunction: Callable[P, T],
) -> Callable[P, T]:
@functools.wraps(testfunction)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
def __codspeed_root_frame__():
return testfunction(*args, **kwargs)

is_gc_enabled = gc.isenabled()
if is_gc_enabled:
gc.collect()
gc.disable()
try:
if SUPPORTS_PERF_TRAMPOLINE:
# Warmup CPython performance map cache
__codspeed_root_frame__()
Comment on lines +191 to +193
Member

I think the warmup shouldn't be included in the wrapper but handled at the protocol level

Contributor Author

Ah, this also explains the issue we ran into on our own trying to use tmp_path in benchmark tests, e.g.:

@pytest.mark.benchmark
def test_tmp_path_benchmark(tmp_path: Path):
    tmp_path.mkdir()
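
A minimal sketch of why the warmup bites here (illustrative, not from this PR): under measurement the wrapped test body runs twice with the same fixture values, so any non-idempotent side effect fails on the second, instrumented call.

import pytest


@pytest.mark.benchmark
def test_non_idempotent(tmp_path):
    target = tmp_path / "out"
    # With the perf-trampoline warmup, the body executes twice against the
    # same tmp_path, so the second mkdir() raises FileExistsError.
    target.mkdir()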


lib.zero_stats()
lib.start_instrumentation()
try:
return __codspeed_root_frame__()
finally:
lib.stop_instrumentation()
uri = get_git_relative_uri(nodeid, config.rootpath)
lib.dump_stats_at(uri.encode("ascii"))
Comment on lines +195 to +202
Member

since you introduced this try block, can you add a test bench raising an exception?
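
A minimal sketch of such a test, purely illustrative (the test names and the use of pytest's pytester fixture are assumptions, not part of this PR): the exception should surface as a normal test failure while the finally blocks still stop instrumentation, dump stats, and re-enable GC.

import pytest


@pytest.mark.benchmark
def test_bench_raising():
    # The wrapper's try/finally should stop instrumentation, dump stats,
    # and re-enable GC even though the benchmarked body raises.
    raise RuntimeError("boom")


def test_raising_bench_reports_failure(pytester):
    # Assumes the pytester plugin is enabled (e.g. pytest_plugins = "pytester"
    # in conftest.py); the raising benchmark should show up as a plain failure.
    pytester.makepyfile(
        """
        import pytest

        @pytest.mark.benchmark
        def test_bench_raising():
            raise RuntimeError("boom")
        """
    )
    result = pytester.runpytest("--codspeed")
    result.assert_outcomes(failed=1)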

finally:
if is_gc_enabled:
gc.enable()

return wrapper


@pytest.hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem: pytest.Function) -> Iterator[None]:
plugin = get_plugin(pyfuncitem.config)
if (
plugin.is_codspeed_enabled
and should_benchmark_item(pyfuncitem)
and not has_benchmark_fixture(pyfuncitem)
):
plugin.benchmark_count += 1
if plugin.lib is not None and plugin.should_measure:
pyfuncitem.obj = wrap_pyfunc_with_instrumentation(
plugin.lib,
pyfuncitem.nodeid,
pyfuncitem.config,
pyfuncitem.obj,
)
yield
Comment on lines +210 to +226
Member

Love this refactor!
However, when a benchmark is defined from a fixture, we should still perform the warmup at this level and not in the benchmark fixture (see the other comment I left on it).



class BenchmarkFixture:
"""The fixture that can be used to benchmark a function."""

def __init__(self, request: "pytest.FixtureRequest"):
self.extra_info: Dict = {}
def __init__(self, request: pytest.FixtureRequest):
self.extra_info: dict = {}

self._request = request

def __call__(self, func: Callable[..., T], *args: Any, **kwargs: Any) -> T:
config = self._request.config
plugin = get_plugin(config)
plugin.benchmark_count += 1
if plugin.is_codspeed_enabled and plugin.should_measure:
assert plugin.lib is not None
return _run_with_instrumentation(
plugin.lib, self._request.node.nodeid, config, func, *args, **kwargs
)
if (
plugin.is_codspeed_enabled
and plugin.lib is not None
and plugin.should_measure
):
return wrap_pyfunc_with_instrumentation(
plugin.lib,
self._request.node.nodeid,
config,
func,
)(*args, **kwargs)
Comment on lines +241 to +251
Member

When used with the fixture, the fixture payload shouldn't be executed and wrapped again within pyfunc_call, since it's already wrapped by the test itself.

for example, this would probably fail because of the warmup:

# This doesn't have any pytest marker but defines a bench since its using the fixture
def test_bench(benchmark):
    # ... some preprocessing
    called_once = False

    @benchmark
    def _():
        nonlocal called_once
        if not called_once:
            called_once = True
        else:
            raise Exception("bench codewas called twice but actual bench context only once")

Member

Don't hesitate to add that as an additional test

Contributor Author
@kenodegard Mar 26, 2024

I started to get really confused about the intention here, since the above example also fails on master; see #30

I suspect we need to do something similar to what pytest-rerunfailures does to clear the cached results between the cache priming run and the instrumented run: https://github.com/pytest-dev/pytest-rerunfailures/blob/a53b9344c0d7a491a3cc53d91c7319696651d21b/src/pytest_rerunfailures.py#L565-L567
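
A rough sketch of what clearing cached fixture results between the priming run and the instrumented run could look like, using private pytest internals purely for illustration (an assumption on my part, not the pytest-rerunfailures code and not part of this PR):

import pytest


def _reset_cached_fixture_results(item: pytest.Function) -> None:
    # Illustrative only: drop cached fixture results so a second run would
    # re-execute fixture setup. Relies on private pytest internals
    # (Function._fixtureinfo, FixtureDef.cached_result) and does not run
    # finalizers, so it is a sketch rather than a drop-in solution.
    for fixturedefs in item._fixtureinfo.name2fixturedefs.values():
        for fixturedef in fixturedefs:
            if getattr(fixturedef, "cached_result", None) is not None:
                fixturedef.cached_result = None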

else:
return func(*args, **kwargs)


@pytest.fixture(scope="function")
def codspeed_benchmark(request: "pytest.FixtureRequest") -> Callable:
def codspeed_benchmark(request: pytest.FixtureRequest) -> Callable:
return BenchmarkFixture(request)


if not IS_PYTEST_BENCHMARK_INSTALLED:

@pytest.fixture(scope="function")
def benchmark(codspeed_benchmark, request: "pytest.FixtureRequest"):
def benchmark(codspeed_benchmark, request: pytest.FixtureRequest):
"""
Compatibility with pytest-benchmark
"""
return codspeed_benchmark


@pytest.hookimpl()
def pytest_sessionfinish(session: "pytest.Session", exitstatus):
def pytest_sessionfinish(session: pytest.Session, exitstatus):
plugin = get_plugin(session.config)
if plugin.is_codspeed_enabled:
reporter = session.config.pluginmanager.get_plugin("terminalreporter")