From 97580ae4bee836fe4ae569b37b4b5f48a641e92c Mon Sep 17 00:00:00 2001 From: Kyle Altendorf Date: Tue, 14 Nov 2023 15:12:33 -0500 Subject: [PATCH 01/22] add tests.ether --- tests/conftest.py | 22 +++++++++++++++++----- tests/util/misc.py | 4 ++-- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 9b05f46a309d..ad4d2ce7983e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -19,6 +19,7 @@ # TODO: update after resolution in https://github.com/pytest-dev/pytest/issues/7469 from _pytest.fixtures import SubRequest +from pytest import MonkeyPatch from chia.clvm.spend_sim import CostLogger @@ -62,6 +63,7 @@ from chia.util.task_timing import start_task_instrumentation, stop_task_instrumentation from chia.wallet.wallet_node import WalletNode from chia.wallet.wallet_node_api import WalletNodeAPI +from tests import ether from tests.core.data_layer.util import ChiaRoot from tests.core.node_height import node_height_at_least from tests.simulation.test_simulation import test_constants_modified @@ -77,6 +79,20 @@ from chia.util.keyring_wrapper import KeyringWrapper +@pytest.fixture(autouse=True) +def ether_record_property_fixture(record_property: Callable[[str, object], None]) -> Iterator[None]: + with MonkeyPatch.context() as monkeypatch_context: + monkeypatch_context.setattr(ether, "record_property", record_property, raising=False) + yield + + +@pytest.fixture(autouse=True) +def ether_node_name_fixture(request: SubRequest) -> Iterator[None]: + with MonkeyPatch.context() as monkeypatch_context: + monkeypatch_context.setattr(ether, "node_name", request.node.name, raising=False) + yield + + @pytest.fixture(scope="session") def anyio_backend(): return "asyncio" @@ -109,16 +125,12 @@ def benchmark_runner_overhead_fixture() -> float: @pytest.fixture(name="benchmark_runner") def benchmark_runner_fixture( - request: SubRequest, benchmark_runner_overhead: float, - record_property: Callable[[str, object], None], benchmark_repeat: int, ) -> BenchmarkRunner: - label = request.node.name return BenchmarkRunner( - label=label, + label=ether.node_name, overhead=benchmark_runner_overhead, - record_property=record_property, ) diff --git a/tests/util/misc.py b/tests/util/misc.py index 30c9bc56a21a..a3b46bc5b43b 100644 --- a/tests/util/misc.py +++ b/tests/util/misc.py @@ -28,6 +28,7 @@ from chia.util.hash import std_hash from chia.util.ints import uint64 from chia.wallet.util.compute_hints import HintedCoin +from tests import ether from tests.core.data_layer.util import ChiaRoot @@ -305,13 +306,12 @@ class BenchmarkRunner: enable_assertion: bool = True label: Optional[str] = None overhead: Optional[float] = None - record_property: Optional[Callable[[str, object], None]] = None @functools.wraps(_AssertRuntime) def assert_runtime(self, *args: Any, **kwargs: Any) -> _AssertRuntime: kwargs.setdefault("enable_assertion", self.enable_assertion) kwargs.setdefault("overhead", self.overhead) - kwargs.setdefault("record_property", self.record_property) + kwargs.setdefault("record_property", ether.record_property) return _AssertRuntime(*args, **kwargs) From 91bf4381bb819d068669ec246633a88ae06bcc6d Mon Sep 17 00:00:00 2001 From: Kyle Altendorf Date: Tue, 14 Nov 2023 15:21:16 -0500 Subject: [PATCH 02/22] add new tests/ether.py --- tests/ether.py | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 tests/ether.py diff --git a/tests/ether.py b/tests/ether.py new file mode 100644 index 000000000000..3ae3bdc47405 --- /dev/null +++ b/tests/ether.py @@ -0,0 +1,6 @@ 
+from __future__ import annotations + +from typing import Callable + +record_property: Callable[[str, object], None] +node_name: str From a50b22158390f7e40cb8ce21049f3b5ee9b6de1a Mon Sep 17 00:00:00 2001 From: Kyle Altendorf Date: Wed, 15 Nov 2023 08:50:37 -0500 Subject: [PATCH 03/22] go time out assert --- .github/CODEOWNERS | 1 + .github/workflows/benchmarks.yml | 3 +- .github/workflows/test.yml | 6 + chia/simulator/time_out_assert.py | 102 ++++++++++++-- chia/util/misc.py | 7 + tests/conftest.py | 41 ++++-- tests/ether.py | 15 ++- tests/process_benchmarks.py | 217 ++++++++++++++++++++++-------- tests/util/misc.py | 198 ++++++++++++++++++++++++--- 9 files changed, 493 insertions(+), 97 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 39e84a7ed841..ad9920b5062f 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -2,3 +2,4 @@ /.github/* @Chia-Network/actions-reviewers /PRETTY_GOOD_PRACTICES.md @altendky @Chia-Network/required-reviewers /pylintrc @altendky @Chia-Network/required-reviewers +/tests/ether.py @altendky @Chia-Network/required-reviewers diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 35876ae0090a..cbcb92939071 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -124,4 +124,5 @@ jobs: - name: Add benchmark results to workflow summary if: always() run: | - python -m tests.process_benchmarks --xml junit-data/benchmarks.xml --markdown --link-prefix ${{ github.event.repository.html_url }}/blob/${{ github.sha }}/ --link-line-separator \#L >> "$GITHUB_STEP_SUMMARY" + # TODO: rename the file since it is more general + python -m tests.process_benchmarks --type benchmark --xml junit-data/benchmarks.xml --markdown --link-prefix ${{ github.event.repository.html_url }}/blob/${{ github.sha }}/ --link-line-separator \#L >> "$GITHUB_STEP_SUMMARY" diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index e1b95cc22f54..4444030a1fa7 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -147,6 +147,12 @@ jobs: path: junit-results/* if-no-files-found: error + - name: Add time out assert results to workflow summary + if: always() + run: | + # TODO: rename the file since it is more general + python -m tests.process_benchmarks --type time_out_assert --xml junit-results/junit.xml --markdown --link-prefix ${{ github.event.repository.html_url }}/blob/${{ github.sha }}/ --link-line-separator \#L >> "$GITHUB_STEP_SUMMARY" + - name: Download Coverage uses: actions/download-artifact@v3 with: diff --git a/chia/simulator/time_out_assert.py b/chia/simulator/time_out_assert.py index 9e400fdd5fe0..6cc8fb69d723 100644 --- a/chia/simulator/time_out_assert.py +++ b/chia/simulator/time_out_assert.py @@ -1,13 +1,18 @@ from __future__ import annotations import asyncio +import dataclasses +import json import logging import os +import pathlib import sys import time -from typing import Callable, Optional, overload +from typing import Any, Callable, ClassVar, Dict, Optional, Tuple, final, overload +import chia from chia.protocols.protocol_message_types import ProtocolMessageTypes +from chia.util.misc import caller_file_and_line log = logging.getLogger(__name__) @@ -51,21 +56,98 @@ def adjusted_timeout(timeout: Optional[float]) -> Optional[float]: return timeout + _system_delay +@final +@dataclasses.dataclass(frozen=True) +class TimeOutAssertData: + # TODO: deal with import directions etc so we can check this + # if TYPE_CHECKING: + # _protocol_check: ClassVar[DataTypeProtocol] = 
cast("TimeOutAssertData", None) + + tag: ClassVar[str] = "time_out_assert" + + duration: float + path: pathlib.Path + line: int + limit: float + timed_out: bool + + # TODO: can we make this not required maybe? + label: str = "" + + __match_args__: ClassVar[Tuple[str, ...]] = () + + @classmethod + def unmarshal(cls, marshalled: Dict[str, Any]) -> TimeOutAssertData: + return cls( + duration=marshalled["duration"], + path=pathlib.Path(marshalled["path"]), + line=int(marshalled["line"]), + limit=marshalled["limit"], + timed_out=marshalled["timed_out"], + ) + + def marshal(self) -> Dict[str, Any]: + return { + "duration": self.duration, + "path": self.path.as_posix(), + "line": self.line, + "limit": self.limit, + "timed_out": self.timed_out, + } + + async def time_out_assert_custom_interval(timeout: float, interval, function, value=True, *args, **kwargs): __tracebackhide__ = True + # TODO: wrong line when not called directly but instead from the regular time_out_assert? + entry_file, entry_line = caller_file_and_line() + timeout = adjusted_timeout(timeout=timeout) - start = time.time() - while time.time() - start < timeout: - if asyncio.iscoroutinefunction(function): - f_res = await function(*args, **kwargs) + start = time.monotonic() + duration = 0.0 + timed_out = False + try: + while True: + if asyncio.iscoroutinefunction(function): + f_res = await function(*args, **kwargs) + else: + f_res = function(*args, **kwargs) + + if value == f_res: + return None + + now = time.monotonic() + duration = now - start + + if duration > timeout: + timed_out = True + assert False, f"Timed assertion timed out after {timeout} seconds: expected {value!r}, got {f_res!r}" + + await asyncio.sleep(min(interval, timeout - duration)) + finally: + try: + # TODO: this import is going the wrong direction + from tests import ether + except ImportError: + pass else: - f_res = function(*args, **kwargs) - if value == f_res: - return None - await asyncio.sleep(interval) - assert False, f"Timed assertion timed out after {timeout} seconds: expected {value!r}, got {f_res!r}" + if ether.record_property is not None: + # name = JunitPropertyName(tag=TimeOutAssertData.tag, id=ether.test_id) + + data = TimeOutAssertData( + duration=duration, + path=pathlib.Path(entry_file).relative_to(pathlib.Path(chia.__file__).parent.parent), + line=entry_line, + limit=timeout, + timed_out=timed_out, + ) + + ether.record_property( + # json.dumps(name.marshal(), ensure_ascii=True, sort_keys=True), + data.tag, + json.dumps(data.marshal(), ensure_ascii=True, sort_keys=True), + ) async def time_out_assert(timeout: int, function, value=True, *args, **kwargs): diff --git a/chia/util/misc.py b/chia/util/misc.py index 903134c12d94..ec6f3791d8f9 100644 --- a/chia/util/misc.py +++ b/chia/util/misc.py @@ -7,6 +7,7 @@ import signal import sys from dataclasses import dataclass +from inspect import getframeinfo, stack from pathlib import Path from types import FrameType from typing import ( @@ -19,6 +20,7 @@ List, Optional, Sequence, + Tuple, TypeVar, Union, final, @@ -280,3 +282,8 @@ def setup_async_signal_handler(self, handler: AsyncHandler) -> None: self.setup_sync_signal_handler( handler=functools.partial(self.threadsafe_sync_signal_handler_for_async, handler=handler) ) + + +def caller_file_and_line(distance: int = 1) -> Tuple[str, int]: + caller = getframeinfo(stack()[distance + 1][0]) + return caller.filename, caller.lineno diff --git a/tests/conftest.py b/tests/conftest.py index ad4d2ce7983e..9a445e6df001 100644 --- a/tests/conftest.py +++ 
b/tests/conftest.py @@ -5,6 +5,7 @@ import dataclasses import datetime import functools +import json import math import multiprocessing import os @@ -67,7 +68,7 @@ from tests.core.data_layer.util import ChiaRoot from tests.core.node_height import node_height_at_least from tests.simulation.test_simulation import test_constants_modified -from tests.util.misc import BenchmarkRunner, GcMode, _AssertRuntime, measure_overhead +from tests.util.misc import BenchmarkRunner, GcMode, TestId, _AssertRuntime, measure_overhead multiprocessing.set_start_method("spawn") @@ -79,18 +80,17 @@ from chia.util.keyring_wrapper import KeyringWrapper -@pytest.fixture(autouse=True) -def ether_record_property_fixture(record_property: Callable[[str, object], None]) -> Iterator[None]: +@pytest.fixture(name="ether_setup", autouse=True) +def ether_setup_fixture(request: SubRequest, record_property: Callable[[str, object], None]) -> Iterator[None]: with MonkeyPatch.context() as monkeypatch_context: - monkeypatch_context.setattr(ether, "record_property", record_property, raising=False) + monkeypatch_context.setattr(ether, "test_id", TestId.create(node=request.node)) + monkeypatch_context.setattr(ether, "record_property", record_property) yield @pytest.fixture(autouse=True) -def ether_node_name_fixture(request: SubRequest) -> Iterator[None]: - with MonkeyPatch.context() as monkeypatch_context: - monkeypatch_context.setattr(ether, "node_name", request.node.name, raising=False) - yield +def record_test_id_property_fixture(ether_setup: None, record_property: Callable[[str, object], None]) -> None: + record_property("test_id", json.dumps(ether.test_id.marshal(), ensure_ascii=True, sort_keys=True)) @pytest.fixture(scope="session") @@ -129,7 +129,7 @@ def benchmark_runner_fixture( benchmark_repeat: int, ) -> BenchmarkRunner: return BenchmarkRunner( - label=ether.node_name, + test_id=ether.test_id, overhead=benchmark_runner_overhead, ) @@ -399,6 +399,13 @@ def pytest_addoption(parser: pytest.Parser): type=int, help=f"The number of times to run each benchmark, default {default_repeats}.", ) + group.addoption( + "--time-out-assert-repeats", + action="store", + default=default_repeats, + type=int, + help=f"The number of times to run each test with time out asserts, default {default_repeats}.", + ) def pytest_configure(config): @@ -424,6 +431,22 @@ def benchmark_repeat_fixture() -> int: globals()[benchmark_repeat_fixture.__name__] = benchmark_repeat_fixture + time_out_assert_repeats = config.getoption("--time-out-assert-repeats") + if time_out_assert_repeats != 1: + + @pytest.fixture( + name="time_out_assert_repeat", + autouse=True, + params=[ + pytest.param(repeat, id=f"time_out_assert_repeat{repeat:03d}") + for repeat in range(time_out_assert_repeats) + ], + ) + def time_out_assert_repeat_fixture(request: SubRequest) -> int: + return request.param + + globals()[time_out_assert_repeat_fixture.__name__] = time_out_assert_repeat_fixture + def pytest_collection_modifyitems(session, config: pytest.Config, items: List[pytest.Function]): # https://github.com/pytest-dev/pytest/issues/3730#issuecomment-567142496 diff --git a/tests/ether.py b/tests/ether.py index 3ae3bdc47405..4acb2c7a68d7 100644 --- a/tests/ether.py +++ b/tests/ether.py @@ -1,6 +1,15 @@ from __future__ import annotations -from typing import Callable +from typing import TYPE_CHECKING, Callable -record_property: Callable[[str, object], None] -node_name: str +if TYPE_CHECKING: + from tests.util.misc import TestId + +# NOTE: When using this module do not import the attributes 
directly, rather import +# something like `from tests import ether`. Importing attributes direclty will +# result in you likely getting the default `None` values since they are not +# populated until tests are running. + +# TODO: should we enforce checking every use for not None? +record_property: Callable[[str, object], None] = None # type: ignore[assignment] +test_id: TestId = None # type: ignore[assignment] diff --git a/tests/process_benchmarks.py b/tests/process_benchmarks.py index 6a570b94b583..c68badfb298d 100644 --- a/tests/process_benchmarks.py +++ b/tests/process_benchmarks.py @@ -1,5 +1,6 @@ from __future__ import annotations +import dataclasses import json import random import re @@ -7,17 +8,24 @@ from dataclasses import dataclass, field from pathlib import Path from statistics import StatisticsError, mean, stdev -from typing import Any, Dict, List, Set, TextIO, Tuple, final +from typing import Any, Dict, List, TextIO, Tuple, Type, final import click import lxml.etree +from chia.simulator.time_out_assert import TimeOutAssertData +from tests.util.misc import BenchmarkData, DataTypeProtocol, TestId + +supported_data_types: List[Type[DataTypeProtocol]] = [TimeOutAssertData, BenchmarkData] +supported_data_types_by_tag: Dict[str, Type[DataTypeProtocol]] = {cls.tag: cls for cls in supported_data_types} + @final @dataclass(frozen=True, order=True) class Result: file_path: Path test_path: Tuple[str, ...] + ids: Tuple[str, ...] label: str line: int = field(compare=False) durations: Tuple[float, ...] = field(compare=False) @@ -55,6 +63,17 @@ def sub(matchobj: re.Match[str]) -> str: return result +@final +@dataclasses.dataclass(frozen=True) +class EventId: + test_id: TestId + # TODO: this doesn't feel quite right, more a property than an identifier? i dunno + tag: str + line: int + path: Path + label: str + + @click.command(context_settings={"help_option_names": ["-h", "--help"]}) @click.option( "--xml", @@ -100,6 +119,15 @@ def sub(matchobj: re.Match[str]) -> str: help="🍿", show_default=True, ) +# TODO: subcommands? 
+@click.option( + "--type", + "tag", + type=click.Choice([cls.tag for cls in supported_data_types]), + help="The type of data to process", + required=True, + show_default=True, +) def main( xml_file: TextIO, link_prefix: str, @@ -108,71 +136,66 @@ def main( markdown: bool, percent_margin: int, randomoji: bool, + tag: str, ) -> None: + data_type = supported_data_types_by_tag[tag] + tree = lxml.etree.parse(xml_file) root = tree.getroot() - benchmarks = root.find("testsuite[@name='benchmarks']") - - # raw_durations: defaultdict[Tuple[str, ...], List[Result]] = defaultdict(list) - - cases_by_test_path: defaultdict[Tuple[str, ...], List[lxml.etree.Element]] = defaultdict(list) - for case in benchmarks.findall("testcase"): - raw_name = case.attrib["name"] - name = re.sub(r"(?P[-\[])benchmark_repeat\d{3}(?P[-\])])", sub, raw_name) - # TODO: seems to duplicate the class and function name, though not the parametrizations - test_path = ( - *case.attrib["classname"].split("."), - name, - ) - cases_by_test_path[test_path].append(case) - results: List[Result] = [] - for test_path, cases in cases_by_test_path.items(): - labels: Set[str] = set() + cases_by_test_id: defaultdict[TestId, List[lxml.etree.Element]] = defaultdict(list) + for suite in root.findall("testsuite"): + for case in suite.findall("testcase"): + test_id = TestId.unmarshal(json.loads(case.find("properties/property[@name='test_id']").attrib["value"])) + test_id = dataclasses.replace( + test_id, ids=tuple(id for id in test_id.ids if not id.startswith(f"{data_type.tag}_repeat")) + ) + cases_by_test_id[test_id].append(case) + + data_by_event_id: defaultdict[EventId, List[DataTypeProtocol]] = defaultdict(list) + for test_id, cases in cases_by_test_id.items(): for case in cases: - properties = case.find("properties") - labels.update(property.attrib["name"].partition(":")[2] for property in properties) - - for label in labels: - query = "properties/property[@name='{property}:{label}']" - - durations = [ - float(property.attrib["value"]) - for case in cases - for property in case.xpath(query.format(label=label, property="duration")) - ] - - a_case = cases[0] - - file_path: Path - [file_path] = [ - Path(property.attrib["value"]) for property in a_case.xpath(query.format(label=label, property="path")) - ] - - line: int - [line] = [ - int(property.attrib["value"]) for property in a_case.xpath(query.format(label=label, property="line")) - ] - - limit: float - [limit] = [ - float(property.attrib["value"]) - for property in a_case.xpath(query.format(label=label, property="limit")) - ] - - results.append( - Result( - file_path=file_path, - test_path=test_path, - line=line, - label=label, - durations=tuple(durations), - limit=limit, - ) + for property in case.findall(f"properties/property[@name='{tag}']"): + tag = property.attrib["name"] + data = supported_data_types_by_tag[tag].unmarshal(json.loads(property.attrib["value"])) + event_id = EventId(test_id=test_id, tag=tag, line=data.line, path=data.path, label=data.label) + data_by_event_id[event_id].append(data) + + results: List[Result] = [] + # TODO: yup, datas + for event_id, datas in data_by_event_id.items(): + results.append( + Result( + file_path=event_id.path, + test_path=event_id.test_id.test_path, + ids=event_id.test_id.ids, + line=event_id.line, + # label=label, + durations=tuple(data.duration for data in datas), + limit=datas[0].limit, + label=event_id.label, ) + ) + + handlers = { + BenchmarkData.tag: output_benchmark, + TimeOutAssertData.tag: output_time_out_assert, + } + handler = 
handlers[data_type.tag] + handler(link_line_separator, link_prefix, markdown, output, percent_margin, randomoji, results) + +def output_benchmark( + link_line_separator: str, + link_prefix: str, + markdown: bool, + output: TextIO, + percent_margin: int, + randomoji: bool, + results: List[Result], +) -> None: if not markdown: - for result in results: + for result in sorted(results): link = result.link(prefix=link_prefix, line_separator=link_line_separator) dumped = json.dumps(result.marshal()) output.write(f"{link} {dumped}\n") @@ -216,6 +239,8 @@ def main( percent_str = f"{percent:.0f} %" test_path_str = ".".join(result.test_path[1:]) + if len(result.ids) > 0: + test_path_str += f"[{'-'.join(result.ids)}]" test_link_text: str if result.label == "": @@ -235,6 +260,82 @@ def main( ) +def output_time_out_assert( + link_line_separator: str, + link_prefix: str, + markdown: bool, + output: TextIO, + percent_margin: int, + randomoji: bool, + results: List[Result], +) -> None: + if not markdown: + for result in sorted(results): + link = result.link(prefix=link_prefix, line_separator=link_line_separator) + dumped = json.dumps(result.marshal()) + output.write(f"{link} {dumped}\n") + else: + output.write("| Test | 🍿 | Mean | Max | 3σ | Limit | Percent |\n") + output.write("| --- | --- | --- | --- | --- | --- | --- |\n") + for result in sorted(results): + link_url = result.link(prefix=link_prefix, line_separator=link_line_separator) + + mean_str = "-" + three_sigma_str = "-" + if len(result.durations) > 1: + durations_mean = mean(result.durations) + mean_str = f"{durations_mean:.3f} s" + + try: + three_sigma_str = f"{durations_mean + 3 * stdev(result.durations):.3f} s" + except StatisticsError: + pass + + durations_max = max(result.durations) + max_str = f"{durations_max:.3f} s" + + limit_str = f"{result.limit:.3f} s" + + percent = 100 * durations_max / result.limit + if percent >= 100: + # intentionally biasing towards 🍄 + choices = "🍄🍄🍎🍅" # 🌶️🍉🍒🍓 + elif percent >= (100 - percent_margin): + choices = "🍋🍌" # 🍍🌽 + else: + choices = "🫛🍈🍏🍐🥝🥒🥬🥦" + + marker: str + if randomoji: + marker = random.choice(choices) + else: + marker = choices[0] + + percent_str = f"{percent:.0f} %" + + test_path_str = ".".join(result.test_path[1:]) + if len(result.ids) > 0: + test_path_str += f"[{'-'.join(result.ids)}]" + + test_link_text: str + if result.label == "": + # TODO: but could be in different files too + test_link_text = f"`{test_path_str}` - {result.line}" + else: + test_link_text = f"`{test_path_str}` - {result.label}" + + output.write( + f"| [{test_link_text}]({link_url})" + + f" | {marker}" + + f" | {mean_str}" + + f" | {max_str}" + + f" | {three_sigma_str}" + + f" | {limit_str}" + + f" | {percent_str}" + + " |\n" + ) + + if __name__ == "__main__": # pylint: disable = no-value-for-parameter main() diff --git a/tests/util/misc.py b/tests/util/misc.py index a3b46bc5b43b..7126743e34c0 100644 --- a/tests/util/misc.py +++ b/tests/util/misc.py @@ -5,28 +5,50 @@ import enum import functools import gc +import json import logging import os import pathlib import subprocess import sys from concurrent.futures import Future -from inspect import getframeinfo, stack +from pathlib import Path from statistics import mean from textwrap import dedent from time import thread_time from types import TracebackType -from typing import Any, Callable, Collection, Iterator, List, Optional, TextIO, Tuple, Type, Union +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ClassVar, + Collection, + Dict, + Iterator, + List, + Optional, + 
Protocol, + TextIO, + Tuple, + Type, + TypeVar, + Union, + cast, + final, +) import pytest + +# TODO: update after resolution in https://github.com/pytest-dev/pytest/issues/7469 +from _pytest.nodes import Node from chia_rs import Coin -from typing_extensions import Protocol, final import chia from chia.types.blockchain_format.sized_bytes import bytes32 from chia.types.condition_opcodes import ConditionOpcode from chia.util.hash import std_hash from chia.util.ints import uint64 +from chia.util.misc import caller_file_and_line from chia.wallet.util.compute_hints import HintedCoin from tests import ether from tests.core.data_layer.util import ChiaRoot @@ -64,11 +86,6 @@ def manage_gc(mode: GcMode) -> Iterator[None]: gc.disable() -def caller_file_and_line(distance: int = 1) -> Tuple[str, int]: - caller = getframeinfo(stack()[distance + 1][0]) - return caller.filename, caller.lineno - - @dataclasses.dataclass(frozen=True) class RuntimeResults: start: float @@ -204,6 +221,43 @@ def measure_runtime( print(results.block(label=label)) +@final +@dataclasses.dataclass(frozen=True) +class BenchmarkData: + if TYPE_CHECKING: + _protocol_check: ClassVar[DataTypeProtocol] = cast("BenchmarkData", None) + + tag: ClassVar[str] = "benchmark" + + duration: float + path: pathlib.Path + line: int + limit: float + + label: str + + __match_args__: ClassVar[Tuple[str, ...]] = () + + @classmethod + def unmarshal(cls, marshalled: Dict[str, Any]) -> BenchmarkData: + return cls( + duration=marshalled["duration"], + path=pathlib.Path(marshalled["path"]), + line=int(marshalled["line"]), + limit=marshalled["limit"], + label=marshalled["label"], + ) + + def marshal(self) -> Dict[str, Any]: + return { + "duration": self.duration, + "path": self.path.as_posix(), + "line": self.line, + "limit": self.limit, + "label": self.label, + } + + @final @dataclasses.dataclass class _AssertRuntime: @@ -230,6 +284,7 @@ class _AssertRuntime: # https://github.com/pytest-dev/pytest/issues/2057 seconds: float + # TODO: Optional? label: str = "" clock: Callable[[], float] = thread_time gc_mode: GcMode = GcMode.disable @@ -285,15 +340,18 @@ def __exit__( print(results.block(label=self.label)) if self.record_property is not None: - self.record_property(f"duration:{self.label}", results.duration) - - relative_path_str = ( - pathlib.Path(results.entry_file).relative_to(pathlib.Path(chia.__file__).parent.parent).as_posix() + data = BenchmarkData( + duration=results.duration, + path=pathlib.Path(self.entry_file).relative_to(pathlib.Path(chia.__file__).parent.parent), + line=self.entry_line, + limit=self.seconds, + label=self.label, ) - self.record_property(f"path:{self.label}", relative_path_str) - self.record_property(f"line:{self.label}", results.entry_line) - self.record_property(f"limit:{self.label}", self.seconds) + ether.record_property( + data.tag, + json.dumps(data.marshal(), ensure_ascii=True, sort_keys=True), + ) if exc_type is None and self.enable_assertion: __tracebackhide__ = True @@ -304,7 +362,7 @@ def __exit__( @dataclasses.dataclass class BenchmarkRunner: enable_assertion: bool = True - label: Optional[str] = None + test_id: Optional[TestId] = None overhead: Optional[float] = None @functools.wraps(_AssertRuntime) @@ -407,3 +465,111 @@ def create_logger(file: TextIO = sys.stdout) -> logging.Logger: logger.addHandler(hdlr=stream_handler) return logger + + +@final +@dataclasses.dataclass(frozen=True) +class TestId: + platform: str + test_path: Tuple[str, ...] + ids: Tuple[str, ...] 
+ + @classmethod + def create(cls, node: Node, platform: str = sys.platform) -> TestId: + test_path: List[str] = [] + temp_node = node + while True: + name: str + if isinstance(temp_node, pytest.Function): + name = temp_node.originalname + elif isinstance(temp_node, pytest.Package): + # must check before pytest.Module since Package is a subclass + name = temp_node.name + elif isinstance(temp_node, pytest.Module): + name = temp_node.name[:-3] + else: + name = temp_node.name + test_path.insert(0, name) + if isinstance(temp_node.parent, pytest.Session) or temp_node.parent is None: + break + temp_node = temp_node.parent + + # TODO: can we avoid parsing the id's etc from the node name? + test_name, delimiter, rest = node.name.partition("[") + ids: Tuple[str, ...] + if delimiter == "": + ids = () + else: + ids = tuple(rest.rstrip("]").split("-")) + + return cls( + platform=platform, + test_path=tuple(test_path), + ids=ids, + ) + + @classmethod + def unmarshal(cls, marshalled: Dict[str, Any]) -> TestId: + return cls( + platform=marshalled["platform"], + test_path=tuple(marshalled["test_path"]), + ids=tuple(marshalled["ids"]), + ) + + def marshal(self) -> Dict[str, Any]: + return { + "platform": self.platform, + "test_path": self.test_path, + "ids": self.ids, + } + + +# TODO: i think this class is now unused? +@final +@dataclasses.dataclass(frozen=True) +class JunitPropertyName: + tag: str + id: TestId + file_path: Path + line: int + + # @classmethod + # def create(cls, tag: str, test_id_segments: str, platform: str = sys.platform) -> JunitPropertyName: + # + # return cls(tag=tag, platform=platform, test_name=test_name, ids=ids) + + # @classmethod + # def unmarshal(cls, marshalled: Dict[str, Any]) -> JunitPropertyName: + # return cls( + # tag=marshalled["tag"], + # id=TestId.unmarshal(marshalled["id"]), + # ) + # + # def marshal(self) -> Dict[str, Any]: + # return { + # "tag": self.tag, + # "id": self.id.marshal(), + # } + + +T = TypeVar("T") + + +@dataclasses.dataclass(frozen=True) +class DataTypeProtocol(Protocol): + tag: ClassVar[str] + + line: int + path: Path + label: str + duration: float + limit: float + + __match_args__: ClassVar[Tuple[str, ...]] = () + + @classmethod + def unmarshal(self: Type[T], marshalled: Dict[str, Any]) -> T: + ... + + def marshal(self) -> Dict[str, Any]: + ... From bc365415b362536fae44fef49f54297be9f9eb38 Mon Sep 17 00:00:00 2001 From: Kyle Altendorf Date: Wed, 15 Nov 2023 08:51:44 -0500 Subject: [PATCH 04/22] remove unused JunitPropertyName --- chia/simulator/time_out_assert.py | 2 -- tests/util/misc.py | 28 ---------------------------- 2 files changed, 30 deletions(-) diff --git a/chia/simulator/time_out_assert.py b/chia/simulator/time_out_assert.py index 6cc8fb69d723..02862ed31ebf 100644 --- a/chia/simulator/time_out_assert.py +++ b/chia/simulator/time_out_assert.py @@ -133,8 +133,6 @@ async def time_out_assert_custom_interval(timeout: float, interval, function, va pass else: if ether.record_property is not None: - # name = JunitPropertyName(tag=TimeOutAssertData.tag, id=ether.test_id) - data = TimeOutAssertData( duration=duration, path=pathlib.Path(entry_file).relative_to(pathlib.Path(chia.__file__).parent.parent), diff --git a/tests/util/misc.py b/tests/util/misc.py index 7126743e34c0..951571197b9b 100644 --- a/tests/util/misc.py +++ b/tests/util/misc.py @@ -524,34 +524,6 @@ def marshal(self) -> Dict[str, Any]: } -# TODO: i think this class is now unused? 
-@final -@dataclasses.dataclass(frozen=True) -class JunitPropertyName: - tag: str - id: TestId - file_path: Path - line: int - - # @classmethod - # def create(cls, tag: str, test_id_segments: str, platform: str = sys.platform) -> JunitPropertyName: - # - # return cls(tag=tag, platform=platform, test_name=test_name, ids=ids) - - # @classmethod - # def unmarshal(cls, marshalled: Dict[str, Any]) -> JunitPropertyName: - # return cls( - # tag=marshalled["tag"], - # id=TestId.unmarshal(marshalled["id"]), - # ) - # - # def marshal(self) -> Dict[str, Any]: - # return { - # "tag": self.tag, - # "id": self.id.marshal(), - # } - - T = TypeVar("T") From ecddc65b4c086bca943b883ed67c296bcafd9b88 Mon Sep 17 00:00:00 2001 From: Kyle Altendorf Date: Wed, 15 Nov 2023 08:53:08 -0500 Subject: [PATCH 05/22] rename to `tests/process_junit.py` --- .github/workflows/benchmarks.yml | 3 +-- .github/workflows/test.yml | 3 +-- tests/{process_benchmarks.py => process_junit.py} | 0 3 files changed, 2 insertions(+), 4 deletions(-) rename tests/{process_benchmarks.py => process_junit.py} (100%) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index cbcb92939071..95a534976ab2 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -124,5 +124,4 @@ jobs: - name: Add benchmark results to workflow summary if: always() run: | - # TODO: rename the file since it is more general - python -m tests.process_benchmarks --type benchmark --xml junit-data/benchmarks.xml --markdown --link-prefix ${{ github.event.repository.html_url }}/blob/${{ github.sha }}/ --link-line-separator \#L >> "$GITHUB_STEP_SUMMARY" + python -m tests.process_junit --type benchmark --xml junit-data/benchmarks.xml --markdown --link-prefix ${{ github.event.repository.html_url }}/blob/${{ github.sha }}/ --link-line-separator \#L >> "$GITHUB_STEP_SUMMARY" diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 4444030a1fa7..8a49b28f22db 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -150,8 +150,7 @@ jobs: - name: Add time out assert results to workflow summary if: always() run: | - # TODO: rename the file since it is more general - python -m tests.process_benchmarks --type time_out_assert --xml junit-results/junit.xml --markdown --link-prefix ${{ github.event.repository.html_url }}/blob/${{ github.sha }}/ --link-line-separator \#L >> "$GITHUB_STEP_SUMMARY" + python -m tests.process_junit --type time_out_assert --xml junit-results/junit.xml --markdown --link-prefix ${{ github.event.repository.html_url }}/blob/${{ github.sha }}/ --link-line-separator \#L >> "$GITHUB_STEP_SUMMARY" - name: Download Coverage uses: actions/download-artifact@v3 diff --git a/tests/process_benchmarks.py b/tests/process_junit.py similarity index 100% rename from tests/process_benchmarks.py rename to tests/process_junit.py From 25105e36b7e69f06c07502d3fa16fbdfb6c7abbd Mon Sep 17 00:00:00 2001 From: Kyle Altendorf Date: Wed, 15 Nov 2023 09:23:43 -0500 Subject: [PATCH 06/22] fixup --- chia/simulator/time_out_assert.py | 3 +-- tests/conftest.py | 4 +++- tests/ether.py | 2 ++ tests/util/misc.py | 3 +-- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/chia/simulator/time_out_assert.py b/chia/simulator/time_out_assert.py index 02862ed31ebf..db5dfd3d573c 100644 --- a/chia/simulator/time_out_assert.py +++ b/chia/simulator/time_out_assert.py @@ -10,7 +10,6 @@ import time from typing import Any, Callable, ClassVar, Dict, Optional, Tuple, final, overload -import chia from 
chia.protocols.protocol_message_types import ProtocolMessageTypes from chia.util.misc import caller_file_and_line @@ -135,7 +134,7 @@ async def time_out_assert_custom_interval(timeout: float, interval, function, va if ether.record_property is not None: data = TimeOutAssertData( duration=duration, - path=pathlib.Path(entry_file).relative_to(pathlib.Path(chia.__file__).parent.parent), + path=pathlib.Path(entry_file).relative_to(ether.project_root), line=entry_line, limit=timeout, timed_out=timed_out, diff --git a/tests/conftest.py b/tests/conftest.py index 9a445e6df001..7a637b2894c8 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -22,6 +22,7 @@ from _pytest.fixtures import SubRequest from pytest import MonkeyPatch +import tests from chia.clvm.spend_sim import CostLogger # Set spawn after stdlib imports, but before other imports @@ -83,8 +84,9 @@ @pytest.fixture(name="ether_setup", autouse=True) def ether_setup_fixture(request: SubRequest, record_property: Callable[[str, object], None]) -> Iterator[None]: with MonkeyPatch.context() as monkeypatch_context: - monkeypatch_context.setattr(ether, "test_id", TestId.create(node=request.node)) + monkeypatch_context.setattr(ether, "project_root", Path(tests.__file__).parent.parent) monkeypatch_context.setattr(ether, "record_property", record_property) + monkeypatch_context.setattr(ether, "test_id", TestId.create(node=request.node)) yield diff --git a/tests/ether.py b/tests/ether.py index 4acb2c7a68d7..e15999f610ef 100644 --- a/tests/ether.py +++ b/tests/ether.py @@ -1,5 +1,6 @@ from __future__ import annotations +from pathlib import Path from typing import TYPE_CHECKING, Callable if TYPE_CHECKING: @@ -11,5 +12,6 @@ # populated until tests are running. # TODO: should we enforce checking every use for not None? 
+project_root: Path = None # type: ignore[assignment] record_property: Callable[[str, object], None] = None # type: ignore[assignment] test_id: TestId = None # type: ignore[assignment] diff --git a/tests/util/misc.py b/tests/util/misc.py index 951571197b9b..22108e8ba9f3 100644 --- a/tests/util/misc.py +++ b/tests/util/misc.py @@ -43,7 +43,6 @@ from _pytest.nodes import Node from chia_rs import Coin -import chia from chia.types.blockchain_format.sized_bytes import bytes32 from chia.types.condition_opcodes import ConditionOpcode from chia.util.hash import std_hash @@ -342,7 +341,7 @@ def __exit__( if self.record_property is not None: data = BenchmarkData( duration=results.duration, - path=pathlib.Path(self.entry_file).relative_to(pathlib.Path(chia.__file__).parent.parent), + path=pathlib.Path(self.entry_file).relative_to(ether.project_root), line=self.entry_line, limit=self.seconds, label=self.label, From c3944af20ad912cb5bab21032ef09858d9922491 Mon Sep 17 00:00:00 2001 From: Kyle Altendorf Date: Wed, 15 Nov 2023 09:44:50 -0500 Subject: [PATCH 07/22] hinting and pylint --- chia/simulator/time_out_assert.py | 4 ++-- tests/conftest.py | 3 ++- tests/ether.py | 9 ++++----- tests/util/misc.py | 7 +++---- 4 files changed, 11 insertions(+), 12 deletions(-) diff --git a/chia/simulator/time_out_assert.py b/chia/simulator/time_out_assert.py index db5dfd3d573c..20a55c4ee6e4 100644 --- a/chia/simulator/time_out_assert.py +++ b/chia/simulator/time_out_assert.py @@ -131,7 +131,7 @@ async def time_out_assert_custom_interval(timeout: float, interval, function, va except ImportError: pass else: - if ether.record_property is not None: + if ether.record_property is not None and ether.project_root is not None: data = TimeOutAssertData( duration=duration, path=pathlib.Path(entry_file).relative_to(ether.project_root), @@ -140,7 +140,7 @@ async def time_out_assert_custom_interval(timeout: float, interval, function, va timed_out=timed_out, ) - ether.record_property( + ether.record_property( # pylint: disable=E1102 # json.dumps(name.marshal(), ensure_ascii=True, sort_keys=True), data.tag, json.dumps(data.marshal(), ensure_ascii=True, sort_keys=True), diff --git a/tests/conftest.py b/tests/conftest.py index 7a637b2894c8..bb6d4e702393 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -91,7 +91,8 @@ def ether_setup_fixture(request: SubRequest, record_property: Callable[[str, obj @pytest.fixture(autouse=True) -def record_test_id_property_fixture(ether_setup: None, record_property: Callable[[str, object], None]) -> None: +def ether_test_id_property_fixture(ether_setup: None, record_property: Callable[[str, object], None]) -> None: + assert ether.test_id is not None, "ether.test_id is None, did you forget to use the ether_setup fixture?" record_property("test_id", json.dumps(ether.test_id.marshal(), ensure_ascii=True, sort_keys=True)) diff --git a/tests/ether.py b/tests/ether.py index e15999f610ef..026e27f222ad 100644 --- a/tests/ether.py +++ b/tests/ether.py @@ -1,7 +1,7 @@ from __future__ import annotations from pathlib import Path -from typing import TYPE_CHECKING, Callable +from typing import TYPE_CHECKING, Callable, Optional if TYPE_CHECKING: from tests.util.misc import TestId @@ -11,7 +11,6 @@ # result in you likely getting the default `None` values since they are not # populated until tests are running. -# TODO: should we enforce checking every use for not None? 
-project_root: Path = None # type: ignore[assignment] -record_property: Callable[[str, object], None] = None # type: ignore[assignment] -test_id: TestId = None # type: ignore[assignment] +project_root: Optional[Path] = None +record_property: Optional[Callable[[str, object], None]] = None +test_id: Optional[TestId] = None diff --git a/tests/util/misc.py b/tests/util/misc.py index 22108e8ba9f3..24a066d4d901 100644 --- a/tests/util/misc.py +++ b/tests/util/misc.py @@ -295,7 +295,6 @@ class _AssertRuntime: runtime_manager: Optional[contextlib.AbstractContextManager[Future[RuntimeResults]]] = None runtime_results_callable: Optional[Future[RuntimeResults]] = None enable_assertion: bool = True - record_property: Optional[Callable[[str, object], None]] = None def __enter__(self) -> Future[AssertRuntimeResults]: self.entry_file, self.entry_line = caller_file_and_line() @@ -338,7 +337,7 @@ def __exit__( if self.print: print(results.block(label=self.label)) - if self.record_property is not None: + if ether.record_property is not None and ether.project_root is not None: data = BenchmarkData( duration=results.duration, path=pathlib.Path(self.entry_file).relative_to(ether.project_root), @@ -347,7 +346,7 @@ def __exit__( label=self.label, ) - ether.record_property( + ether.record_property( # pylint: disable=E1102 data.tag, json.dumps(data.marshal(), ensure_ascii=True, sort_keys=True), ) @@ -539,7 +538,7 @@ class DataTypeProtocol(Protocol): __match_args__: ClassVar[Tuple[str, ...]] = () @classmethod - def unmarshal(self: Type[T], marshalled: Dict[str, Any]) -> T: + def unmarshal(cls: Type[T], marshalled: Dict[str, Any]) -> T: ... def marshal(self) -> Dict[str, Any]: From 651a1e2ec94bf0a8e7294d1f09eee2d24a66ccf6 Mon Sep 17 00:00:00 2001 From: Kyle Altendorf Date: Wed, 15 Nov 2023 09:57:19 -0500 Subject: [PATCH 08/22] fixup --- tests/util/misc.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/util/misc.py b/tests/util/misc.py index 24a066d4d901..eff2bd5a33ed 100644 --- a/tests/util/misc.py +++ b/tests/util/misc.py @@ -367,7 +367,6 @@ class BenchmarkRunner: def assert_runtime(self, *args: Any, **kwargs: Any) -> _AssertRuntime: kwargs.setdefault("enable_assertion", self.enable_assertion) kwargs.setdefault("overhead", self.overhead) - kwargs.setdefault("record_property", ether.record_property) return _AssertRuntime(*args, **kwargs) From 88723738a69345da88a3bda291d600e1090b8500 Mon Sep 17 00:00:00 2001 From: Kyle Altendorf Date: Wed, 15 Nov 2023 11:03:47 -0500 Subject: [PATCH 09/22] after install --- .github/workflows/test.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 8a49b28f22db..31b56a3a02d7 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -147,11 +147,6 @@ jobs: path: junit-results/* if-no-files-found: error - - name: Add time out assert results to workflow summary - if: always() - run: | - python -m tests.process_junit --type time_out_assert --xml junit-results/junit.xml --markdown --link-prefix ${{ github.event.repository.html_url }}/blob/${{ github.sha }}/ --link-line-separator \#L >> "$GITHUB_STEP_SUMMARY" - - name: Download Coverage uses: actions/download-artifact@v3 with: @@ -170,6 +165,11 @@ jobs: - uses: chia-network/actions/activate-venv@main + - name: Add time out assert results to workflow summary + if: always() + run: | + python -m tests.process_junit --type time_out_assert --xml junit-results/junit.xml --markdown --link-prefix ${{ github.event.repository.html_url 
}}/blob/${{ github.sha }}/ --link-line-separator \#L >> "$GITHUB_STEP_SUMMARY" + - name: Coverage Processing run: | coverage combine --rcfile=.coveragerc --data-file=coverage-reports/.coverage coverage-data/ From 333e328d967888a02721bd5f26fdaad21e78b9cf Mon Sep 17 00:00:00 2001 From: Kyle Altendorf Date: Wed, 15 Nov 2023 12:04:43 -0500 Subject: [PATCH 10/22] handle skipped tests --- tests/process_junit.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/process_junit.py b/tests/process_junit.py index c68badfb298d..a758cb5ea4a1 100644 --- a/tests/process_junit.py +++ b/tests/process_junit.py @@ -146,7 +146,10 @@ def main( cases_by_test_id: defaultdict[TestId, List[lxml.etree.Element]] = defaultdict(list) for suite in root.findall("testsuite"): for case in suite.findall("testcase"): - test_id = TestId.unmarshal(json.loads(case.find("properties/property[@name='test_id']").attrib["value"])) + if case.find("skipped") is not None: + continue + test_id_property = case.find("properties/property[@name='test_id']") + test_id = TestId.unmarshal(json.loads(test_id_property.attrib["value"])) test_id = dataclasses.replace( test_id, ids=tuple(id for id in test_id.ids if not id.startswith(f"{data_type.tag}_repeat")) ) From 25c8fe5af5dced06972631b22132f0f322cfdd2f Mon Sep 17 00:00:00 2001 From: Kyle Altendorf Date: Wed, 15 Nov 2023 17:34:07 -0500 Subject: [PATCH 11/22] --only plot_sync/ --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 31b56a3a02d7..b3bb1d116944 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -38,7 +38,7 @@ jobs: - name: Generate matrix configuration id: configure run: | - python tests/build-job-matrix.py --per directory --verbose > matrix.json + python tests/build-job-matrix.py --per directory --verbose --only plot_sync/ > matrix.json cat matrix.json echo configuration=$(cat matrix.json) >> "$GITHUB_OUTPUT" echo matrix_mode=${{ ( github.event_name == 'workflow_dispatch' ) && 'all' || ( github.repository_owner == 'Chia-Network' && github.repository != 'Chia-Network/chia-blockchain' ) && 'limited' || ( github.repository_owner == 'Chia-Network' && github.repository == 'Chia-Network/chia-blockchain' && github.ref == 'refs/heads/main' ) && 'main' || ( github.repository_owner == 'Chia-Network' && github.repository == 'Chia-Network/chia-blockchain' && startsWith(github.ref, 'refs/heads/release/') ) && 'all' || ( github.repository_owner == 'Chia-Network' && github.repository == 'Chia-Network/chia-blockchain' && startsWith(github.base_ref, 'release/') ) && 'all' || 'main' }} >> "$GITHUB_OUTPUT" From 2636e09716db864ba9b6e8da6565b20bdd3d2c91 Mon Sep 17 00:00:00 2001 From: Kyle Altendorf Date: Wed, 15 Nov 2023 18:32:03 -0500 Subject: [PATCH 12/22] better relative paths --- chia/simulator/time_out_assert.py | 13 ++++++++++--- chia/util/misc.py | 14 ++++++++++++-- tests/conftest.py | 1 - tests/ether.py | 4 +--- tests/util/misc.py | 20 ++++++++++++++++---- 5 files changed, 39 insertions(+), 13 deletions(-) diff --git a/chia/simulator/time_out_assert.py b/chia/simulator/time_out_assert.py index 20a55c4ee6e4..ccdb507d9b94 100644 --- a/chia/simulator/time_out_assert.py +++ b/chia/simulator/time_out_assert.py @@ -10,6 +10,8 @@ import time from typing import Any, Callable, ClassVar, Dict, Optional, Tuple, final, overload +import chia +import tests from chia.protocols.protocol_message_types import ProtocolMessageTypes from chia.util.misc import 
caller_file_and_line @@ -99,7 +101,12 @@ async def time_out_assert_custom_interval(timeout: float, interval, function, va __tracebackhide__ = True # TODO: wrong line when not called directly but instead from the regular time_out_assert? - entry_file, entry_line = caller_file_and_line() + entry_file, entry_line = caller_file_and_line( + relative_to=( + pathlib.Path(chia.__file__).parent.parent, + pathlib.Path(tests.__file__).parent.parent, + ) + ) timeout = adjusted_timeout(timeout=timeout) @@ -131,10 +138,10 @@ async def time_out_assert_custom_interval(timeout: float, interval, function, va except ImportError: pass else: - if ether.record_property is not None and ether.project_root is not None: + if ether.record_property is not None: data = TimeOutAssertData( duration=duration, - path=pathlib.Path(entry_file).relative_to(ether.project_root), + path=pathlib.Path(entry_file).relative_to(pathlib.Path(chia.__file__).parent.parent), line=entry_line, limit=timeout, timed_out=timed_out, diff --git a/chia/util/misc.py b/chia/util/misc.py index ec6f3791d8f9..6b2a6d3d4ffb 100644 --- a/chia/util/misc.py +++ b/chia/util/misc.py @@ -16,6 +16,7 @@ Collection, Dict, Generic, + Iterable, Iterator, List, Optional, @@ -284,6 +285,15 @@ def setup_async_signal_handler(self, handler: AsyncHandler) -> None: ) -def caller_file_and_line(distance: int = 1) -> Tuple[str, int]: +def caller_file_and_line(distance: int = 1, relative_to: Iterable[Path] = ()) -> Tuple[str, int]: caller = getframeinfo(stack()[distance + 1][0]) - return caller.filename, caller.lineno + + caller_path = Path(caller.filename) + options: List[str] = [caller_path.as_posix()] + for path in relative_to: + try: + options.append(caller_path.relative_to(path).as_posix()) + except ValueError: + pass + + return min(options, key=len), caller.lineno diff --git a/tests/conftest.py b/tests/conftest.py index bb6d4e702393..288f37892232 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -84,7 +84,6 @@ @pytest.fixture(name="ether_setup", autouse=True) def ether_setup_fixture(request: SubRequest, record_property: Callable[[str, object], None]) -> Iterator[None]: with MonkeyPatch.context() as monkeypatch_context: - monkeypatch_context.setattr(ether, "project_root", Path(tests.__file__).parent.parent) monkeypatch_context.setattr(ether, "record_property", record_property) monkeypatch_context.setattr(ether, "test_id", TestId.create(node=request.node)) yield diff --git a/tests/ether.py b/tests/ether.py index 026e27f222ad..a56712e443f1 100644 --- a/tests/ether.py +++ b/tests/ether.py @@ -1,16 +1,14 @@ from __future__ import annotations -from pathlib import Path from typing import TYPE_CHECKING, Callable, Optional if TYPE_CHECKING: from tests.util.misc import TestId # NOTE: When using this module do not import the attributes directly, rather import -# something like `from tests import ether`. Importing attributes direclty will +# something like `from tests import ether`. Importing attributes directly will # result in you likely getting the default `None` values since they are not # populated until tests are running. 
-project_root: Optional[Path] = None record_property: Optional[Callable[[str, object], None]] = None test_id: Optional[TestId] = None diff --git a/tests/util/misc.py b/tests/util/misc.py index eff2bd5a33ed..2ea95400ebeb 100644 --- a/tests/util/misc.py +++ b/tests/util/misc.py @@ -43,6 +43,8 @@ from _pytest.nodes import Node from chia_rs import Coin +import chia +import tests from chia.types.blockchain_format.sized_bytes import bytes32 from chia.types.condition_opcodes import ConditionOpcode from chia.util.hash import std_hash @@ -190,7 +192,12 @@ def measure_runtime( overhead: Optional[float] = None, print_results: bool = True, ) -> Iterator[Future[RuntimeResults]]: - entry_file, entry_line = caller_file_and_line() + entry_file, entry_line = caller_file_and_line( + relative_to=( + pathlib.Path(chia.__file__).parent.parent, + pathlib.Path(tests.__file__).parent.parent, + ) + ) results_future: Future[RuntimeResults] = Future() @@ -297,7 +304,12 @@ class _AssertRuntime: enable_assertion: bool = True def __enter__(self) -> Future[AssertRuntimeResults]: - self.entry_file, self.entry_line = caller_file_and_line() + self.entry_file, self.entry_line = caller_file_and_line( + relative_to=( + pathlib.Path(chia.__file__).parent.parent, + pathlib.Path(tests.__file__).parent.parent, + ) + ) self.runtime_manager = measure_runtime( clock=self.clock, gc_mode=self.gc_mode, overhead=self.overhead, print_results=False @@ -337,10 +349,10 @@ def __exit__( if self.print: print(results.block(label=self.label)) - if ether.record_property is not None and ether.project_root is not None: + if ether.record_property is not None: data = BenchmarkData( duration=results.duration, - path=pathlib.Path(self.entry_file).relative_to(ether.project_root), + path=pathlib.Path(self.entry_file).relative_to(pathlib.Path(tests.__file__).parent.parent), line=self.entry_line, limit=self.seconds, label=self.label, From 5bc65f394af74d0171943ce7f45fd043e6e3e8bf Mon Sep 17 00:00:00 2001 From: Kyle Altendorf Date: Wed, 15 Nov 2023 18:43:15 -0500 Subject: [PATCH 13/22] better relative paths --- chia/simulator/time_out_assert.py | 2 +- tests/ether.py | 7 ++++++- tests/util/misc.py | 2 +- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/chia/simulator/time_out_assert.py b/chia/simulator/time_out_assert.py index ccdb507d9b94..ffff64070f60 100644 --- a/chia/simulator/time_out_assert.py +++ b/chia/simulator/time_out_assert.py @@ -141,7 +141,7 @@ async def time_out_assert_custom_interval(timeout: float, interval, function, va if ether.record_property is not None: data = TimeOutAssertData( duration=duration, - path=pathlib.Path(entry_file).relative_to(pathlib.Path(chia.__file__).parent.parent), + path=pathlib.Path(entry_file), line=entry_line, limit=timeout, timed_out=timed_out, diff --git a/tests/ether.py b/tests/ether.py index a56712e443f1..f1ecb3efb415 100644 --- a/tests/ether.py +++ b/tests/ether.py @@ -5,7 +5,12 @@ if TYPE_CHECKING: from tests.util.misc import TestId -# NOTE: When using this module do not import the attributes directly, rather import +# NOTE: Do not just put any useful thing here. This is specifically for making +# fixture values globally available during tests. In _most_ cases fixtures +# should be directly requested using normal mechanisms. Very little should +# be put here. + +# NOTE: When using this module do not import the attributes directly. Rather, import # something like `from tests import ether`. 
Importing attributes directly will # result in you likely getting the default `None` values since they are not # populated until tests are running. diff --git a/tests/util/misc.py b/tests/util/misc.py index 2ea95400ebeb..57a7db70f528 100644 --- a/tests/util/misc.py +++ b/tests/util/misc.py @@ -352,7 +352,7 @@ def __exit__( if ether.record_property is not None: data = BenchmarkData( duration=results.duration, - path=pathlib.Path(self.entry_file).relative_to(pathlib.Path(tests.__file__).parent.parent), + path=pathlib.Path(self.entry_file), line=self.entry_line, limit=self.seconds, label=self.label, From 5f425fc652795a57731c8cbca70d363a5076c516 Mon Sep 17 00:00:00 2001 From: Kyle Altendorf Date: Wed, 15 Nov 2023 19:22:01 -0500 Subject: [PATCH 14/22] todos --- chia/simulator/time_out_assert.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/chia/simulator/time_out_assert.py b/chia/simulator/time_out_assert.py index ffff64070f60..4a09f8c8933a 100644 --- a/chia/simulator/time_out_assert.py +++ b/chia/simulator/time_out_assert.py @@ -72,7 +72,6 @@ class TimeOutAssertData: limit: float timed_out: bool - # TODO: can we make this not required maybe? label: str = "" __match_args__: ClassVar[Tuple[str, ...]] = () @@ -97,15 +96,18 @@ def marshal(self) -> Dict[str, Any]: } -async def time_out_assert_custom_interval(timeout: float, interval, function, value=True, *args, **kwargs): +async def time_out_assert_custom_interval( + timeout: float, interval, function, value=True, *args, stack_distance=0, **kwargs +): __tracebackhide__ = True # TODO: wrong line when not called directly but instead from the regular time_out_assert? entry_file, entry_line = caller_file_and_line( + distance=stack_distance + 1, relative_to=( pathlib.Path(chia.__file__).parent.parent, pathlib.Path(tests.__file__).parent.parent, - ) + ), ) timeout = adjusted_timeout(timeout=timeout) @@ -156,10 +158,20 @@ async def time_out_assert_custom_interval(timeout: float, interval, function, va async def time_out_assert(timeout: int, function, value=True, *args, **kwargs): __tracebackhide__ = True - await time_out_assert_custom_interval(timeout, 0.05, function, value, *args, **kwargs) + await time_out_assert_custom_interval( + timeout, + 0.05, + function, + value, + *args, + **kwargs, + stack_distance=1, + ) async def time_out_assert_not_none(timeout: float, function, *args, **kwargs): + # TODO: rework to leverage time_out_assert_custom_interval() such as by allowing + # value to be a callable __tracebackhide__ = True timeout = adjusted_timeout(timeout=timeout) From 060ed312c46fb56836762be97788a968921ae142 Mon Sep 17 00:00:00 2001 From: Kyle Altendorf Date: Wed, 15 Nov 2023 19:31:58 -0500 Subject: [PATCH 15/22] todos --- tests/process_junit.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/process_junit.py b/tests/process_junit.py index a758cb5ea4a1..0b7d9a731782 100644 --- a/tests/process_junit.py +++ b/tests/process_junit.py @@ -67,7 +67,6 @@ def sub(matchobj: re.Match[str]) -> str: @dataclasses.dataclass(frozen=True) class EventId: test_id: TestId - # TODO: this doesn't feel quite right, more a property than an identifier? 
i dunno tag: str line: int path: Path @@ -165,17 +164,16 @@ def main( data_by_event_id[event_id].append(data) results: List[Result] = [] - # TODO: yup, datas for event_id, datas in data_by_event_id.items(): + [limit] = {data.limit for data in datas} results.append( Result( file_path=event_id.path, test_path=event_id.test_id.test_path, ids=event_id.test_id.ids, line=event_id.line, - # label=label, durations=tuple(data.duration for data in datas), - limit=datas[0].limit, + limit=limit, label=event_id.label, ) ) From dab54e35612cef0248ae060cd94529b73f71f6f4 Mon Sep 17 00:00:00 2001 From: Kyle Altendorf Date: Thu, 16 Nov 2023 12:55:24 -0500 Subject: [PATCH 16/22] tidy --- tests/process_junit.py | 2 +- tests/util/time_out_assert.py | 43 +++++++++++++++-------------------- 2 files changed, 19 insertions(+), 26 deletions(-) diff --git a/tests/process_junit.py b/tests/process_junit.py index 0b7d9a731782..c71a337cb2df 100644 --- a/tests/process_junit.py +++ b/tests/process_junit.py @@ -13,8 +13,8 @@ import click import lxml.etree -from chia.simulator.time_out_assert import TimeOutAssertData from tests.util.misc import BenchmarkData, DataTypeProtocol, TestId +from tests.util.time_out_assert import TimeOutAssertData supported_data_types: List[Type[DataTypeProtocol]] = [TimeOutAssertData, BenchmarkData] supported_data_types_by_tag: Dict[str, Type[DataTypeProtocol]] = {cls.tag: cls for cls in supported_data_types} diff --git a/tests/util/time_out_assert.py b/tests/util/time_out_assert.py index 508f4bb02d87..4252d35756fc 100644 --- a/tests/util/time_out_assert.py +++ b/tests/util/time_out_assert.py @@ -6,13 +6,15 @@ import logging import pathlib import time -from typing import Any, Callable, ClassVar, Dict, Tuple, final +from typing import TYPE_CHECKING, Any, Callable, ClassVar, Dict, Tuple, cast, final import chia import tests from chia.protocols.protocol_message_types import ProtocolMessageTypes from chia.util.misc import caller_file_and_line from chia.util.timing import adjusted_timeout +from tests import ether +from tests.util.misc import DataTypeProtocol log = logging.getLogger(__name__) @@ -20,9 +22,8 @@ @final @dataclasses.dataclass(frozen=True) class TimeOutAssertData: - # TODO: deal with import directions etc so we can check this - # if TYPE_CHECKING: - # _protocol_check: ClassVar[DataTypeProtocol] = cast("TimeOutAssertData", None) + if TYPE_CHECKING: + _protocol_check: ClassVar[DataTypeProtocol] = cast("TimeOutAssertData", None) tag: ClassVar[str] = "time_out_assert" @@ -61,7 +62,6 @@ async def time_out_assert_custom_interval( ): __tracebackhide__ = True - # TODO: wrong line when not called directly but instead from the regular time_out_assert? 
From 9f0529d0f7b5d4d9ee6d44af8e0aabfebb75adc7 Mon Sep 17 00:00:00 2001
From: Kyle Altendorf
Date: Thu, 16 Nov 2023 13:11:29 -0500
Subject: [PATCH 17/22] back to full test run

---
 .github/workflows/test.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index b3bb1d116944..31b56a3a02d7 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -38,7 +38,7 @@ jobs:
       - name: Generate matrix configuration
         id: configure
         run: |
-          python tests/build-job-matrix.py --per directory --verbose --only plot_sync/ > matrix.json
+          python tests/build-job-matrix.py --per directory --verbose > matrix.json
           cat matrix.json
           echo configuration=$(cat matrix.json) >> "$GITHUB_OUTPUT"
           echo matrix_mode=${{ ( github.event_name == 'workflow_dispatch' ) && 'all' || ( github.repository_owner == 'Chia-Network' && github.repository != 'Chia-Network/chia-blockchain' ) && 'limited' || ( github.repository_owner == 'Chia-Network' && github.repository == 'Chia-Network/chia-blockchain' && github.ref == 'refs/heads/main' ) && 'main' || ( github.repository_owner == 'Chia-Network' && github.repository == 'Chia-Network/chia-blockchain' && startsWith(github.ref, 'refs/heads/release/') ) && 'all' || ( github.repository_owner == 'Chia-Network' && github.repository == 'Chia-Network/chia-blockchain' && startsWith(github.base_ref, 'release/') ) && 'all' || 'main' }} >> "$GITHUB_OUTPUT"

From ab623e41abf6a32ae666cd2ea53bd4a4dad64dec Mon Sep 17 00:00:00 2001
From: Kyle Altendorf
Date: Thu, 16 Nov 2023 13:12:41 -0500
Subject: [PATCH 18/22] add markdown headings

---
 tests/process_junit.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/tests/process_junit.py b/tests/process_junit.py
index c71a337cb2df..f365a46af491 100644
--- a/tests/process_junit.py
+++ b/tests/process_junit.py
@@ -201,6 +201,8 @@ def output_benchmark(
             dumped = json.dumps(result.marshal())
             output.write(f"{link} {dumped}\n")
     else:
+        output.write("# Benchmark Metrics\n\n")
+
         output.write("| Test | 🍿 | Mean | Max | 3σ | Limit | Percent |\n")
         output.write("| --- | --- | --- | --- | --- | --- | --- |\n")
         for result in sorted(results):
@@ -276,6 +278,8 @@ def output_time_out_assert(
             dumped = json.dumps(result.marshal())
             output.write(f"{link} {dumped}\n")
     else:
+        output.write("# Time Out Assert Metrics\n\n")
+
         output.write("| Test | 🍿 | Mean | Max | 3σ | Limit | Percent |\n")
         output.write("| --- | --- | --- | --- | --- | --- | --- |\n")
         for result in sorted(results):
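The headings added above sit directly on top of the existing markdown tables so that each metrics block is labelled when GitHub renders the step summary or the saved .md file. A small self-contained sketch of the output shape, with a trimmed column set; write_metrics_table is a hypothetical stand-in for the output_*() helpers, not the real code.

    import io
    from typing import Iterable, Tuple


    def write_metrics_table(
        output: io.TextIOBase, title: str, rows: Iterable[Tuple[str, float, float]]
    ) -> None:
        # heading first, then a header row and separator row so the table renders
        output.write(f"# {title}\n\n")
        output.write("| Test | Mean | Limit |\n")
        output.write("| --- | --- | --- |\n")
        for name, mean_duration, limit in rows:
            output.write(f"| {name} | {mean_duration:.3f} | {limit:.3f} |\n")


    buffer = io.StringIO()
    write_metrics_table(buffer, "Time Out Assert Metrics", [("test_example", 0.42, 10.0)])
    print(buffer.getvalue(), end="")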
From 1650895453ce23c05f3d6c7315c127d0e25edaa9 Mon Sep 17 00:00:00 2001
From: Kyle Altendorf
Date: Thu, 16 Nov 2023 15:10:26 -0500
Subject: [PATCH 19/22] just make results an artifact

$GITHUB_STEP_SUMMARY upload aborted, supports content up to a size of 1024k, got 6067k.
---
 .github/workflows/test.yml | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 31b56a3a02d7..5c15b59fa279 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -139,14 +139,6 @@ jobs:
           path: junit-data/*
           if-no-files-found: error

-      - name: Publish JUnit results
-        if: always()
-        uses: actions/upload-artifact@v3
-        with:
-          name: junit-results
-          path: junit-results/*
-          if-no-files-found: error
-
       - name: Download Coverage
         uses: actions/download-artifact@v3
         with:
@@ -168,7 +160,15 @@ jobs:
       - name: Add time out assert results to workflow summary
         if: always()
         run: |
-          python -m tests.process_junit --type time_out_assert --xml junit-results/junit.xml --markdown --link-prefix ${{ github.event.repository.html_url }}/blob/${{ github.sha }}/ --link-line-separator \#L >> "$GITHUB_STEP_SUMMARY"
+          python -m tests.process_junit --type time_out_assert --xml junit-results/junit.xml --markdown --link-prefix ${{ github.event.repository.html_url }}/blob/${{ github.sha }}/ --link-line-separator \#L >> junit-results/time_out_assert.md
+
+      - name: Publish JUnit results
+        if: always()
+        uses: actions/upload-artifact@v3
+        with:
+          name: junit-results
+          path: junit-results/*
+          if-no-files-found: error

       - name: Coverage Processing
         run: |

From ccd61f7d8cb4ced8752edce675142d79d28a76b3 Mon Sep 17 00:00:00 2001
From: Kyle Altendorf
Date: Thu, 16 Nov 2023 19:49:21 -0500
Subject: [PATCH 20/22] also output the top 50 to the step summary

---
 .github/workflows/test.yml |  3 ++-
 tests/process_junit.py     | 23 +++++++++++++++++++++--
 2 files changed, 23 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 5c15b59fa279..1d93be599753 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -160,7 +160,8 @@ jobs:
       - name: Add time out assert results to workflow summary
         if: always()
         run: |
-          python -m tests.process_junit --type time_out_assert --xml junit-results/junit.xml --markdown --link-prefix ${{ github.event.repository.html_url }}/blob/${{ github.sha }}/ --link-line-separator \#L >> junit-results/time_out_assert.md
+          python -m tests.process_junit --limit 50 --type time_out_assert --xml junit-results/junit.xml --markdown --link-prefix ${{ github.event.repository.html_url }}/blob/${{ github.sha }}/ --link-line-separator \#L >> "$GITHUB_STEP_SUMMARY"
+          python -m tests.process_junit --type time_out_assert --xml junit-results/junit.xml --markdown --link-prefix ${{ github.event.repository.html_url }}/blob/${{ github.sha }}/ --link-line-separator \#L >> junit-results/time_out_assert.md

       - name: Publish JUnit results
         if: always()
         uses: actions/upload-artifact@v3
         with:
           name: junit-results
           path: junit-results/*
           if-no-files-found: error

       - name: Coverage Processing
         run: |
diff --git a/tests/process_junit.py b/tests/process_junit.py
index f365a46af491..1fa256ddbbe6 100644
--- a/tests/process_junit.py
+++ b/tests/process_junit.py
@@ -8,7 +8,7 @@
 from dataclasses import dataclass, field
 from pathlib import Path
 from statistics import StatisticsError, mean, stdev
-from typing import Any, Dict, List, TextIO, Tuple, Type, final
+from typing import Any, Dict, List, Optional, TextIO, Tuple, Type, final

 import click
 import lxml.etree
@@ -127,6 +127,12 @@ class EventId:
     required=True,
     show_default=True,
 )
+@click.option(
+    "--limit",
+    "result_count_limit",
+    type=int,
+    help="Limit the number of results to output.",
+)
 def main(
     xml_file: TextIO,
     link_prefix: str,
@@ -136,6 +142,7 @@ def main(
     percent_margin: int,
     randomoji: bool,
     tag: str,
+    result_count_limit: Optional[int],
 ) -> None:
     data_type = supported_data_types_by_tag[tag]

@@ -178,12 +185,24 @@ def main(
             )
         )

+    if result_count_limit is not None:
+        results = sorted(results, key=lambda result: max(result.durations) / result.limit, reverse=True)
+        results = results[:result_count_limit]
+
     handlers = {
         BenchmarkData.tag: output_benchmark,
         TimeOutAssertData.tag: output_time_out_assert,
     }
     handler = handlers[data_type.tag]
-    handler(link_line_separator, link_prefix, markdown, output, percent_margin, randomoji, results)
+    handler(
+        link_line_separator=link_line_separator,
+        link_prefix=link_prefix,
+        markdown=markdown,
+        output=output,
+        percent_margin=percent_margin,
+        randomoji=randomoji,
+        results=results,
+    )
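The --limit option above keeps the step summary below GitHub's size cap by reporting only the entries that came closest to (or blew past) their allowed time, while the unfiltered report still lands in the junit-results artifact. A short sketch of that ranking under the same max(durations) / limit key; RankedResult and top_results are illustrative names, not the real classes.

    import dataclasses
    from typing import List, Optional, Tuple


    @dataclasses.dataclass(frozen=True)
    class RankedResult:
        # illustrative stand-in for the real Result records
        name: str
        durations: Tuple[float, ...]
        limit: float


    def top_results(results: List[RankedResult], result_count_limit: Optional[int]) -> List[RankedResult]:
        if result_count_limit is None:
            return results
        # rank by how much of the allowed time the slowest observed run consumed
        ranked = sorted(results, key=lambda result: max(result.durations) / result.limit, reverse=True)
        return ranked[:result_count_limit]


    sample = [RankedResult("a", (0.2, 9.5), 10.0), RankedResult("b", (0.1,), 10.0)]
    assert [result.name for result in top_results(sample, 1)] == ["a"]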
From de39fc06bfd154308455936d22c4c5a7e5ce4d19 Mon Sep 17 00:00:00 2001
From: Kyle Altendorf
Date: Thu, 7 Dec 2023 13:41:21 -0500
Subject: [PATCH 21/22] Update process_junit.py

---
 tests/process_junit.py | 15 ---------------
 1 file changed, 15 deletions(-)

diff --git a/tests/process_junit.py b/tests/process_junit.py
index 1fa256ddbbe6..c58788aeefea 100644
--- a/tests/process_junit.py
+++ b/tests/process_junit.py
@@ -48,21 +48,6 @@ def link(self, prefix: str, line_separator: str) -> str:
         return f"{prefix}{self.file_path.as_posix()}{line_separator}{self.line}"


-def sub(matchobj: re.Match[str]) -> str:
-    result = ""
-
-    if matchobj.group("start") == "[":
-        result += "["
-
-    if matchobj.group("start") == matchobj.group("end") == "-":
-        result += "-"
-
-    if matchobj.group("end") == "]":
-        result += "]"
-
-    return result
-
-
 @final
 @dataclasses.dataclass(frozen=True)
 class EventId:

From 9d45092543f4e7f6033b7d76f0072436fab44d1c Mon Sep 17 00:00:00 2001
From: Kyle Altendorf
Date: Thu, 7 Dec 2023 17:22:10 -0500
Subject: [PATCH 22/22] Update process_junit.py

---
 tests/process_junit.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/process_junit.py b/tests/process_junit.py
index c58788aeefea..13b5fc6c558c 100644
--- a/tests/process_junit.py
+++ b/tests/process_junit.py
@@ -3,7 +3,6 @@
 import dataclasses
 import json
 import random
-import re
 from collections import defaultdict
 from dataclasses import dataclass, field
 from pathlib import Path