Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

add time out assert data collection and presentation #16831

Merged
merged 32 commits into from
Feb 6, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
32 commits
Select commit Hold shift + click to select a range
97580ae
add tests.ether
altendky Nov 14, 2023
91bf438
add new tests/ether.py
altendky Nov 14, 2023
a50b221
go time out assert
altendky Nov 15, 2023
bc36541
remove unused JunitPropertyName
altendky Nov 15, 2023
ecddc65
rename to `tests/process_junit.py`
altendky Nov 15, 2023
25105e3
fixup
altendky Nov 15, 2023
c3944af
hinting and pylint
altendky Nov 15, 2023
651a1e2
fixup
altendky Nov 15, 2023
8872373
after install
altendky Nov 15, 2023
333e328
handle skipped tests
altendky Nov 15, 2023
25c8fe5
--only plot_sync/
altendky Nov 15, 2023
2636e09
better relative paths
altendky Nov 15, 2023
5bc65f3
better relative paths
altendky Nov 15, 2023
5f425fc
todos
altendky Nov 16, 2023
060ed31
todos
altendky Nov 16, 2023
2143b17
Merge branch 'main' into tests.ether
altendky Nov 16, 2023
dab54e3
tidy
altendky Nov 16, 2023
9f0529d
back to full test run
altendky Nov 16, 2023
ab623e4
add markdown headings
altendky Nov 16, 2023
1650895
just make results an artifact
altendky Nov 16, 2023
ccd61f7
also output the top 50 to the step summary
altendky Nov 17, 2023
c153ae2
Merge branch 'main' into tests.ether
altendky Nov 17, 2023
b5ff65c
Merge branch 'main' into tests.ether
altendky Dec 6, 2023
de39fc0
Update process_junit.py
altendky Dec 7, 2023
9d45092
Update process_junit.py
altendky Dec 7, 2023
ede4936
Merge branch 'main' into tests.ether
altendky Dec 12, 2023
4a604d3
Merge branch 'main' into tests.ether
altendky Jan 2, 2024
31a8ba5
Merge branch 'main' into tests.ether
altendky Jan 4, 2024
4a98fd7
Merge branch 'main' into tests.ether
altendky Jan 11, 2024
d677c1a
Merge branch 'main' into tests.ether
altendky Jan 16, 2024
b9ae553
Merge branch 'main' into tests.ether
altendky Jan 31, 2024
35b880a
Merge branch 'main' into tests.ether
altendky Feb 2, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/CODEOWNERS
Original file line number Diff line number Diff line change
Expand Up @@ -2,3 +2,4 @@
/.github/* @Chia-Network/actions-reviewers
/PRETTY_GOOD_PRACTICES.md @altendky @Chia-Network/required-reviewers
/pylintrc @altendky @Chia-Network/required-reviewers
/tests/ether.py @altendky @Chia-Network/required-reviewers
2 changes: 1 addition & 1 deletion .github/workflows/benchmarks.yml
Original file line number Diff line number Diff line change
Expand Up @@ -124,4 +124,4 @@ jobs:
- name: Add benchmark results to workflow summary
if: always()
run: |
python -m tests.process_benchmarks --xml junit-data/benchmarks.xml --markdown --link-prefix ${{ github.event.repository.html_url }}/blob/${{ github.sha }}/ --link-line-separator \#L >> "$GITHUB_STEP_SUMMARY"
python -m tests.process_junit --type benchmark --xml junit-data/benchmarks.xml --markdown --link-prefix ${{ github.event.repository.html_url }}/blob/${{ github.sha }}/ --link-line-separator \#L >> "$GITHUB_STEP_SUMMARY"
22 changes: 14 additions & 8 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -142,14 +142,6 @@ jobs:
path: junit-data/*
if-no-files-found: error

- name: Publish JUnit results
if: always()
uses: actions/upload-artifact@v4
with:
name: junit-results
path: junit-results/*
if-no-files-found: error

- name: Download Coverage
uses: actions/download-artifact@v4
with:
Expand All @@ -169,6 +161,20 @@ jobs:

- uses: chia-network/actions/activate-venv@main

- name: Add time out assert results to workflow summary
if: always()
run: |
python -m tests.process_junit --limit 50 --type time_out_assert --xml junit-results/junit.xml --markdown --link-prefix ${{ github.event.repository.html_url }}/blob/${{ github.sha }}/ --link-line-separator \#L >> "$GITHUB_STEP_SUMMARY"
python -m tests.process_junit --type time_out_assert --xml junit-results/junit.xml --markdown --link-prefix ${{ github.event.repository.html_url }}/blob/${{ github.sha }}/ --link-line-separator \#L >> junit-results/time_out_assert.md

- name: Publish JUnit results
if: always()
uses: actions/upload-artifact@v4
with:
name: junit-results
path: junit-results/*
if-no-files-found: error

- name: Coverage Processing
run: |
coverage combine --rcfile=.coveragerc --data-file=coverage-reports/.coverage coverage-data/
Expand Down
17 changes: 17 additions & 0 deletions chia/util/misc.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
import signal
import sys
from dataclasses import dataclass
from inspect import getframeinfo, stack
from pathlib import Path
from types import FrameType
from typing import (
Expand All @@ -19,10 +20,12 @@
ContextManager,
Dict,
Generic,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
final,
Expand Down Expand Up @@ -421,3 +424,17 @@ def available_logical_cores() -> int:
return count

return len(psutil.Process().cpu_affinity())


def caller_file_and_line(distance: int = 1, relative_to: Iterable[Path] = ()) -> Tuple[str, int]:
    """Return the source file path and line number of a frame up the call stack.

    With the default ``distance=1`` this reports the caller of the function
    that invoked this helper; larger values walk further up the stack.  If any
    base path in ``relative_to`` contains the caller's file, the shortest of
    the candidate paths (absolute or relative, POSIX style) is returned.
    """
    frame_info = getframeinfo(stack()[distance + 1][0])

    source_path = Path(frame_info.filename)
    candidates: List[str] = [source_path.as_posix()]
    for base in relative_to:
        try:
            candidates.append(source_path.relative_to(base).as_posix())
        except ValueError:
            # source_path is not located under this base path; skip it.
            pass

    return min(candidates, key=len), frame_info.lineno
49 changes: 43 additions & 6 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
import dataclasses
import datetime
import functools
import json
import math
import multiprocessing
import os
Expand All @@ -20,7 +21,9 @@

# TODO: update after resolution in https://github.com/pytest-dev/pytest/issues/7469
from _pytest.fixtures import SubRequest
from pytest import MonkeyPatch

import tests
from chia.clvm.spend_sim import CostLogger
from chia.consensus.constants import ConsensusConstants
from chia.full_node.full_node import FullNode
Expand Down Expand Up @@ -66,10 +69,11 @@
from chia.util.task_timing import start_task_instrumentation, stop_task_instrumentation
from chia.wallet.wallet_node import WalletNode
from chia.wallet.wallet_node_api import WalletNodeAPI
from tests import ether
from tests.core.data_layer.util import ChiaRoot
from tests.core.node_height import node_height_at_least
from tests.simulation.test_simulation import test_constants_modified
from tests.util.misc import BenchmarkRunner, GcMode, RecordingWebServer, _AssertRuntime, measure_overhead
from tests.util.misc import BenchmarkRunner, GcMode, RecordingWebServer, TestId, _AssertRuntime, measure_overhead
from tests.util.setup_nodes import (
OldSimulatorsAndWallets,
SimulatorsAndWallets,
Expand All @@ -91,6 +95,20 @@
from tests.util.setup_nodes import setup_farmer_multi_harvester


@pytest.fixture(name="ether_setup", autouse=True)
def ether_setup_fixture(request: SubRequest, record_property: Callable[[str, object], None]) -> Iterator[None]:
    """Publish per-test pytest facilities through the ``tests.ether`` module.

    For the duration of each test, ``ether.record_property`` and
    ``ether.test_id`` are populated so code that cannot request fixtures
    directly can still reach them; the monkeypatch context restores the
    module attributes when the test finishes.
    """
    with MonkeyPatch.context() as patcher:
        patcher.setattr(ether, "record_property", record_property)
        patcher.setattr(ether, "test_id", TestId.create(node=request.node))
        yield


@pytest.fixture(autouse=True)
def ether_test_id_property_fixture(ether_setup: None, record_property: Callable[[str, object], None]) -> None:
    """Record the current test's id as a JUnit property for every test."""
    assert ether.test_id is not None, "ether.test_id is None, did you forget to use the ether_setup fixture?"
    serialized = json.dumps(ether.test_id.marshal(), ensure_ascii=True, sort_keys=True)
    record_property("test_id", serialized)


def make_old_setup_simulators_and_wallets(new: SimulatorsAndWallets) -> OldSimulatorsAndWallets:
return (
[simulator.peer_api for simulator in new.simulators],
Expand Down Expand Up @@ -131,16 +149,12 @@ def benchmark_runner_overhead_fixture() -> float:

@pytest.fixture(name="benchmark_runner")
def benchmark_runner_fixture(
request: SubRequest,
benchmark_runner_overhead: float,
record_property: Callable[[str, object], None],
benchmark_repeat: int,
) -> BenchmarkRunner:
label = request.node.name
return BenchmarkRunner(
label=label,
test_id=ether.test_id,
overhead=benchmark_runner_overhead,
record_property=record_property,
)


Expand Down Expand Up @@ -434,6 +448,13 @@ def pytest_addoption(parser: pytest.Parser):
type=int,
help=f"The number of times to run each benchmark, default {default_repeats}.",
)
group.addoption(
"--time-out-assert-repeats",
action="store",
default=default_repeats,
type=int,
help=f"The number of times to run each test with time out asserts, default {default_repeats}.",
)


def pytest_configure(config):
Expand All @@ -459,6 +480,22 @@ def benchmark_repeat_fixture() -> int:

globals()[benchmark_repeat_fixture.__name__] = benchmark_repeat_fixture

time_out_assert_repeats = config.getoption("--time-out-assert-repeats")
if time_out_assert_repeats != 1:

@pytest.fixture(
name="time_out_assert_repeat",
autouse=True,
params=[
pytest.param(repeat, id=f"time_out_assert_repeat{repeat:03d}")
for repeat in range(time_out_assert_repeats)
],
)
def time_out_assert_repeat_fixture(request: SubRequest) -> int:
return request.param

globals()[time_out_assert_repeat_fixture.__name__] = time_out_assert_repeat_fixture


def pytest_collection_modifyitems(session, config: pytest.Config, items: List[pytest.Function]):
# https://github.com/pytest-dev/pytest/issues/3730#issuecomment-567142496
Expand Down
19 changes: 19 additions & 0 deletions tests/ether.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
from __future__ import annotations

from typing import TYPE_CHECKING, Callable, Optional

if TYPE_CHECKING:
    from tests.util.misc import TestId

# NOTE: Do not just put any useful thing here. This is specifically for making
# fixture values globally available during tests. In _most_ cases fixtures
# should be directly requested using normal mechanisms. Very little should
# be put here.

# NOTE: When using this module do not import the attributes directly. Rather, import
# something like `from tests import ether`. Importing attributes directly will
# result in you likely getting the default `None` values since they are not
# populated until tests are running.

# Populated by the autouse `ether_setup` fixture (tests/conftest.py) for the
# duration of each test via monkeypatch, and restored to None afterwards.
record_property: Optional[Callable[[str, object], None]] = None
test_id: Optional[TestId] = None
Loading
Loading