diff --git a/src/python/pants/backend/python/dependency_inference/rules_test.py b/src/python/pants/backend/python/dependency_inference/rules_test.py
index 7bf68bd77ea..58e2878c04c 100644
--- a/src/python/pants/backend/python/dependency_inference/rules_test.py
+++ b/src/python/pants/backend/python/dependency_inference/rules_test.py
@@ -173,16 +173,14 @@ def test_infer_python_conftests(self) -> None:
         )
 
         self.create_file("src/python/root/conftest.py")
-        self.add_to_build_file("src/python/root", "python_library()")
+        self.add_to_build_file("src/python/root", "python_library(sources=['conftest.py'])")
 
         self.create_file("src/python/root/mid/conftest.py")
-        self.add_to_build_file("src/python/root/mid", "python_library()")
+        self.add_to_build_file("src/python/root/mid", "python_library(sources=['conftest.py'])")
 
         self.create_file("src/python/root/mid/leaf/conftest.py")
         self.create_file("src/python/root/mid/leaf/this_is_a_test.py")
-        self.add_to_build_file(
-            "src/python/root/mid/leaf", "python_tests()\npython_library(name='conftest')"
-        )
+        self.add_to_build_file("src/python/root/mid/leaf", "python_tests()")
 
         def run_dep_inference(address: Address) -> InferredDependencies:
             target = self.request_single_product(
@@ -197,11 +195,6 @@ def run_dep_inference(address: Address) -> InferredDependencies:
             [
                 Address("src/python/root", relative_file_path="conftest.py", target_name="root"),
                 Address("src/python/root/mid", relative_file_path="conftest.py", target_name="mid"),
-                Address(
-                    "src/python/root/mid/leaf",
-                    relative_file_path="conftest.py",
-                    target_name="conftest",
-                ),
             ],
             sibling_dependencies_inferrable=False,
         )
diff --git a/src/python/pants/backend/python/rules/pytest_runner.py b/src/python/pants/backend/python/rules/pytest_runner.py
index bc41ca30fc8..cd18d16ce88 100644
--- a/src/python/pants/backend/python/rules/pytest_runner.py
+++ b/src/python/pants/backend/python/rules/pytest_runner.py
@@ -4,6 +4,7 @@
 import itertools
 import logging
 from dataclasses import dataclass
+from pathlib import PurePath
 from typing import Optional, Tuple
 from uuid import UUID
 
@@ -49,6 +50,14 @@ class PythonTestFieldSet(TestFieldSet):
     sources: PythonTestsSources
     timeout: PythonTestsTimeout
 
+    def is_conftest(self) -> bool:
+        """We skip `conftest.py`, even though it belongs to a `python_tests` target, because it
+        does not itself contain any tests to run."""
+        return (
+            not self.address.is_base_target
+            and PurePath(self.address.filename).name == "conftest.py"
+        )
+
 
 @dataclass(frozen=True)
 class TestTargetSetup:
@@ -188,24 +197,27 @@ async def setup_pytest_for_target(
     )
 
 
+# TODO(#10618): Once this is fixed, move `TestTargetSetup` into an `await Get` so that we only set
+# up the test if it isn't skipped.
 @rule(desc="Run Pytest")
 async def run_python_test(
     field_set: PythonTestFieldSet,
-    test_setup: TestTargetSetup,
+    setup: TestTargetSetup,
     global_options: GlobalOptions,
     test_subsystem: TestSubsystem,
 ) -> TestResult:
-    """Runs pytest for one target."""
-    output_files = []
+    if field_set.is_conftest():
+        return TestResult.skipped(field_set.address)
 
     add_opts = [f"--color={'yes' if global_options.options.colors else 'no'}"]
+    output_files = []
 
     # Configure generation of JUnit-compatible test report.
     test_results_file = None
-    if test_setup.xml_dir:
+    if setup.xml_dir:
         test_results_file = f"{field_set.address.path_safe_spec}.xml"
         add_opts.extend(
-            (f"--junitxml={test_results_file}", "-o", f"junit_family={test_setup.junit_family}")
+            (f"--junitxml={test_results_file}", "-o", f"junit_family={setup.junit_family}")
         )
         output_files.append(test_results_file)
 
@@ -213,10 +225,7 @@ async def run_python_test(
     if test_subsystem.use_coverage:
         output_files.append(".coverage")
 
-    env = {
-        "PYTEST_ADDOPTS": " ".join(add_opts),
-        "PEX_EXTRA_SYS_PATH": ":".join(test_setup.source_roots),
-    }
+    env = {"PYTEST_ADDOPTS": " ".join(add_opts), "PEX_EXTRA_SYS_PATH": ":".join(setup.source_roots)}
 
     if test_subsystem.force:
         # This is a slightly hacky way to force the process to run: since the env var
@@ -232,14 +241,14 @@ async def run_python_test(
     result = await Get(
         FallibleProcessResult,
         PexProcess(
-            test_setup.test_runner_pex,
-            argv=test_setup.args,
-            input_digest=test_setup.input_digest,
+            setup.test_runner_pex,
+            argv=setup.args,
+            input_digest=setup.input_digest,
             output_files=tuple(output_files) if output_files else None,
             description=f"Run Pytest for {field_set.address}",
-            timeout_seconds=test_setup.timeout_seconds,
+            timeout_seconds=setup.timeout_seconds,
             extra_env=env,
-            execution_slot_variable=test_setup.execution_slot_variable,
+            execution_slot_variable=setup.execution_slot_variable,
         ),
     )
 
@@ -261,7 +270,7 @@ async def run_python_test(
     if xml_results_snapshot.files == (test_results_file,):
         xml_results_digest = await Get(
             Digest,
-            AddPrefix(xml_results_snapshot.digest, test_setup.xml_dir),  # type: ignore[arg-type]
+            AddPrefix(xml_results_snapshot.digest, setup.xml_dir),  # type: ignore[arg-type]
         )
     else:
         logger.warning(f"Failed to generate JUnit XML data for {field_set.address}.")
@@ -270,21 +279,17 @@ async def run_python_test(
         result,
         coverage_data=coverage_data,
         xml_results=xml_results_digest,
-        address_ref=field_set.address.spec,
+        address=field_set.address,
     )
 
 
 @rule(desc="Run Pytest in an interactive process")
-async def debug_python_test(test_setup: TestTargetSetup) -> TestDebugRequest:
+def debug_python_test(setup: TestTargetSetup) -> TestDebugRequest:
     process = InteractiveProcess(
-        argv=(test_setup.test_runner_pex.output_filename, *test_setup.args),
-        input_digest=test_setup.input_digest,
+        argv=(setup.test_runner_pex.output_filename, *setup.args), input_digest=setup.input_digest,
     )
     return TestDebugRequest(process)
 
 
 def rules():
-    return [
-        *collect_rules(),
-        UnionRule(TestFieldSet, PythonTestFieldSet),
-    ]
+    return [*collect_rules(), UnionRule(TestFieldSet, PythonTestFieldSet)]
diff --git a/src/python/pants/backend/python/rules/pytest_runner_integration_test.py b/src/python/pants/backend/python/rules/pytest_runner_integration_test.py
index 7e75e8e9875..13cdecd7e7e 100644
--- a/src/python/pants/backend/python/rules/pytest_runner_integration_test.py
+++ b/src/python/pants/backend/python/rules/pytest_runner_integration_test.py
@@ -133,6 +133,7 @@ def rules(cls):
     def run_pytest(
         self,
         *,
+        address: Optional[Address] = None,
         passthrough_args: Optional[str] = None,
         junit_xml_dir: Optional[str] = None,
         use_coverage: bool = False,
@@ -153,11 +154,10 @@ def run_pytest(
             args.append("--test-use-coverage")
         if execution_slot_var:
             args.append(f"--pytest-execution-slot-var={execution_slot_var}")
-
+        if not address:
+            address = Address(self.package, target_name="target")
         params = Params(
-            PythonTestFieldSet.create(
-                PythonTests({}, address=Address(self.package, target_name="target"))
-            ),
+            PythonTestFieldSet.create(PythonTests({}, address=address)),
             create_options_bootstrapper(args=args),
         )
         test_result = self.request_single_product(TestResult, params)
@@ -362,19 +362,25 @@ def test_coverage(self) -> None:
         assert f"{self.package}/test_good.py ." in result.stdout
         assert result.coverage_data is not None
 
-    def test_conftest_injection(self) -> None:
+    def test_conftest_handling(self) -> None:
+        """Tests that we a) inject a dependency on conftest.py and b) skip running directly on
+        conftest.py."""
         self.create_python_test_target([self.good_source])
-        self.create_file(
-            relpath=PurePath(self.source_root, self.conftest_source.path).as_posix(),
-            contents=self.conftest_source.content.decode(),
+        self.create_file(
+            PurePath(self.source_root, self.conftest_source.path).as_posix(),
+            self.conftest_source.content.decode(),
         )
-        self.add_to_build_file(self.source_root, "python_library()")
+        self.add_to_build_file(self.source_root, "python_tests()")
 
         result = self.run_pytest(passthrough_args="-s")
         assert result.status == Status.SUCCESS
         assert f"{self.package}/test_good.py In conftest!\n." in result.stdout
 
+        result = self.run_pytest(
+            address=Address(self.source_root, relative_file_path="conftest.py")
+        )
+        assert result.status == Status.SKIPPED
+
     def test_execution_slot_variable(self) -> None:
         source = FileContent(
             path="test_concurrency_slot.py",
diff --git a/src/python/pants/backend/python/target_types.py b/src/python/pants/backend/python/target_types.py
index 25c36990b68..4b7068e128e 100644
--- a/src/python/pants/backend/python/target_types.py
+++ b/src/python/pants/backend/python/target_types.py
@@ -243,7 +243,7 @@ class PythonBinary(Target):
 
 
 class PythonTestsSources(PythonSources):
-    default = ("test_*.py", "*_test.py", "tests.py")
+    default = ("test_*.py", "*_test.py", "tests.py", "conftest.py")
 
 
 class PythonTestsTimeout(IntField):
@@ -284,7 +284,7 @@ class PythonTests(Target):
 
     These may be written in either Pytest-style or unittest style.
 
-    All test util code, including `conftest.py`, should go into a dedicated `python_library()`
+    All test util code, other than `conftest.py`, should go into a dedicated `python_library()`
     target and then be included in the `dependencies` field.
 
     See https://www.pantsbuild.org/docs/python-test-goal.
diff --git a/src/python/pants/core/goals/test.py b/src/python/pants/core/goals/test.py
index ebaed849f6c..9f14086593d 100644
--- a/src/python/pants/core/goals/test.py
+++ b/src/python/pants/core/goals/test.py
@@ -2,6 +2,7 @@
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
 import itertools
+import logging
 from abc import ABC, ABCMeta
 from dataclasses import dataclass
 from enum import Enum
@@ -31,10 +32,13 @@
 from pants.engine.unions import UnionMembership, union
 from pants.util.logging import LogLevel
 
+logger = logging.getLogger(__name__)
+
 
 class Status(Enum):
     SUCCESS = "SUCCESS"
     FAILURE = "FAILURE"
+    SKIPPED = "SKIP"
 
 
 class CoverageReportType(Enum):
@@ -61,28 +65,40 @@ class TestResult(EngineAware):
     status: Status
     stdout: str
     stderr: str
+    address: Address
     coverage_data: Optional["CoverageData"]
     xml_results: Optional[Digest]
-    address_ref: str = ""
 
     # Prevent this class from being detected by pytest as a test class.
     __test__ = False
 
-    @staticmethod
+    @classmethod
+    def skipped(cls, address: Address) -> "TestResult":
+        return cls(
+            status=Status.SKIPPED,
+            stdout="",
+            stderr="",
+            address=address,
+            coverage_data=None,
+            xml_results=None,
+        )
+
+    @classmethod
     def from_fallible_process_result(
+        cls,
         process_result: FallibleProcessResult,
+        address: Address,
         *,
         coverage_data: Optional["CoverageData"] = None,
         xml_results: Optional[Digest] = None,
-        address_ref: str = "",
     ) -> "TestResult":
-        return TestResult(
+        return cls(
             status=Status.SUCCESS if process_result.exit_code == 0 else Status.FAILURE,
             stdout=process_result.stdout.decode(),
             stderr=process_result.stderr.decode(),
+            address=address,
             coverage_data=coverage_data,
             xml_results=xml_results,
-            address_ref=address_ref,
         )
 
     def artifacts(self) -> Optional[Dict[str, Digest]]:
@@ -97,7 +113,7 @@ def level(self):
 
     def message(self):
         result = "succeeded" if self.status == Status.SUCCESS else "failed"
-        return f"tests {result}: {self.address_ref}"
+        return f"tests {result}: {self.address}"
 
 
 @dataclass(frozen=True)
@@ -330,6 +346,8 @@ async def run_tests(
 
     # Print details.
     for result in results:
+        if result.test_result.status == Status.SKIPPED:
+            continue
         if test_subsystem.options.output == ShowOutput.NONE or (
             test_subsystem.options.output == ShowOutput.FAILED
             and result.test_result.status == Status.SUCCESS
@@ -353,6 +371,8 @@ async def run_tests(
     # Print summary
     console.print_stderr("")
     for result in results:
+        if result.test_result.status == Status.SKIPPED:
+            continue
         color = console.green if result.test_result.status == Status.SUCCESS else console.red
         # The right-align logic sees the color control codes as characters, so we have
         # to account for that. In f-strings the alignment field widths must be literals,
diff --git a/src/python/pants/core/goals/test_test.py b/src/python/pants/core/goals/test_test.py
index e3e5849cec6..12e3bfc4453 100644
--- a/src/python/pants/core/goals/test_test.py
+++ b/src/python/pants/core/goals/test_test.py
@@ -78,6 +78,7 @@ def test_result(self) -> TestResult:
             status=self.status(self.address),
             stdout=self.stdout(self.address),
             stderr=self.stderr(self.address),
+            address=self.address,
             coverage_data=MockCoverageData(self.address),
             xml_results=None,
         )
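
Reviewer note: below is a condensed, runnable sketch of the new skip path, to make the control flow easier to eyeball. The `Status` values match the diff; `Address` and `TestResult` are simplified stand-ins for the real Pants engine types (not the actual implementation), and `is_conftest` mirrors the logic added to `PythonTestFieldSet`:

```python
from dataclasses import dataclass
from enum import Enum
from pathlib import PurePath
from typing import Optional


class Status(Enum):
    SUCCESS = "SUCCESS"
    FAILURE = "FAILURE"
    SKIPPED = "SKIP"


@dataclass(frozen=True)
class Address:
    """Stand-in: a file-level address carries a relative_file_path; a base
    target (e.g. `src/py/proj:proj`) does not."""

    spec_path: str
    relative_file_path: Optional[str] = None

    @property
    def is_base_target(self) -> bool:
        return self.relative_file_path is None

    @property
    def filename(self) -> str:
        if self.relative_file_path is None:
            raise ValueError("Only file-level addresses have a filename.")
        return f"{self.spec_path}/{self.relative_file_path}"


@dataclass(frozen=True)
class TestResult:
    """Stand-in with only the fields this sketch needs."""

    status: Status
    address: Address

    @classmethod
    def skipped(cls, address: Address) -> "TestResult":
        # No process ran, so there is no output to carry along.
        return cls(status=Status.SKIPPED, address=address)


def is_conftest(address: Address) -> bool:
    # Mirrors PythonTestFieldSet.is_conftest(): only a file-level address whose
    # underlying file is named conftest.py is skipped.
    return not address.is_base_target and PurePath(address.filename).name == "conftest.py"


conftest_addr = Address("src/py/proj", relative_file_path="conftest.py")
assert is_conftest(conftest_addr)
assert TestResult.skipped(conftest_addr).status is Status.SKIPPED
assert not is_conftest(Address("src/py/proj", relative_file_path="test_app.py"))
assert not is_conftest(Address("src/py/proj"))  # base target: never skipped
```

In the rule itself, `run_python_test` returns `TestResult.skipped(field_set.address)` before launching any pytest process, and `run_tests` filters `Status.SKIPPED` results out of both the per-test output and the summary. Note that `TestTargetSetup` is still computed eagerly as a rule parameter; per the TODO above, #10618 tracks moving it into an `await Get` so that setup is skipped as well.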
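The user-facing effect, sketched with a hypothetical layout (the path is illustrative): because `conftest.py` is now part of the default `sources` for `python_tests`, a bare `python_tests()` target owns a sibling `conftest.py`, and dependency inference wires it to the tests that use it, so the old pattern of a dedicated `python_library()` for the conftest is no longer needed:

```python
# src/py/proj/BUILD (hypothetical path) — the default PythonTestsSources now
# matches test_*.py, *_test.py, tests.py, and conftest.py in this directory.
python_tests()
```

Running the `test` goal directly on such a `conftest.py` file now yields a `SKIPPED` result that is omitted from the console summary, rather than a failure from pytest exiting non-zero after collecting no tests.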