Revert #10603 so that conftest.py belongs to python_tests again (#10619)

In #10603, we moved `conftest.py` from `python_tests` to `python_library`. We did this because Pants now runs Pytest file-at-a-time, thanks to the model change in #10511, which made running Pytest directly on `conftest.py` error out because it contains no tests.
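For concreteness, a hedged illustration of the failure mode (assumed Pytest behavior, not output captured in this PR): when the only file handed to Pytest collects zero tests, Pytest exits non-zero, and a runner that only accepts exit code 0 reports that as an error.

```python
# Illustrative only: mimic a file-at-a-time invocation against a conftest.py
# that defines fixtures but no tests (assumes pytest is installed and the
# conftest.py sits in the current directory).
import subprocess
import sys

proc = subprocess.run([sys.executable, "-m", "pytest", "conftest.py"])

# Pytest signals "no tests collected" with exit code 5
# (pytest.ExitCode.NO_TESTS_COLLECTED), i.e. a non-zero, failing result.
print(proc.returncode)  # expected: 5
```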

We decided in #10603 that `conftest.py` should live under `python_library` because it does not contain actual tests; test util code, for example, already goes under a `python_library` target.

However, having `python_library` own `conftest.py` by default turned out to be problematic, as it mixed test code into production code, which is generally not desired. See #10613 for an approach to fix this.

Instead of adding a dedicated `conftest` target, this PR moves `conftest.py` back to `python_tests` and special-cases `pytest_runner.py` to skip `conftest.py`, as sketched below. Even though this is less correct, it is simpler for users and spares 1.x users from having to change a bunch of things. Conceptually, `conftest.py` can be seen as "config" for tests, rather than traditional "test utils" like you would put in a `python_library`.
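A minimal BUILD sketch of the resulting layout (the target names and `helpers.py` below are hypothetical; only the default `sources` and the "util code goes in a `python_library`" guidance come from this PR):

```python
# BUILD (illustrative sketch): `python_tests()` owns conftest.py again through
# its default sources ("test_*.py", "*_test.py", "tests.py", "conftest.py"),
# so no dedicated conftest target is needed.
python_tests(
    name="tests",
    dependencies=[":test_utils"],
)

# Test util code other than conftest.py still goes in a python_library that
# the tests depend on. `helpers.py` is a hypothetical module name.
python_library(
    name="test_utils",
    sources=["helpers.py"],
)
```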

[ci skip-rust]
Eric-Arellano authored Aug 15, 2020
1 parent a4c022e commit 659ffd6
Showing 6 changed files with 75 additions and 50 deletions.
13 changes: 3 additions & 10 deletions src/python/pants/backend/python/dependency_inference/rules_test.py
@@ -173,16 +173,14 @@ def test_infer_python_conftests(self) -> None:
)

self.create_file("src/python/root/conftest.py")
self.add_to_build_file("src/python/root", "python_library()")
self.add_to_build_file("src/python/root", "python_library(sources=['conftest.py'])")

self.create_file("src/python/root/mid/conftest.py")
self.add_to_build_file("src/python/root/mid", "python_library()")
self.add_to_build_file("src/python/root/mid", "python_library(sources=['conftest.py'])")

self.create_file("src/python/root/mid/leaf/conftest.py")
self.create_file("src/python/root/mid/leaf/this_is_a_test.py")
self.add_to_build_file(
"src/python/root/mid/leaf", "python_tests()\npython_library(name='conftest')"
)
self.add_to_build_file("src/python/root/mid/leaf", "python_tests()")

def run_dep_inference(address: Address) -> InferredDependencies:
target = self.request_single_product(
@@ -197,11 +195,6 @@ def run_dep_inference(address: Address) -> InferredDependencies:
[
Address("src/python/root", relative_file_path="conftest.py", target_name="root"),
Address("src/python/root/mid", relative_file_path="conftest.py", target_name="mid"),
Address(
"src/python/root/mid/leaf",
relative_file_path="conftest.py",
target_name="conftest",
),
],
sibling_dependencies_inferrable=False,
)
51 changes: 28 additions & 23 deletions src/python/pants/backend/python/rules/pytest_runner.py
@@ -4,6 +4,7 @@
import itertools
import logging
from dataclasses import dataclass
from pathlib import PurePath
from typing import Optional, Tuple
from uuid import UUID

@@ -49,6 +50,14 @@ class PythonTestFieldSet(TestFieldSet):
sources: PythonTestsSources
timeout: PythonTestsTimeout

def is_conftest(self) -> bool:
"""We skip `conftest.py`, even though it belongs to a `python_tests` target, because it does
not have any tests to run."""
return (
not self.address.is_base_target
and PurePath(self.address.filename).name == "conftest.py"
)


@dataclass(frozen=True)
class TestTargetSetup:
@@ -188,35 +197,35 @@ async def setup_pytest_for_target(
)


# TODO(#10618): Once this is fixed, move `TestTargetSetup` into an `await Get` so that we only set
# up the test if it isn't skipped.
@rule(desc="Run Pytest")
async def run_python_test(
field_set: PythonTestFieldSet,
test_setup: TestTargetSetup,
setup: TestTargetSetup,
global_options: GlobalOptions,
test_subsystem: TestSubsystem,
) -> TestResult:
"""Runs pytest for one target."""
output_files = []
if field_set.is_conftest():
return TestResult.skipped(field_set.address)

add_opts = [f"--color={'yes' if global_options.options.colors else 'no'}"]

output_files = []
# Configure generation of JUnit-compatible test report.
test_results_file = None
if test_setup.xml_dir:
if setup.xml_dir:
test_results_file = f"{field_set.address.path_safe_spec}.xml"
add_opts.extend(
(f"--junitxml={test_results_file}", "-o", f"junit_family={test_setup.junit_family}")
(f"--junitxml={test_results_file}", "-o", f"junit_family={setup.junit_family}")
)
output_files.append(test_results_file)

# Configure generation of a coverage report.
if test_subsystem.use_coverage:
output_files.append(".coverage")

env = {
"PYTEST_ADDOPTS": " ".join(add_opts),
"PEX_EXTRA_SYS_PATH": ":".join(test_setup.source_roots),
}
env = {"PYTEST_ADDOPTS": " ".join(add_opts), "PEX_EXTRA_SYS_PATH": ":".join(setup.source_roots)}

if test_subsystem.force:
# This is a slightly hacky way to force the process to run: since the env var
@@ -232,14 +241,14 @@ async def run_python_test(
result = await Get(
FallibleProcessResult,
PexProcess(
test_setup.test_runner_pex,
argv=test_setup.args,
input_digest=test_setup.input_digest,
setup.test_runner_pex,
argv=setup.args,
input_digest=setup.input_digest,
output_files=tuple(output_files) if output_files else None,
description=f"Run Pytest for {field_set.address}",
timeout_seconds=test_setup.timeout_seconds,
timeout_seconds=setup.timeout_seconds,
extra_env=env,
execution_slot_variable=test_setup.execution_slot_variable,
execution_slot_variable=setup.execution_slot_variable,
),
)

@@ -261,7 +270,7 @@ async def run_python_test(
if xml_results_snapshot.files == (test_results_file,):
xml_results_digest = await Get(
Digest,
AddPrefix(xml_results_snapshot.digest, test_setup.xml_dir), # type: ignore[arg-type]
AddPrefix(xml_results_snapshot.digest, setup.xml_dir), # type: ignore[arg-type]
)
else:
logger.warning(f"Failed to generate JUnit XML data for {field_set.address}.")
@@ -270,21 +279,17 @@ async def run_python_test(
result,
coverage_data=coverage_data,
xml_results=xml_results_digest,
address_ref=field_set.address.spec,
address=field_set.address,
)


@rule(desc="Run Pytest in an interactive process")
async def debug_python_test(test_setup: TestTargetSetup) -> TestDebugRequest:
def debug_python_test(setup: TestTargetSetup) -> TestDebugRequest:
process = InteractiveProcess(
argv=(test_setup.test_runner_pex.output_filename, *test_setup.args),
input_digest=test_setup.input_digest,
argv=(setup.test_runner_pex.output_filename, *setup.args), input_digest=setup.input_digest,
)
return TestDebugRequest(process)


def rules():
return [
*collect_rules(),
UnionRule(TestFieldSet, PythonTestFieldSet),
]
return [*collect_rules(), UnionRule(TestFieldSet, PythonTestFieldSet)]
24 changes: 15 additions & 9 deletions src/python/pants/backend/python/rules/pytest_runner_integration_test.py
@@ -133,6 +133,7 @@ def rules(cls):
def run_pytest(
self,
*,
address: Optional[Address] = None,
passthrough_args: Optional[str] = None,
junit_xml_dir: Optional[str] = None,
use_coverage: bool = False,
@@ -153,11 +154,10 @@ def run_pytest(
args.append("--test-use-coverage")
if execution_slot_var:
args.append(f"--pytest-execution-slot-var={execution_slot_var}")

if not address:
address = Address(self.package, target_name="target")
params = Params(
PythonTestFieldSet.create(
PythonTests({}, address=Address(self.package, target_name="target"))
),
PythonTestFieldSet.create(PythonTests({}, address=address)),
create_options_bootstrapper(args=args),
)
test_result = self.request_single_product(TestResult, params)
@@ -362,19 +362,25 @@ def test_coverage(self) -> None:
assert f"{self.package}/test_good.py ." in result.stdout
assert result.coverage_data is not None

def test_conftest_injection(self) -> None:
def test_conftest_handling(self) -> None:
"""Tests that we a) inject a dependency on conftest.py and b) skip running directly on
conftest.py."""
self.create_python_test_target([self.good_source])

self.create_file(
relpath=PurePath(self.source_root, self.conftest_source.path).as_posix(),
contents=self.conftest_source.content.decode(),
PurePath(self.source_root, self.conftest_source.path).as_posix(),
self.conftest_source.content.decode(),
)
self.add_to_build_file(self.source_root, "python_library()")
self.add_to_build_file(self.source_root, "python_tests()")

result = self.run_pytest(passthrough_args="-s")
assert result.status == Status.SUCCESS
assert f"{self.package}/test_good.py In conftest!\n." in result.stdout

result = self.run_pytest(
address=Address(self.source_root, relative_file_path="conftest.py")
)
assert result.status == Status.SKIPPED

def test_execution_slot_variable(self) -> None:
source = FileContent(
path="test_concurrency_slot.py",
4 changes: 2 additions & 2 deletions src/python/pants/backend/python/target_types.py
@@ -243,7 +243,7 @@ class PythonBinary(Target):


class PythonTestsSources(PythonSources):
default = ("test_*.py", "*_test.py", "tests.py")
default = ("test_*.py", "*_test.py", "tests.py", "conftest.py")


class PythonTestsTimeout(IntField):
@@ -284,7 +284,7 @@ class PythonTests(Target):
These may be written in either Pytest-style or unittest style.
All test util code, including `conftest.py`, should go into a dedicated `python_library()`
All test util code, other than `conftest.py`, should go into a dedicated `python_library()`
target and then be included in the `dependencies` field.
See https://www.pantsbuild.org/docs/python-test-goal.
32 changes: 26 additions & 6 deletions src/python/pants/core/goals/test.py
@@ -2,6 +2,7 @@
# Licensed under the Apache License, Version 2.0 (see LICENSE).

import itertools
import logging
from abc import ABC, ABCMeta
from dataclasses import dataclass
from enum import Enum
@@ -31,10 +32,13 @@
from pants.engine.unions import UnionMembership, union
from pants.util.logging import LogLevel

logger = logging.getLogger(__name__)


class Status(Enum):
SUCCESS = "SUCCESS"
FAILURE = "FAILURE"
SKIPPED = "SKIP"


class CoverageReportType(Enum):
@@ -61,28 +65,40 @@ class TestResult(EngineAware):
status: Status
stdout: str
stderr: str
address: Address
coverage_data: Optional["CoverageData"]
xml_results: Optional[Digest]
address_ref: str = ""

# Prevent this class from being detected by pytest as a test class.
__test__ = False

@staticmethod
@classmethod
def skipped(cls, address: Address) -> "TestResult":
return cls(
status=Status.SKIPPED,
stdout="",
stderr="",
address=address,
coverage_data=None,
xml_results=None,
)

@classmethod
def from_fallible_process_result(
cls,
process_result: FallibleProcessResult,
address: Address,
*,
coverage_data: Optional["CoverageData"] = None,
xml_results: Optional[Digest] = None,
address_ref: str = "",
) -> "TestResult":
return TestResult(
return cls(
status=Status.SUCCESS if process_result.exit_code == 0 else Status.FAILURE,
stdout=process_result.stdout.decode(),
stderr=process_result.stderr.decode(),
address=address,
coverage_data=coverage_data,
xml_results=xml_results,
address_ref=address_ref,
)

def artifacts(self) -> Optional[Dict[str, Digest]]:
@@ -97,7 +113,7 @@ def level(self):

def message(self):
result = "succeeded" if self.status == Status.SUCCESS else "failed"
return f"tests {result}: {self.address_ref}"
return f"tests {result}: {self.address}"


@dataclass(frozen=True)
@@ -330,6 +346,8 @@ async def run_tests(

# Print details.
for result in results:
if result.test_result.status == Status.SKIPPED:
continue
if test_subsystem.options.output == ShowOutput.NONE or (
test_subsystem.options.output == ShowOutput.FAILED
and result.test_result.status == Status.SUCCESS
@@ -353,6 +371,8 @@ async def run_tests(
# Print summary
console.print_stderr("")
for result in results:
if result.test_result.status == Status.SKIPPED:
continue
color = console.green if result.test_result.status == Status.SUCCESS else console.red
# The right-align logic sees the color control codes as characters, so we have
# to account for that. In f-strings the alignment field widths must be literals,
1 change: 1 addition & 0 deletions src/python/pants/core/goals/test_test.py
@@ -78,6 +78,7 @@ def test_result(self) -> TestResult:
status=self.status(self.address),
stdout=self.stdout(self.address),
stderr=self.stderr(self.address),
address=self.address,
coverage_data=MockCoverageData(self.address),
xml_results=None,
)