From 799b9fa77d6727bb22443820f9252cb88afd47bd Mon Sep 17 00:00:00 2001
From: Padraic Slattery
Date: Thu, 16 Jan 2025 19:04:39 +0100
Subject: [PATCH] Add flag to print all failures to console

---
 action.yml                | 16 +++++++-
 docs/getting_started.md   |  1 +
 src/dbt_bouncer/main.py   |  7 ++++
 src/dbt_bouncer/runner.py | 10 +++--
 src/dbt_bouncer/utils.py  | 11 ++++--
 tests/unit/test_runner.py |  4 ++
 tests/unit/test_utils.py  | 82 ++++++++++++++++++++++++++++++++++++++-
 7 files changed, 122 insertions(+), 9 deletions(-)

diff --git a/action.yml b/action.yml
index 72bc61b0..9a75d28a 100644
--- a/action.yml
+++ b/action.yml
@@ -15,6 +15,10 @@ inputs:
       true: Send a comment to the GitHub PR with a list of failed checks.
       false: Do not send a comment.
     required: false
+  show-all-failures:
+    default: 'false'
+    description: Print all failures to the console.
+    required: false
   verbose:
     default: 'false'
     description: Run dbt-bouncer in verbose mode.
@@ -34,6 +38,16 @@ runs:
           echo "output-file-param=--output-file /app/${{ inputs.output-file }}" >> $GITHUB_OUTPUT
         fi
 
+    - name: Assemble `show-all-failures` parameter
+      id: assemble-show-all-failures-param
+      shell: bash
+      run: >
+        if [[ "${{ inputs.show-all-failures }}" = "false" ]]; then
+          echo "show-all-failures-param=" >> $GITHUB_OUTPUT
+        else
+          echo "show-all-failures-param=--show-all-failures" >> $GITHUB_OUTPUT
+        fi
+
     - name: Assemble `verbose` parameter
       id: assemble-verbose-param
       shell: bash
@@ -58,7 +72,7 @@ runs:
           ghcr.io/godatadriven/dbt-bouncer:v0.0.0 \
           --config-file /app/${{ inputs.config-file }} \
           --create-pr-comment-file ${{ inputs.send-pr-comment }} \
-          ${{ steps.assemble-output-file-param.outputs.output-file-param }} ${{ steps.assemble-verbose-param.outputs.verbose-param }}
+          ${{ steps.assemble-output-file-param.outputs.output-file-param }} ${{ steps.assemble-show-all-failures-param.outputs.show-all-failures-param }} ${{ steps.assemble-verbose-param.outputs.verbose-param }}
 
     - name: Send PR comment
       if: always() && inputs.send-pr-comment == 'true' && steps.run-dbt-bouncer.outcome != 'success'
diff --git a/docs/getting_started.md b/docs/getting_started.md
index 48c34064..15222b5c 100644
--- a/docs/getting_started.md
+++ b/docs/getting_started.md
@@ -84,6 +84,7 @@ jobs:
           config-file: ./
           output-file: results.json # optional, default does not save a results file
           send-pr-comment: true # optional, defaults to true
+          show-all-failures: false # optional, defaults to false
           verbose: false # optional, defaults to false
 ```
 
diff --git a/src/dbt_bouncer/main.py b/src/dbt_bouncer/main.py
index e4dbe1ae..4fb9a276 100644
--- a/src/dbt_bouncer/main.py
+++ b/src/dbt_bouncer/main.py
@@ -31,6 +31,11 @@
     required=False,
     type=Path,
 )
+@click.option(
+    "--show-all-failures",
+    help="If passed, all failures will be printed to the console.",
+    is_flag=True,
+)
 @click.option("-v", "--verbosity", help="Verbosity.", default=0, count=True)
 @click.pass_context
 @click.version_option()
@@ -39,6 +44,7 @@ def cli(
     config_file: PurePath,
     create_pr_comment_file: bool,
     output_file: Union[Path, None],
+    show_all_failures: bool,
     verbosity: int,
 ) -> None:
     """Entrypoint for dbt-bouncer.
@@ -151,6 +157,7 @@ def cli(
         output_file=output_file,
         run_results=project_run_results,
         semantic_models=project_semantic_models,
+        show_all_failures=show_all_failures,
         sources=project_sources,
         tests=project_tests,
         unit_tests=project_unit_tests,
diff --git a/src/dbt_bouncer/runner.py b/src/dbt_bouncer/runner.py
index 948b643d..ac67f68d 100644
--- a/src/dbt_bouncer/runner.py
+++ b/src/dbt_bouncer/runner.py
@@ -50,6 +50,7 @@ def runner(
     output_file: Union[Path, None],
     run_results: List["DbtBouncerRunResult"],
     semantic_models: List["DbtBouncerSemanticModel"],
+    show_all_failures: bool,
     sources: List["DbtBouncerSource"],
     tests: List["DbtBouncerTest"],
     unit_tests: List["UnitTests"],
@@ -192,8 +193,8 @@ def runner(
             f"`dbt-bouncer` {'failed' if num_checks_error > 0 else 'has warnings'}. Please see below for more details or run `dbt-bouncer` with the `-v` flag."
             + (
                 ""
-                if num_checks_error < 25
-                else " More than 25 checks failed, to see a full list of all failed checks re-run `dbt-bouncer` with the `--output-file` flag."
+                if num_checks_error < 25 or show_all_failures
+                else " More than 25 checks failed; to see a full list of all failed checks, re-run `dbt-bouncer` with the `--output-file` or `--show-all-failures` flag."
             )
         )
         failed_checks = [
@@ -209,7 +210,7 @@ def runner(
         logger(
             ("Failed checks:\n" if num_checks_error > 0 else "Warning checks:\n")
             + tabulate(
-                failed_checks[:25],  # Print max of 25 failed tests to console
+                failed_checks if show_all_failures else failed_checks[:25],
                 headers={
                     "check_run_id": "Check name",
                     "severity": "Severity",
@@ -223,7 +224,8 @@ def runner(
         create_github_comment_file(
             failed_checks=[
                 [f["check_run_id"], f["failure_message"]] for f in failed_checks
-            ]
+            ],
+            show_all_failures=show_all_failures,
         )
 
     logging.info(
diff --git a/src/dbt_bouncer/utils.py b/src/dbt_bouncer/utils.py
index 4ed0ebdf..6876ef2c 100644
--- a/src/dbt_bouncer/utils.py
+++ b/src/dbt_bouncer/utils.py
@@ -30,15 +30,20 @@ def clean_path_str(path: str) -> str:
     return path.replace("\\", "/") if path is not None else ""
 
 
-def create_github_comment_file(failed_checks: List[List[str]]) -> None:
+def create_github_comment_file(
+    failed_checks: List[List[str]], show_all_failures: bool
+) -> None:
     """Create a markdown file containing a comment for GitHub."""
     md_formatted_comment = make_markdown_table(
-        [["Check name", "Failure message"], *sorted(failed_checks[:25])],
+        [
+            ["Check name", "Failure message"],
+            *sorted(failed_checks if show_all_failures else failed_checks[:25]),
+        ],
     )
 
     # Would like to be more specific and include the job ID, but it's not exposed as an environment variable: https://github.com/actions/runner/issues/324
     md_formatted_comment = f"## **Failed `dbt-bouncer`** checks\n\n{md_formatted_comment}\n\nSent from this [GitHub Action workflow run](https://github.com/{os.environ.get('GITHUB_REPOSITORY', None)}/actions/runs/{os.environ.get('GITHUB_RUN_ID', None)})."
-    if len(failed_checks) > 25:
+    if len(failed_checks) > 25 and not show_all_failures:
         md_formatted_comment += f"\n\n**Note:** Only the first 25 failed checks (of {len(failed_checks)}) are shown."
 
     logging.debug(f"{md_formatted_comment=}")
diff --git a/tests/unit/test_runner.py b/tests/unit/test_runner.py
index 2f2a78b5..a63c3d22 100644
--- a/tests/unit/test_runner.py
+++ b/tests/unit/test_runner.py
@@ -155,6 +155,7 @@ def test_runner_coverage(caplog, tmp_path):
         output_file=tmp_path / "coverage.json",
         run_results=[],
         semantic_models=[],
+        show_all_failures=False,
         sources=[],
         tests=[],
         unit_tests=[],
@@ -289,6 +290,7 @@ def test_runner_failure():
         output_file=None,
         run_results=[],
         semantic_models=[],
+        show_all_failures=False,
         sources=[],
         tests=[],
         unit_tests=[],
@@ -415,6 +417,7 @@ def test_runner_success():
         output_file=None,
         run_results=[],
         semantic_models=[],
+        show_all_failures=False,
         sources=[],
         tests=[],
         unit_tests=[],
@@ -521,6 +524,7 @@ def test_runner_windows(caplog, tmp_path):
         output_file=tmp_path / "coverage.json",
         run_results=[],
         semantic_models=[],
+        show_all_failures=False,
         sources=[],
         tests=[],
         unit_tests=[],
diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py
index 84d1e297..4aec0e2a 100644
--- a/tests/unit/test_utils.py
+++ b/tests/unit/test_utils.py
@@ -20,13 +20,93 @@ def test_create_github_comment_file(monkeypatch, tmp_path):
             ["check_model_description_populated", "message_1"],
             ["check_model_description_populated", "message_2"],
         ]
-        create_github_comment_file(failed_checks)
+        create_github_comment_file(failed_checks, show_all_failures=False)
         assert (
             (tmp_path / "github-comment.md").read_text()
             == "## **Failed `dbt-bouncer`** checks\n\n\n| Check name | Failure message |\n| :--- | :--- |\n| check_model_description_populated | message_1 |\n| check_model_description_populated | message_2 |\n\n\nSent from this [GitHub Action workflow run](https://github.com/None/actions/runs/None)."
         )
 
 
+def test_create_github_comment_file_show_all_failures_false(monkeypatch, tmp_path):
+    monkeypatch.chdir(tmp_path)
+
+    with mock.patch.dict(os.environ, clear=True):
+        failed_checks = [
+            ["check_model_description_populated", "message_1"],
+            ["check_model_description_populated", "message_2"],
+            ["check_model_description_populated", "message_3"],
+            ["check_model_description_populated", "message_4"],
+            ["check_model_description_populated", "message_5"],
+            ["check_model_description_populated", "message_6"],
+            ["check_model_description_populated", "message_7"],
+            ["check_model_description_populated", "message_8"],
+            ["check_model_description_populated", "message_9"],
+            ["check_model_description_populated", "message_10"],
+            ["check_model_description_populated", "message_11"],
+            ["check_model_description_populated", "message_12"],
+            ["check_model_description_populated", "message_13"],
+            ["check_model_description_populated", "message_14"],
+            ["check_model_description_populated", "message_15"],
+            ["check_model_description_populated", "message_16"],
+            ["check_model_description_populated", "message_17"],
+            ["check_model_description_populated", "message_18"],
+            ["check_model_description_populated", "message_19"],
+            ["check_model_description_populated", "message_20"],
+            ["check_model_description_populated", "message_21"],
+            ["check_model_description_populated", "message_22"],
+            ["check_model_description_populated", "message_23"],
+            ["check_model_description_populated", "message_24"],
+            ["check_model_description_populated", "message_25"],
+            ["check_model_description_populated", "message_26"],
+            ["check_model_description_populated", "message_27"],
+            ["check_model_description_populated", "message_28"],
+            ["check_model_description_populated", "message_29"],
+            ["check_model_description_populated", "message_30"],
+        ]
+        create_github_comment_file(failed_checks, show_all_failures=False)
+        assert len((tmp_path / "github-comment.md").read_text().split("\n")) == 35
+
+
+def test_create_github_comment_file_show_all_failures_true(monkeypatch, tmp_path):
+    monkeypatch.chdir(tmp_path)
+
+    with mock.patch.dict(os.environ, clear=True):
+        failed_checks = [
+            ["check_model_description_populated", "message_1"],
+            ["check_model_description_populated", "message_2"],
+            ["check_model_description_populated", "message_3"],
+            ["check_model_description_populated", "message_4"],
+            ["check_model_description_populated", "message_5"],
+            ["check_model_description_populated", "message_6"],
+            ["check_model_description_populated", "message_7"],
+            ["check_model_description_populated", "message_8"],
+            ["check_model_description_populated", "message_9"],
+            ["check_model_description_populated", "message_10"],
+            ["check_model_description_populated", "message_11"],
+            ["check_model_description_populated", "message_12"],
+            ["check_model_description_populated", "message_13"],
+            ["check_model_description_populated", "message_14"],
+            ["check_model_description_populated", "message_15"],
+            ["check_model_description_populated", "message_16"],
+            ["check_model_description_populated", "message_17"],
+            ["check_model_description_populated", "message_18"],
+            ["check_model_description_populated", "message_19"],
+            ["check_model_description_populated", "message_20"],
+            ["check_model_description_populated", "message_21"],
+            ["check_model_description_populated", "message_22"],
+            ["check_model_description_populated", "message_23"],
+            ["check_model_description_populated", "message_24"],
+            ["check_model_description_populated", "message_25"],
+            ["check_model_description_populated", "message_26"],
+            ["check_model_description_populated", "message_27"],
+            ["check_model_description_populated", "message_28"],
+            ["check_model_description_populated", "message_29"],
+            ["check_model_description_populated", "message_30"],
+        ]
+        create_github_comment_file(failed_checks, show_all_failures=True)
+        assert len((tmp_path / "github-comment.md").read_text().split("\n")) == 38
+
+
 @pytest.mark.parametrize(
     ("data_in", "data_out"),
     [