From 947aa8e0775d6aa6777c0860772436d486f869b3 Mon Sep 17 00:00:00 2001
From: pradnyeshjoshi
Date: Tue, 5 Jul 2022 22:51:30 +0000
Subject: [PATCH] skip spark tuning test; remove unused git actions

---
 .../workflows/actions/merge-cov/action.yml | 55 ---------------
 .../workflows/actions/run-tests/action.yml | 68 -------------------
 tests/ci/azureml_tests/test_groups.py      |  8 ++-
 3 files changed, 5 insertions(+), 126 deletions(-)
 delete mode 100644 .github/workflows/actions/merge-cov/action.yml
 delete mode 100644 .github/workflows/actions/run-tests/action.yml

diff --git a/.github/workflows/actions/merge-cov/action.yml b/.github/workflows/actions/merge-cov/action.yml
deleted file mode 100644
index 736eb7e958..0000000000
--- a/.github/workflows/actions/merge-cov/action.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-# Encapsulate the steps to merge code coverage reports
-# You can call this action wit the following:
-#
-# steps:
-#   ...
-#   # make sure to checkout the code before running the local action
-#   - uses: actions/checkout@v2
-#   - uses: ./.github/workflows/actions/merge-cov
-
-name: 'Merge code coverage reports'
-description: 'Merge the coverage reports from all of the different test runs.'
-
-outputs:
-  # return the filename of the final coverage report (if needed by downstream)
-  cov-report-filename:
-    description: "Filename of the code coverage report"
-    value: ${{ steps.show-final-report.outputs.cov-report-final }}
-
-runs:
-  using: "composite"
-  steps:
-    - name: Install dev-dependencies
-      shell: bash
-      run: |
-        pip install --upgrade pip setuptools wheel
-        python -m pip install coverage
-
-    - name: Show downloaded coverage reports
-      shell: bash
-      run: ls .coverage*
-
-    # Merge code coverage reports so the coverage numbers are accurate across
-    # different parallelized runs
-    - name: Merge coverage reports
-      shell: bash
-      # NOTE: Merging code reports generated from self-hosted (aml) runners can be problematic
-      # as reports may reference the source code at an unresolvable absolute path.
-      # For example, you may encounter errors like:
-      # "CoverageWarning: Couldn't parse
-      # '/home/azureuser/runner/work/recommenders/recommenders/recommenders/evaluation/__init__.py':
-      # No source for code"
-      # Work-around: Creating a symlink at the root that points to the default local runner folder: '/home/runner/work/recommenders'
-      run: |
-        sudo mkdir -p /home/azureuser
-        sudo ln -s /home/runner /home/azureuser/runner
-        python -m coverage combine .coverage*
-        python -m coverage report -i
-        python -m coverage xml -i
-
-    - name: Show merged report
-      id: show-final-report
-      shell: bash
-      run: |
-        echo "Final code coverage report: '$(ls *.xml)'"
-        echo "::set-output name=cov-report-final::$(echo $(ls *.xml))"
diff --git a/.github/workflows/actions/run-tests/action.yml b/.github/workflows/actions/run-tests/action.yml
deleted file mode 100644
index b26e89fd2e..0000000000
--- a/.github/workflows/actions/run-tests/action.yml
+++ /dev/null
@@ -1,68 +0,0 @@
-# Encapsulate the steps to run pytests
-# You can call this action wit the following:
-#
-# steps:
-#   ...
-#   # make sure to checkout the code before running the local action
-#   - uses: actions/checkout@v2
-#   - uses: ./.github/workflows/actions/run-tests
-#     with:
-#       tox-env: 'spark'
-#       test-kind: 'unit'
-#       test-marker: 'spark and notebooks'
-
-name: 'Run Python tests'
-description: 'Specify parameters to configure test subsets to run and collect test report for.'
-inputs:
-  tox-env:
-    description: "Name of the tox env. EX) cpu|gpu|spark
-      See tox.ini at root level for more info"
-    required: true
-    # Options are "cpu|gpu|spark|all" and any default tox env.
-    # For more info on default tox env, see https://tox.readthedocs.io/en/latest/config.html#tox-environments
-    default: 'all'
-  test-kind:
-    description: "The kinds of tests to run. EX) unit|integration|smoke
-      This maps to those in the 'tests/' folder"
-    required: true
-    default: 'unit'
-  test-marker:
-    description: "Finer filter for selecting the tests to run with pytest markers.
-      See https://docs.pytest.org/en/6.2.x/example/markers.html"
-    default: 'not gpu and not notebooks and not spark and not experimental'
-outputs:
-  cov-report-filename:
-    description: "Filename of the code coverage report"
-    value: ${{ steps.rename-cov-report.outputs.cov-report-filename }}
-
-runs:
-  using: "composite"
-  steps:
-    ################# Run Python tests #################
-    - name: Install build dependencies (tox)
-      shell: bash
-      run: |
-        python -m pip install --upgrade pip setuptools wheel
-        sudo apt-get update
-        sudo apt-get install -y build-essential libpython3.6 libpython3.7
-        pip install tox
-
-    - name: Run ${{ inputs.test-kind }} tests ('${{ inputs.test-marker }}')
-      shell: bash
-      # '-e py' will use the default 'python' executable found in system path
-      # for why using tox, see: https://tox.readthedocs.io/en/latest/index.html
-      # tox will do:
-      #   1. create a virtual env
-      #   2. build and install source distribution (sdist)
-      #   3. run the specified tests
-      #   4. show test reports
-      run: |
-        tox -ve ${{ inputs.tox-env }} -- tests/${{ inputs.test-kind }} -m '${{ inputs.test-marker }}'
-
-    - name: Prepare Code Coverage Report
-      id: rename-cov-report
-      shell: bash
-      run: |
-        mv .coverage '.coverage_${{ inputs.test-marker }}_${{ inputs.test-kind }}_${{ github.sha }}'
-        echo "Coverage report renamed to: '$(ls .coverage*)'"
-        echo "::set-output name=cov-report-filename::$(echo $(ls .coverage*))"
diff --git a/tests/ci/azureml_tests/test_groups.py b/tests/ci/azureml_tests/test_groups.py
index 00cf18cf82..24b5b25710 100644
--- a/tests/ci/azureml_tests/test_groups.py
+++ b/tests/ci/azureml_tests/test_groups.py
@@ -178,9 +178,11 @@
         "tests/unit/recommenders/evaluation/test_spark_evaluation.py::test_distributional_coverage",
         "tests/unit/recommenders/datasets/test_spark_splitter.py::test_min_rating_filter",
     ],
-    "group_notebooks_pyspark_001": [  # Total group time: 746.53s
-        "tests/unit/examples/test_notebooks_pyspark.py::test_spark_tuning",  # 212.29s+190.02s+180.13s+164.09s (flaky test, it rerun several times)
-    ],
+    # TODO: This is a flaky test, skip for now, to be fixed in future iterations.
+    # Refer to the issue: https://github.com/microsoft/recommenders/issues/1770
+    # "group_notebooks_pyspark_001": [  # Total group time: 746.53s
+    #     "tests/unit/examples/test_notebooks_pyspark.py::test_spark_tuning",  # 212.29s+190.02s+180.13s+164.09s (flaky test, it rerun several times)
+    # ],
     "group_notebooks_pyspark_002": [  # Total group time: 728.43s
         "tests/unit/examples/test_notebooks_pyspark.py::test_als_deep_dive_runs",
         "tests/unit/examples/test_notebooks_pyspark.py::test_data_split_runs",
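
Note on the test_groups.py hunk: commenting out "group_notebooks_pyspark_001" keeps the flaky test_spark_tuning notebook test from being scheduled at all, and also drops the ~746s group from the AzureML run. An alternative would be to leave the group mapping intact and skip the test at the pytest level, pointing at the tracking issue. The snippet below is only an illustrative sketch of that option, not part of this patch: the spark/notebooks markers are borrowed from the test-marker strings above, and the empty test body is a hypothetical stand-in for the real notebook-execution test.

import pytest

# Illustrative sketch (not part of this patch): skip the flaky Spark tuning
# notebook test in place and point readers at the tracking issue.
FLAKY_ISSUE = "https://github.com/microsoft/recommenders/issues/1770"


@pytest.mark.spark
@pytest.mark.notebooks
@pytest.mark.skip(reason=f"Flaky test, reruns several times; see {FLAKY_ISSUE}")
def test_spark_tuning():
    # Body omitted: the real test executes the Spark tuning notebook.
    ...

Either route keeps the test in the repository while issue 1770 is investigated; the approach taken in this patch has the extra effect of removing the whole group_notebooks_pyspark_001 job from the AzureML test run.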