Merge remote-tracking branch 'upstream/main' into autoapi
* upstream/main: (143 commits)
  Updated environment lockfiles (SciTools#6020)
  Add `MeshCoord.collapsed` (SciTools#6003)
  Bump scitools/workflows from 2024.06.2 to 2024.06.3 (SciTools#6015)
  Mesh saveload fix (SciTools#6004)
  used tabs for the install info (SciTools#6013)
  Fix array_equal behaviour for masked arrays (SciTools#4457)
  Bump scitools/workflows from 2024.06.1 to 2024.06.2 (SciTools#6008)
  [pre-commit.ci] pre-commit autoupdate (SciTools#6007)
  Updated environment lockfiles (SciTools#5996)
  Added more descriptive errors within concatenate (SciTools#6005)
  Bump scitools/workflows from 2024.06.0 to 2024.06.1 (SciTools#5998)
  [pre-commit.ci] pre-commit autoupdate (SciTools#5997)
  Bump scitools/workflows from 2024.05.1 to 2024.06.0 (SciTools#5986)
  [pre-commit.ci] pre-commit autoupdate (SciTools#5980)
  Updated environment lockfiles (SciTools#5983)
  Bump scitools/workflows from 2024.05.0 to 2024.05.1 (SciTools#5984)
  Make `slices_over` tests go faster (SciTools#5973)
  Updated environment lockfiles (SciTools#5979)
  Update lock files with associated fixes (SciTools#5953)
  List 25 slowest tests (SciTools#5969)
  ...
tkknight committed Jun 23, 2024
2 parents 3715d71 + 0cdbdee commit afbd003
Showing 283 changed files with 10,545 additions and 5,939 deletions.
9 changes: 9 additions & 0 deletions .github/labeler.yml
@@ -0,0 +1,9 @@
# benchmark_this automatically triggers the benchmark workflow when added by
# a user. No triggering happens when GitHub Actions adds the label (this
# avoids security vulnerabilities), so alternative triggers for the below
# files are therefore included in workflows/benchmarks_run.yml. Automatic
# labelling is still included here to make it easier to search pull requests,
# and to reinforce the culture of using this label.
benchmark_this:
- changed-files:
- any-glob-to-any-file: ['requirements/locks/*.lock', "setup.py"]
5 changes: 5 additions & 0 deletions .github/pull_request_template.md
@@ -7,3 +7,8 @@

---
[Consult Iris pull request check list]( https://scitools-iris.readthedocs.io/en/latest/developers_guide/contributing_pull_request_checklist.html)

---
Add any of the below labels to trigger actions on this PR:

- https://github.com/SciTools/iris/labels/benchmark_this
57 changes: 44 additions & 13 deletions .github/workflows/benchmarks_run.yml
@@ -16,15 +16,46 @@ on:
required: false
type: string
pull_request:
types: [labeled]
# Add the `labeled` type to the default list.
types: [labeled, opened, synchronize, reopened]

jobs:
pre-checks:
runs-on: ubuntu-latest
if: github.repository == 'SciTools/iris'
outputs:
overnight: ${{ steps.overnight.outputs.check }}
branch: ${{ steps.branch.outputs.check }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 2
- id: files-changed
uses: marceloprado/has-changed-path@df1b7a3161b8fb9fd8c90403c66a9e66dfde50cb
with:
# SEE ALSO .github/labeler.yml .
paths: requirements/locks/*.lock setup.py
- id: overnight
if: github.event_name != 'pull_request'
run: echo "check=true" >> "$GITHUB_OUTPUT"
- id: branch
if: >
github.event_name == 'pull_request'
&&
(
steps.files-changed.outputs.changed == 'true'
||
github.event.label.name == 'benchmark_this'
)
run: echo "check=true" >> "$GITHUB_OUTPUT"


benchmark:
if: >
github.repository == 'SciTools/iris' &&
(github.event_name != 'pull_request' ||
github.event.label.name == 'benchmark_this')
runs-on: ubuntu-latest
needs: pre-checks
if: >
needs.pre-checks.outputs.overnight == 'true' ||
needs.pre-checks.outputs.branch == 'true'
env:
IRIS_TEST_DATA_LOC_PATH: benchmarks
@@ -40,13 +71,13 @@ jobs:
with:
fetch-depth: 0

- name: Install ASV & Nox
- name: Install Nox
run: |
pip install asv nox
pip install nox
- name: Cache environment directories
id: cache-env-dir
uses: actions/cache@v3
uses: actions/cache@v4
with:
path: |
.nox
@@ -56,7 +87,7 @@

- name: Cache test data directory
id: cache-test-data
uses: actions/cache@v3
uses: actions/cache@v4
with:
path: |
${{ env.IRIS_TEST_DATA_PATH }}
@@ -76,16 +107,16 @@
echo "OVERRIDE_TEST_DATA_REPOSITORY=${GITHUB_WORKSPACE}/${IRIS_TEST_DATA_PATH}/test_data" >> $GITHUB_ENV
- name: Benchmark this pull request
if: ${{ github.event.label.name == 'benchmark_this' }}
if: needs.pre-checks.outputs.branch == 'true'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_NUMBER: ${{ github.event.number }}
run: |
python benchmarks/bm_runner.py branch origin/${{ github.base_ref }}
nox -s benchmarks -- branch origin/${{ github.base_ref }}
- name: Run overnight benchmarks
id: overnight
if: ${{ github.event_name != 'pull_request' }}
if: needs.pre-checks.outputs.overnight == 'true'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
Expand All @@ -97,7 +128,7 @@ jobs:
if [ "$first_commit" != "" ]
then
python benchmarks/bm_runner.py overnight $first_commit
nox -s benchmarks -- overnight $first_commit
fi
- name: Warn of failure
2 changes: 1 addition & 1 deletion .github/workflows/ci-manifest.yml
@@ -23,4 +23,4 @@ concurrency:
jobs:
manifest:
name: "check-manifest"
uses: scitools/workflows/.github/workflows/ci-manifest.yml@2023.12.1
uses: scitools/workflows/.github/workflows/ci-manifest.yml@2024.06.3
16 changes: 10 additions & 6 deletions .github/workflows/ci-tests.yml
@@ -35,18 +35,18 @@ jobs:
fail-fast: false
matrix:
os: ["ubuntu-latest"]
python-version: ["3.11"]
python-version: ["3.12"]
session: ["doctest", "gallery", "linkcheck"]
include:
- os: "ubuntu-latest"
python-version: "3.11"
python-version: "3.12"
session: "tests"
coverage: "--coverage"
- os: "ubuntu-latest"
python-version: "3.10"
python-version: "3.11"
session: "tests"
- os: "ubuntu-latest"
python-version: "3.9"
python-version: "3.10"
session: "tests"

env:
@@ -136,9 +136,13 @@ jobs:
- name: "iris ${{ matrix.session }}"
env:
PY_VER: ${{ matrix.python-version }}
# Force coloured output on GitHub Actions.
PY_COLORS: "1"
run: |
nox --session ${{ matrix.session }} -- --verbose ${{ matrix.coverage }}
- name: Upload coverage report
uses: codecov/codecov-action@v3
- name: "upload coverage report"
if: ${{ matrix.coverage }}
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
2 changes: 1 addition & 1 deletion .github/workflows/ci-wheels.yml
@@ -52,7 +52,7 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ["3.9", "3.10", "3.11"]
python-version: ["3.10", "3.11", "3.12"]
session: ["wheel"]
env:
ENV_NAME: "ci-wheels"
15 changes: 15 additions & 0 deletions .github/workflows/labeler.yml
@@ -0,0 +1,15 @@
# Reference
# - https://github.com/actions/labeler

name: "Pull Request Labeler"
on:
- pull_request_target

jobs:
labeler:
permissions:
contents: read
pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: actions/labeler@v5
2 changes: 1 addition & 1 deletion .github/workflows/refresh-lockfiles.yml
@@ -14,5 +14,5 @@ on:

jobs:
refresh_lockfiles:
uses: scitools/workflows/.github/workflows/refresh-lockfiles.yml@2023.12.1
uses: scitools/workflows/.github/workflows/refresh-lockfiles.yml@2024.06.3
secrets: inherit
15 changes: 11 additions & 4 deletions .pre-commit-config.yaml
@@ -13,7 +13,7 @@ minimum_pre_commit_version: 1.21.0

repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
rev: v4.6.0
hooks:
# Prevent giant files from being committed.
- id: check-added-large-files
@@ -29,7 +29,7 @@ repos:
- id: no-commit-to-branch

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: "v0.1.11"
rev: "v0.4.9"
hooks:
- id: ruff
types: [file, python]
@@ -38,14 +38,14 @@
types: [file, python]

- repo: https://github.com/codespell-project/codespell
rev: "v2.2.6"
rev: "v2.3.0"
hooks:
- id: codespell
types_or: [asciidoc, python, markdown, rst]
additional_dependencies: [tomli]

- repo: https://github.com/PyCQA/flake8
rev: 7.0.0
rev: 7.1.0
hooks:
- id: flake8
types: [file, python]
@@ -61,3 +61,10 @@ repos:
hooks:
- id: sort-all
types: [file, python]

- repo: https://github.com/numpy/numpydoc
rev: v1.7.0
hooks:
- id: numpydoc-validation
exclude: "^lib/iris/tests/|docs/gallery_code/"
types: [file, python]
4 changes: 2 additions & 2 deletions .readthedocs.yml
@@ -1,9 +1,9 @@
version: 2

build:
os: ubuntu-20.04
os: "ubuntu-22.04"
tools:
python: mambaforge-4.10
python: "mambaforge-22.9"
jobs:
post_checkout:
# The SciTools/iris repository is shallow i.e., has a .git/shallow,
1 change: 0 additions & 1 deletion .ruff.toml
@@ -29,7 +29,6 @@ lint.ignore = [
"D102", # Missing docstring in public method
# (D-3) Temporary, before an initial review, either fix occurrences or move to (2).
"D103", # Missing docstring in public function
"D401", # First line of docstring should be in imperative mood: ...

# pyupgrade (UP)
# https://docs.astral.sh/ruff/rules/#pyupgrade-up
43 changes: 35 additions & 8 deletions benchmarks/README.md
@@ -20,13 +20,13 @@ the PR's base branch, thus showing performance differences introduced
by the PR. (This run is managed by
[the aforementioned GitHub Action](../.github/workflows/benchmark.yml)).

`asv ...` commands must be run from this directory. You will need to have ASV
installed, as well as Nox (see
[Benchmark environments](#benchmark-environments)).

The benchmark runner ([bm_runner.py](./bm_runner.py)) provides conveniences for
To run locally: the **benchmark runner** provides conveniences for
common benchmark setup and run tasks, including replicating the automated
overnight run locally. See `python bm_runner.py --help` for detail.
overnight run locally. This is accessed via the Nox `benchmarks` session - see
`nox -s benchmarks -- --help` for detail (_see also:
[bm_runner.py](./bm_runner.py)_). Alternatively you can directly run `asv ...`
commands from this directory (you will still need Nox installed - see
[Benchmark environments](#benchmark-environments)).

A significant portion of benchmark run time is environment management. Run-time
can be reduced by placing the benchmark environment on the same file system as
@@ -43,11 +43,17 @@ if it is not already. You can achieve this by either:

* `OVERRIDE_TEST_DATA_REPOSITORY` - required - some benchmarks use
`iris-test-data` content, and your local `site.cfg` is not available for
benchmark scripts.
benchmark scripts. The benchmark runner defers to any value already set in
the shell, but will otherwise download `iris-test-data` and set the variable
accordingly.
* `DATA_GEN_PYTHON` - required - path to a Python executable that can be
used to generate benchmark test objects/files; see
[Data generation](#data-generation). The benchmark runner sets this
automatically, but will defer to any value already set in the shell.
automatically, but will defer to any value already set in the shell. Note that
[Mule](https://github.com/metomi/mule) will be automatically installed into
this environment, and sometimes
[iris-test-data](https://github.com/SciTools/iris-test-data) (see
`OVERRIDE_TEST_DATA_REPOSITORY`).
* `BENCHMARK_DATA` - optional - path to a directory for benchmark synthetic
test data, which the benchmark scripts will create if it doesn't already
exist. Defaults to `<root>/benchmarks/.data/` if not set. Note that some of
@@ -62,6 +68,23 @@ interest. Is set during the benchmark runner `cperf` and `sperf` sub-commands.

[See the ASV docs](https://asv.readthedocs.io/) for full detail.
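
As a purely illustrative sketch (not the actual runner or benchmark code), a
data-generation helper might consume the variables above roughly like this -
everything except the environment variable names is hypothetical:

```python
import os
from pathlib import Path

# BENCHMARK_DATA is optional; fall back to <root>/benchmarks/.data/ as documented.
data_dir = Path(os.environ.get("BENCHMARK_DATA", Path(__file__).parent / ".data"))
data_dir.mkdir(parents=True, exist_ok=True)

# DATA_GEN_PYTHON is required; the benchmark runner normally sets it if unset.
data_gen_python = Path(os.environ["DATA_GEN_PYTHON"])

# OVERRIDE_TEST_DATA_REPOSITORY points benchmarks at a local iris-test-data copy.
test_data_repo = os.environ.get("OVERRIDE_TEST_DATA_REPOSITORY")
```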

### What benchmarks to write

It is not possible to maintain a full suite of 'unit style' benchmarks:

* Benchmarks take longer to run than tests.
* Small benchmarks are more vulnerable to noise - they report a lot of false
positive regressions.

We therefore recommend writing benchmarks representing scripts or single
operations that are likely to be run at the user level.
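
For instance, a user-level benchmark following ASV's `time_*` convention might
look roughly like the sketch below (hypothetical names, illustrative only - not
an existing benchmark in this suite):

```python
import numpy as np

import iris.analysis
from iris.coords import DimCoord
from iris.cube import Cube


class UserLevelCollapse:
    """Sketch of a benchmark covering a whole user-facing operation."""

    def setup(self):
        # Build the test object once, outside the timed section.
        data = np.zeros((500, 500), dtype=np.float32)
        x = DimCoord(np.arange(500, dtype=np.float64), long_name="x")
        y = DimCoord(np.arange(500, dtype=np.float64), long_name="y")
        self.cube = Cube(data, dim_coords_and_dims=[(y, 0), (x, 1)])

    def time_collapse_mean(self):
        # Time the kind of call a user would actually make.
        self.cube.collapsed("x", iris.analysis.MEAN)
```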

The drawback of this approach: a reported regression is less likely to reveal
the root cause (e.g. if a commit caused a regression in coordinate-creation
time, but the only benchmark covering this was for file-loading). Be prepared
for manual investigations; and consider committing any useful benchmarks as
[on-demand benchmarks](#on-demand-benchmarks) for future developers to use.

### Data generation
**Important:** be sure not to use the benchmarking environment to generate any
test objects/files, as this environment changes with each commit being
Expand All @@ -86,6 +109,10 @@ estimate run-time, and these will still be subject to the original problem.

### Scaling / non-Scaling Performance Differences

**(We no longer advocate the below for benchmarks run during CI, given the
limited available runtime and risk of false-positives. It remains useful for
manual investigations).**
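
For such manual investigations, a size parameter can be added with ASV's
`params` mechanism, roughly as in this sketch (hypothetical names, illustrative
only); the rationale is explained below:

```python
import numpy as np

from iris.cube import Cube


class SizeParameterised:
    """Sketch: separate scaling from non-scaling costs via a size parameter."""

    params = [1, 100, 1_000]
    param_names = ["side_length"]

    def setup(self, side_length):
        self.data = np.zeros((side_length, side_length), dtype=np.float32)

    def time_create_cube(self, side_length):
        # A cost that grows with side_length sits in the scaling part of the
        # functionality; a cost that is flat across sizes is fixed overhead.
        Cube(self.data)
```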

When comparing performance between commits/file-type/whatever it can be helpful
to know if the differences exist in scaling or non-scaling parts of the Iris
functionality in question. This can be done using a size parameter, setting
6 changes: 5 additions & 1 deletion benchmarks/asv.conf.json
@@ -6,6 +6,10 @@
"environment_type": "conda-delegated",
"show_commit_url": "https://github.com/scitools/iris/commit/",
"branches": ["upstream/main"],
"build_command": [
"python setup.py build",
"python -mpip wheel --no-deps -w {build_cache_dir} {build_dir}"
],

"benchmark_dir": "./benchmarks",
"env_dir": ".asv/env",
@@ -19,7 +23,7 @@
// * No build-time environment variables.
// * Is run in the same environment as the ASV install itself.
"delegated_env_commands": [
"PY_VER=3.11 nox --envdir={conf_dir}/.asv/env/nox01 --session=tests --install-only --no-error-on-external-run --verbose"
"PY_VER=3.12 nox --envdir={conf_dir}/.asv/env/nox01 --session=tests --install-only --no-error-on-external-run --verbose"
],
// The parent directory of the above environment.
// The most recently modified environment in the directory will be used.
13 changes: 8 additions & 5 deletions benchmarks/asv_delegated_conda.py
@@ -47,17 +47,15 @@ def __init__(
Parameters
----------
conf : Config instance
conf : Config
Config instance.
python : str
Version of Python. Must be of the form "MAJOR.MINOR".
requirements : dict
Dictionary mapping a PyPI package name to a version
identifier string.
tagged_env_vars : dict
Environment variables, tagged for build vs. non-build
Environment variables, tagged for build vs. non-build.
"""
ignored = ["`python`"]
@@ -194,6 +192,11 @@ def copy_asv_files(src_parent: Path, dst_parent: Path) -> None:
# Record new environment information in properties.
self._update_info()

def _run_conda(self, args, env=None):
# TODO: remove after airspeed-velocity/asv#1397 is merged and released.
args = ["--yes" if arg == "--force" else arg for arg in args]
return super()._run_conda(args, env)

def checkout_project(self, repo: Repo, commit_hash: str) -> None:
"""Check out the working tree of the project at given commit hash."""
super().checkout_project(repo, commit_hash)