Commit: Merge branch '177-plotting-capabilities' into 177b-plotting-capabilities
Showing 22 changed files with 705 additions and 47 deletions.
@@ -0,0 +1,100 @@ (new GitHub Actions workflow for periodic benchmarks)

```yaml
# Initial Source: pybop-team/PyBop

# This workflow periodically runs the benchmarks suite in benchmarks/
# using asv and publish the results, effectively updating
# the display website hosted in the pybop-bench repo

# Steps:
# - Benchmark all commits since the last one that was benchmarked
# - Push results to pybop-bench repo
# - Publish website

name: Benchmarks
on:
  # Everyday at 12 pm UTC
  schedule:
    - cron: "0 12 * * *"
  # Make it possible to trigger the
  # workflow manually
  workflow_dispatch:

jobs:
  benchmarks:
    runs-on: [self-hosted, macOS, ARM64]
    if: github.repository == 'pybop-team/PyBOP'
    steps:
      - uses: actions/checkout@v4

      - name: Install python & create virtualenv
        shell: bash
        run: |
          eval "$(pyenv init -)"
          pyenv install 3.12 -s
          pyenv virtualenv 3.12 pybop-312-bench

      - name: Install dependencies & run benchmarks
        shell: bash
        run: |
          eval "$(pyenv init -)"
          pyenv activate pybop-312-bench
          python -m pip install -e .[all,dev]
          python -m pip install asv[virtualenv]
          python -m asv machine --machine "SelfHostedRunner"
          python -m asv run --machine "SelfHostedRunner" NEW --show-stderr -v

      - name: Upload results as artifact
        uses: actions/upload-artifact@v4
        with:
          name: asv_periodic_results
          path: results

      - name: Uninstall pyenv-virtualenv & python
        if: always()
        shell: bash
        run: |
          eval "$(pyenv init -)"
          pyenv activate pybop-312-bench
          pyenv uninstall -f $( python --version )

  publish-results:
    name: Push and publish results
    needs: benchmarks
    runs-on: ubuntu-latest
    if: github.repository == 'pybop-team/PyBOP'
    steps:
      - name: Set up Python 3.12
        uses: actions/setup-python@v5
        with:
          python-version: 3.12

      - name: Install asv
        run: pip install asv

      - name: Checkout pybop-bench repo
        uses: actions/checkout@v4
        with:
          repository: pybop-team/pybop-bench
          token: ${{ secrets.PUSH_BENCH_TOKEN }}

      - name: Download results artifact
        uses: actions/download-artifact@v4
        with:
          name: asv_periodic_results
          path: new_results

      - name: Copy new results and push to pybop-bench repo
        env:
          PUSH_BENCH_EMAIL: ${{ secrets.PUSH_BENCH_EMAIL }}
          PUSH_BENCH_NAME: ${{ secrets.PUSH_BENCH_NAME }}
        run: |
          cp -vr new_results/* results
          git config --global user.email "$PUSH_BENCH_EMAIL"
          git config --global user.name "$PUSH_BENCH_NAME"
          git add results
          git commit -am "Add new benchmark results"
          git push

      - name: Publish results
        run: |
          asv publish
          git fetch origin gh-pages:gh-pages
          asv gh-pages
```
@@ -310,3 +310,7 @@ $RECYCLE.BIN/ (`.gitignore` additions)

```
# Output JSON files
**/fit_ecm_parameters.json

# Airspeed Velocity
*.asv/
results/
```
@@ -0,0 +1,23 @@ (new `asv` configuration file)

```json
{
  "version": 1,
  "project": "PyBOP",
  "project_url": "https://github.com/pybop-team/pybop",
  "repo": ".",
  "build_command": [
    "python -m pip install build",
    "python -m build --wheel -o {build_cache_dir} {build_dir}"
  ],
  "default_benchmark_timeout": 180,
  "branches": ["develop"],
  "environment_type": "virtualenv",
  "matrix": {
    "req": {
      "pybamm": [],
      "numpy": [],
      "scipy": [],
      "pints": []
    }
  },
  "build_cache_dir": ".asv/cache",
  "build_dir": ".asv/build"
}
```
@@ -0,0 +1,104 @@ (new benchmarking README)

# Benchmarking Directory for PyBOP

Welcome to the benchmarking directory for PyBOP. We use `asv` (airspeed velocity), a tool for running Python benchmarks over time in a consistent environment. This document guides you through setting up, running, and viewing benchmarks.

## Quick Links

- [Airspeed Velocity (asv) Documentation](https://asv.readthedocs.io/)

## Prerequisites

Before you can run benchmarks, ensure that `asv` is installed and that you have a working Python environment. It is also recommended to run benchmarks in a clean, dedicated virtual environment to avoid side effects from your local environment.

### Installing `asv`

You can install `asv` using `pip`. It's recommended to do this within a virtual environment:

```bash
pip install asv
```

## Setting Up Benchmarks

The `benchmarks` directory already contains a set of benchmarks for the package. To add or modify benchmarks, edit the `.py` files within this directory.

Each benchmark file should contain one or more classes with methods that `asv` automatically recognizes as benchmarks. Here's an example structure for a benchmark file:

```python
class ExampleBenchmarks:
    def setup(self):
        # Code to run before each benchmark method is executed
        pass

    def time_example_benchmark(self):
        # The actual benchmark code
        pass

    def teardown(self):
        # Code to run after each benchmark method is executed
        pass
```
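Benchmarks can also be parameterized: `asv` runs every `time_*` method once per combination of the values listed in the class-level `params`/`param_names` attributes, and passes the current combination to `setup` and to the timing methods. The new `benchmark_model.py` added in this commit uses this pattern to compare the `SPM` and `SPMe` models. A minimal, self-contained sketch (the class and data below are illustrative, not part of PyBOP):

```python
import numpy as np


class ParameterizedBenchmarks:
    # asv runs each time_* method once for every combination of these values
    param_names = ["size", "dtype"]
    params = [
        [1_000, 100_000],
        ["float32", "float64"],
    ]

    def setup(self, size, dtype):
        # Called before each timing with the current parameter combination
        self.data = np.ones(size, dtype=dtype)

    def time_sum(self, size, dtype):
        # The timed body; the parameters are passed in again, but the work uses self.data
        self.data.sum()
```

Results are reported separately for every parameter combination, which is what makes the per-model comparison in `benchmark_model.py` possible.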
## Running Benchmarks

With `asv` installed and your benchmarks set up, you can run them using the standard `asv` commands:

### Running All Benchmarks

To run all benchmarks in your Python environment:

```bash
asv run
```

By default, this tests the current state of your codebase. You can also benchmark a range of commits by appending a commit range to the command, like so:

```bash
asv run <commit-hash-1>..<commit-hash-2>
```

For quick benchmarking, pass the `--quick` argument to `asv run`. This runs each benchmark function only once and reports that single measurement, which is faster but less statistically robust:

```bash
asv run --quick
```

### Running Specific Benchmarks

To run a specific benchmark (the argument is a regular expression matched against benchmark names), use:

```bash
asv run --bench <benchmark name>
```

### Running Benchmarks for a Specific Environment

To run benchmarks against a specific Python version:

```bash
asv run --python=same  # use the same Python version as the current environment
asv run --python=3.8   # specify the Python version
```

## Viewing Benchmark Results

After running benchmarks, `asv` generates results that can be viewed as a web page:

```bash
asv publish
asv preview
```

Open the URL printed by `asv preview` in your web browser to view the results.

## Continuous Benchmarking

You can also set up `asv` for continuous benchmarking, where it tracks performance over time. This typically involves integration with a continuous integration (CI) system; the workflow added in this commit does exactly that, running the suite daily on a self-hosted runner and publishing the results to the pybop-bench repository.

For more detailed instructions on setting up continuous benchmarking, consult the [asv documentation](https://asv.readthedocs.io/en/stable/using.html#continuous-benchmarking).

## Reporting Issues

If you encounter any issues or have suggestions for improving the benchmarks, please open an issue or a pull request in the project repository.

Thank you for contributing to the performance of the package!
An empty file is also added in this commit (nothing to display).
@@ -0,0 +1,81 @@ (new Python benchmark module)

```python
import pybop
import numpy as np
from .benchmark_utils import set_random_seed


class BenchmarkModel:
    param_names = ["model", "parameter_set"]
    params = [
        [pybop.lithium_ion.SPM, pybop.lithium_ion.SPMe],
        ["Chen2020"],
    ]

    def setup(self, model, parameter_set):
        """
        Setup the model and problem for predict and simulate benchmarks.

        Args:
            model (pybop.Model): The model class to be benchmarked.
            parameter_set (str): The name of the parameter set to be used.
        """
        # Set random seed
        set_random_seed()

        # Create model instance
        self.model = model(parameter_set=pybop.ParameterSet.pybamm(parameter_set))

        # Define fitting parameters
        parameters = [
            pybop.Parameter(
                "Current function [A]",
                prior=pybop.Gaussian(0.4, 0.02),
                bounds=[0.2, 0.7],
                initial_value=0.4,
            )
        ]

        # Generate synthetic data
        sigma = 0.001
        self.t_eval = np.arange(0, 900, 2)
        values = self.model.predict(t_eval=self.t_eval)
        corrupt_values = values["Voltage [V]"].data + np.random.normal(
            0, sigma, len(self.t_eval)
        )

        self.inputs = {
            "Current function [A]": 0.4,
        }

        # Create dataset
        dataset = pybop.Dataset(
            {
                "Time [s]": self.t_eval,
                "Current function [A]": values["Current [A]"].data,
                "Voltage [V]": corrupt_values,
            }
        )

        # Create fitting problem
        self.problem = pybop.FittingProblem(
            model=self.model, dataset=dataset, parameters=parameters, init_soc=0.5
        )

    def time_model_predict(self, model, parameter_set):
        """
        Benchmark the predict method of the model.

        Args:
            model (pybop.Model): The model class being benchmarked.
            parameter_set (str): The name of the parameter set being used.
        """
        self.model.predict(inputs=self.inputs, t_eval=self.t_eval)

    def time_model_simulate(self, model, parameter_set):
        """
        Benchmark the simulate method of the model.

        Args:
            model (pybop.Model): The model class being benchmarked.
            parameter_set (str): The name of the parameter set being used.
        """
        self.problem._model.simulate(inputs=self.inputs, t_eval=self.t_eval)
```
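This module imports `set_random_seed` from a sibling `benchmark_utils` module whose diff is not rendered above, so its exact contents cannot be confirmed here. A plausible minimal sketch, assuming it only fixes NumPy's global seed so the synthetic noisy voltage data is reproducible across runs and commits:

```python
# benchmark_utils.py: hypothetical sketch; the real file is not shown in this diff.
import numpy as np


def set_random_seed(seed: int = 8):
    """Seed NumPy's global RNG so benchmark inputs are deterministic."""
    np.random.seed(seed)
```

With the suite in place, these model benchmarks can be run on their own with, for example, `asv run --bench BenchmarkModel`.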