generate-baselines.yml
# Not currently in use
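# Manually-triggered workflow that runs the OpenML AutoML benchmark
# (openml/automlbenchmark) against autosklearn:latest, once per task type,
# and uploads the resulting baseline CSVs as a build artifact.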
name: Generate Baselines
on:
  workflow_dispatch:
    inputs:
      dummy:
        description: 'Placeholder'
        required: false
        default: 'Dummy argument'
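# Where the benchmark harness is cloned from, and where this repository's
# benchmark config files are fetched from.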
env:
  AUTOMLBENCHMARK_REPO: 'openml/automlbenchmark'
  AUTOMLBENCHMARK_REF: 'master'
  GITHUB_URL: 'https://github.com'
  GITHUB_RAW_URL: 'https://raw.githubusercontent.com'
  FILE_SOURCE_DIR: '.github/workflows/benchmarking-files'
jobs:
  generate-baselines:
    name: Generate Baselines
    runs-on: ubuntu-latest
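    # Run the benchmark once per task type; each matrix job produces its own CSV.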
    strategy:
      matrix:
        task_type: [regression, classification]
    steps:
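      # Dump the event context and working directory to aid debugging.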
      - name: Debug
        env:
          GITHUB_CONTEXT: ${{ toJson(github) }}
        run: |
          echo "$GITHUB_CONTEXT"
          pwd
          ls -a
      - name: Branch extract
        id: extract
        env:
          head_ref: ${{ github.head_ref }}
        run: |
          # A pull request run may or may not have head_ref (the branch) set.
          # When it is not set, GITHUB_REF holds the branch.
          if [[ -z $head_ref ]]
          then
              echo "branch=${GITHUB_REF#refs/heads/}" >> "$GITHUB_OUTPUT"
          else
              echo "branch=${head_ref}" >> "$GITHUB_OUTPUT"
          fi
        # outputs
        #   branch: the branch name
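      # Detect the runner's system python3 version so the setup-python step
      # below provisions a matching interpreter.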
      - name: Get System Python Version
        id: python-version
        run: |
          echo "value=$(python3 -V | grep -o -e '[0-9.]*$')" >> "$GITHUB_OUTPUT"
        # outputs
        #   value: the version of the runner's system python3
      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ steps.python-version.outputs.value }}
      - name: Checkout Automlbenchmark
        uses: actions/checkout@v3
        with:
          repository: ${{ env.AUTOMLBENCHMARK_REPO }}
          ref: ${{ env.AUTOMLBENCHMARK_REF }}
      - name: Install Automlbenchmark
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
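      # Pull this branch's benchmark definition files from raw.githubusercontent.com.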
      - name: Fetch configs
        id: fetch
        env:
          source: ${{ env.GITHUB_RAW_URL }}/${{ github.repository }}/${{ steps.extract.outputs.branch }}/${{ env.FILE_SOURCE_DIR }}
          benchmark_dir: 'baseline_benchmark'
          config_file: 'config.yaml'
          constraints_file: 'constraints.yaml'
          benchmark_file: ${{ matrix.task_type }}.yaml
        run: |
          mkdir $benchmark_dir
          mkdir $benchmark_dir/benchmarks
          wget ${source}/${config_file} -O ${benchmark_dir}/${config_file}
          wget ${source}/${constraints_file} -O ${benchmark_dir}/${constraints_file}
          wget ${source}/benchmarks/${benchmark_file} -O ${benchmark_dir}/benchmarks/${benchmark_file}
          echo "benchmarkdir=$(pwd)/${benchmark_dir}" >> "$GITHUB_OUTPUT"
        # outputs
        #   benchmarkdir: the directory holding the fetched config
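      # Run the benchmark against the fetched configs (-u points
      # runbenchmark.py at the downloaded user config directory), then
      # rename the output CSV to identify this repo, branch, and commit.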
      - name: Run the automl benchmark
        id: bench
        env:
          benchmarkdir: ${{ steps.fetch.outputs.benchmarkdir }}
          framework: autosklearn:latest
          benchmark: ${{ matrix.task_type }}
          constraint: 10fold10min
          benchmark_output: results/results.csv
          results_to: baseline_${{ matrix.task_type }}_${{ github.repository_owner }}_${{ steps.extract.outputs.branch }}_${{ github.sha }}.csv
        run: |
          python runbenchmark.py -u $benchmarkdir $framework $benchmark $constraint
          mv $benchmark_output $results_to
          echo "results_path=$(pwd)/${results_to}" >> "$GITHUB_OUTPUT"
        # outputs
        #   results_path: path to the benchmark results CSV
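      # Both matrix jobs upload into the same "baselines" artifact
      # (upload-artifact@v3 merges same-name uploads into one artifact).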
      - name: Upload Results as Artifact
        uses: actions/upload-artifact@v3
        with:
          name: baselines
          path: |
            ${{ steps.bench.outputs.results_path }}
          retention-days: 90