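# valohai.yaml
# Valohai configuration for the ships-aerial-images drift-detection demo:
# data preparation, YOLO training, evaluation, whylogs/WhyLabs drift
# monitoring at inference time, and a retraining trigger, wired together by
# the two pipelines defined at the end of the file.

# Step: prepare_data
# Fetches the ships-aerial-images archive as the `dataset` input and runs
# data-preprocess.py on it; the train/valid/test subset sizes and the image
# size are exposed as parameters.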
- step:
    name: prepare_data
    environment: aws-eu-west-1-g4dn-xlarge
    image: ultralytics/yolov5
    command:
      - pip install valohai-utils
      - python data-preprocess.py {parameters}
    inputs:
      - name: dataset
        default: https://valohai-demo-library-data.s3.eu-west-1.amazonaws.com/drift-detection/ships-aerial-images.zip
    parameters:
      - name: train_size
        default: 15
        type: integer
        description: total train dataset size
      - name: valid_size
        default: 10
        type: integer
        description: total validation dataset size
      - name: test_size
        default: 10
        type: integer
        description: total test dataset size
      - name: image_size
        default: 768
        type: integer
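
# Step: train_model
# Trains an Ultralytics YOLO model (yolov8x.pt by default) on the prepared
# train/valid/test datasets; epochs, batch size, image size, optimizer and
# seed are parameters, and results are written under /valohai/outputs/.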
- step:
    name: train_model
    environment: aws-eu-west-1-g4dn-xlarge
    image: ultralytics/yolov5
    command:
      - pip install valohai-utils
      - python train.py {parameters}
    inputs:
      - name: train
        default: dataset://drift-demo-ships-aerial/dev_train
      - name: test
        default: dataset://drift-demo-ships-aerial/dev_test
      - name: valid
        default: dataset://drift-demo-ships-aerial/dev_valid
    parameters:
      - name: yolo_model_name
        default: "yolov8x.pt"
        type: string
      - name: epochs
        default: 3
        type: integer
        description: total training epochs
      - name: batch_size
        default: 8
        type: integer
        description: total batch size
      - name: image_size
        default: 768
        type: integer
      - name: optimizer
        default: "SGD"
        type: string
        description: optimizer
      - name: seed
        default: 42
        type: integer
      - name: project
        default: /valohai/outputs/
        type: string
        description: save to project/name
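
# Step: evaluation
# Runs evaluation.py with the current best model (datum://model-current-best),
# the dataset YAML, and the validation/test datasets as inputs.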
- step:
    name: evaluation
    image: ultralytics/yolov5
    environment: aws-eu-west-1-g4dn-xlarge
    command:
      - pip install valohai-utils
      - python evaluation.py {parameters}
    inputs:
      - name: model
        default: datum://model-current-best
      - name: data_yaml
        default: datum://data_yaml
      - name: valid
        default: dataset://drift-demo-ships-aerial/dev_valid
      - name: test
        default: dataset://drift-demo-ships-aerial/dev_test
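
# Step: inference
# Runs inference.py on new data (a drifted-colorspace sample by default) and
# profiles it with whylogs against a reference profile, either loaded from
# WhyLabs (see the environment variables below) or built from `ref_data`.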
- step:
    name: inference
    image: ultralytics/yolov5
    command:
      - pip install "whylogs[image,viz]"
      - pip install valohai-utils
      - python inference.py {parameters}
    inputs:
      - name: data
        default: https://valohai-demo-library-data.s3.eu-west-1.amazonaws.com/drift-detection/drifted_colorspace.zip
      - name: ref_data
        default: dataset://drift-demo-ships-aerial/dev_valid
      - name: model
        default: datum://model-current-best
    environment-variables:
      - name: WHYLABS_DEFAULT_ORG_ID
        default: org-BfZcPw
      - name: WHYLABS_DEFAULT_DATASET_ID
        default: "model-4"
      - name: WHYLABS_REF_ID
        default: ref-ijWFMLXVQqmRM9i4
    parameters:
      - name: use_whylabs_reference_profile
        default: True
        type: flag
        description: Indicates whether to load the reference profile from WhyLabs. If False, the reference profile will be created based on the provided input data (`ref_data`).
      - name: reference_profile_output_name
        default: yolo_example_reference_profile
        type: string
        description: The name used to save the reference profile if it is generated from the provided input data. This is ignored if `use_whylabs_reference_profile` is True.
      - name: export_inference_results
        default: True
        type: flag
        description: Determines whether the results of the YOLO inference should be saved to the outputs directory.
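
# Step: call-retrain
# Lightweight step that runs call-retrain.py (plain Python image plus
# `requests`), intended to trigger retraining; `save_results` is passed in
# from the drift-detection pipeline below.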
- step:
    name: call-retrain
    image: python:3.9
    command:
      - pip install requests
      - python call-retrain.py {parameters}
    parameters:
      - name: save_results
        default: 0
        type: integer
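
# Pipeline: train-val-pipeline
# preprocess -> training -> evaluation. The image_size parameter is propagated
# from preprocessing to training, and the best trained weights (*best.pt) are
# fed into the evaluation node as its model input.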
- pipeline:
    name: train-val-pipeline
    nodes:
      - name: preprocess
        type: execution
        step: prepare_data
      - name: training
        type: execution
        step: train_model
      - name: evaluation
        type: execution
        step: evaluation
    edges:
      - [preprocess.parameter.image_size, training.parameter.image_size]
      - [training.output.*best.pt, evaluation.input.model]
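
# Pipeline: drift-detection-pipeline
# Runs the inference/drift-detection step; if the reported drift metric
# (metadata.drift) is <= 1 the pipeline stops, otherwise the call-retrain
# node runs after manual approval (require-approval on node-starting).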
- pipeline:
    name: drift-detection-pipeline
    nodes:
      - name: drift-detect
        type: execution
        step: inference
        actions:
          - when: node-complete
            if: metadata.drift <= 1
            then: stop-pipeline
      - name: call-retrain
        type: execution
        step: call-retrain
        actions:
          - when: node-starting
            then: require-approval
    edges:
      - [drift-detect.parameter.save_results, call-retrain.parameter.save_results]