8
8
TorchScript | yolov5s.torchscript | `torchscript`
9
9
ONNX | yolov5s.onnx | `onnx`
10
10
CoreML | yolov5s.mlmodel | `coreml`
11
+ OpenVINO | yolov5s_openvino_model/ | `openvino`
11
12
TensorFlow SavedModel | yolov5s_saved_model/ | `saved_model`
12
13
TensorFlow GraphDef | yolov5s.pb | `pb`
13
14
TensorFlow Lite | yolov5s.tflite | `tflite`
14
15
TensorFlow.js | yolov5s_web_model/ | `tfjs`
15
16
TensorRT | yolov5s.engine | `engine`
16
17
17
18
Usage:
18
- $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml saved_model pb tflite tfjs
19
+ $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml openvino saved_model tflite tfjs
19
20
20
21
Inference:
21
22
$ python path/to/detect.py --weights yolov5s.pt
22
23
yolov5s.torchscript
23
24
yolov5s.onnx
24
25
yolov5s.mlmodel (under development)
26
+ yolov5s_openvino_model (under development)
25
27
yolov5s_saved_model
26
28
yolov5s.pb
27
29
yolov5s.tflite
@@ -144,6 +146,23 @@ def export_coreml(model, im, file, prefix=colorstr('CoreML:')):
144
146
return ct_model
145
147
146
148
149
def export_openvino(model, im, file, prefix=colorstr('OpenVINO:')):
    """YOLOv5 OpenVINO export.

    Converts the previously exported ONNX model (``file`` with an ``.onnx``
    suffix) to OpenVINO IR format by invoking the Model Optimizer (``mo``) CLI.
    ``model`` and ``im`` are unused here but kept for signature consistency
    with the other ``export_*`` functions. Failures are logged, not raised,
    matching the file's best-effort export convention.
    """
    try:
        check_requirements(('openvino-dev',))  # requires openvino-dev: https://pypi.org/project/openvino-dev/
        import openvino.inference_engine as ie  # imported only to report the installed version

        LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
        f = str(file).replace('.pt', '_openvino_model' + os.sep)  # output directory for the IR files

        # Pass an argument list with shell=False so paths containing spaces or
        # shell metacharacters are handled safely (the shell-string form broke on them).
        cmd = ['mo', '--input_model', str(file.with_suffix('.onnx')), '--output_dir', f]
        subprocess.run(cmd, check=True)  # raises CalledProcessError on a non-zero exit, caught below

        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
    except Exception as e:
        LOGGER.info(f'\n{prefix} export failure: {e}')
165
+
147
166
def export_saved_model (model , im , file , dynamic ,
148
167
tf_nms = False , agnostic_nms = False , topk_per_class = 100 , topk_all = 100 , iou_thres = 0.45 ,
149
168
conf_thres = 0.25 , prefix = colorstr ('TensorFlow saved_model:' )):
@@ -317,15 +336,15 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path'
317
336
imgsz = (640 , 640 ), # image (height, width)
318
337
batch_size = 1 , # batch size
319
338
device = 'cpu' , # cuda device, i.e. 0 or 0,1,2,3 or cpu
320
- include = ('torchscript' , 'onnx' , 'coreml' ), # include formats
339
+ include = ('torchscript' , 'onnx' ), # include formats
321
340
half = False , # FP16 half-precision export
322
341
inplace = False , # set YOLOv5 Detect() inplace=True
323
342
train = False , # model.train() mode
324
343
optimize = False , # TorchScript: optimize for mobile
325
344
int8 = False , # CoreML/TF INT8 quantization
326
345
dynamic = False , # ONNX/TF: dynamic axes
327
346
simplify = False , # ONNX: simplify model
328
- opset = 14 , # ONNX: opset version
347
+ opset = 12 , # ONNX: opset version
329
348
verbose = False , # TensorRT: verbose log
330
349
workspace = 4 , # TensorRT: workspace size (GB)
331
350
nms = False , # TF: add NMS to model
@@ -338,9 +357,12 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path'
338
357
t = time .time ()
339
358
include = [x .lower () for x in include ]
340
359
tf_exports = list (x in include for x in ('saved_model' , 'pb' , 'tflite' , 'tfjs' )) # TensorFlow exports
341
- imgsz *= 2 if len (imgsz ) == 1 else 1 # expand
342
360
file = Path (url2file (weights ) if str (weights ).startswith (('http:/' , 'https:/' )) else weights )
343
361
362
+ # Checks
363
+ imgsz *= 2 if len (imgsz ) == 1 else 1 # expand
364
+ opset = 12 if ('openvino' in include ) else opset # OpenVINO requires opset <= 12
365
+
344
366
# Load PyTorch model
345
367
device = select_device (device )
346
368
assert not (device .type == 'cpu' and half ), '--half only compatible with GPU export, i.e. use --device 0'
@@ -372,12 +394,14 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path'
372
394
# Exports
373
395
if 'torchscript' in include :
374
396
export_torchscript (model , im , file , optimize )
375
- if 'onnx' in include :
397
+ if ( 'onnx' in include ) or ( 'openvino' in include ): # OpenVINO requires ONNX
376
398
export_onnx (model , im , file , opset , train , dynamic , simplify )
377
399
if 'engine' in include :
378
400
export_engine (model , im , file , train , half , simplify , workspace , verbose )
379
401
if 'coreml' in include :
380
402
export_coreml (model , im , file )
403
+ if 'openvino' in include :
404
+ export_openvino (model , im , file )
381
405
382
406
# TensorFlow Exports
383
407
if any (tf_exports ):
@@ -413,7 +437,7 @@ def parse_opt():
413
437
parser .add_argument ('--int8' , action = 'store_true' , help = 'CoreML/TF INT8 quantization' )
414
438
parser .add_argument ('--dynamic' , action = 'store_true' , help = 'ONNX/TF: dynamic axes' )
415
439
parser .add_argument ('--simplify' , action = 'store_true' , help = 'ONNX: simplify model' )
416
- parser .add_argument ('--opset' , type = int , default = 14 , help = 'ONNX: opset version' )
440
+ parser .add_argument ('--opset' , type = int , default = 12 , help = 'ONNX: opset version' )
417
441
parser .add_argument ('--verbose' , action = 'store_true' , help = 'TensorRT: verbose log' )
418
442
parser .add_argument ('--workspace' , type = int , default = 4 , help = 'TensorRT: workspace size (GB)' )
419
443
parser .add_argument ('--nms' , action = 'store_true' , help = 'TF: add NMS to model' )
0 commit comments