
Commit 95c7bc2

OpenVINO Export (ultralytics#6057)
* OpenVINO export
* Remove timeout
* Add 3 files
* str
* Constrain opset to 12
* Default ONNX opset to 12
* Make dir
* Make dir
* Cleanup
* Cleanup
* check_requirements(('openvino-dev',))
1 parent dc54ed5 commit 95c7bc2

File tree: 2 files changed (+31, -6 lines)

export.py (+30, -6)
@@ -8,20 +8,22 @@
 TorchScript | yolov5s.torchscript | `torchscript`
 ONNX | yolov5s.onnx | `onnx`
 CoreML | yolov5s.mlmodel | `coreml`
+OpenVINO | yolov5s_openvino_model/ | `openvino`
 TensorFlow SavedModel | yolov5s_saved_model/ | `saved_model`
 TensorFlow GraphDef | yolov5s.pb | `pb`
 TensorFlow Lite | yolov5s.tflite | `tflite`
 TensorFlow.js | yolov5s_web_model/ | `tfjs`
 TensorRT | yolov5s.engine | `engine`
 
 Usage:
-    $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml saved_model pb tflite tfjs
+    $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml openvino saved_model tflite tfjs
 
 Inference:
     $ python path/to/detect.py --weights yolov5s.pt
                                          yolov5s.torchscript
                                          yolov5s.onnx
                                          yolov5s.mlmodel (under development)
+                                         yolov5s_openvino_model (under development)
                                          yolov5s_saved_model
                                          yolov5s.pb
                                          yolov5s.tflite
@@ -144,6 +146,23 @@ def export_coreml(model, im, file, prefix=colorstr('CoreML:')):
         return ct_model
 
 
+def export_openvino(model, im, file, prefix=colorstr('OpenVINO:')):
+    # YOLOv5 OpenVINO export
+    try:
+        check_requirements(('openvino-dev',))  # requires openvino-dev: https://pypi.org/project/openvino-dev/
+        import openvino.inference_engine as ie
+
+        LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
+        f = str(file).replace('.pt', '_openvino_model' + os.sep)
+
+        cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f}"
+        subprocess.check_output(cmd, shell=True)
+
+        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
+    except Exception as e:
+        LOGGER.info(f'\n{prefix} export failure: {e}')
+
+
 def export_saved_model(model, im, file, dynamic,
                        tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45,
                        conf_thres=0.25, prefix=colorstr('TensorFlow saved_model:')):
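
Note: the exported IR can be loaded back with the same openvino.inference_engine API that export_openvino() imports. Below is a minimal inference sketch (not part of this commit) against the 2021.x Inference Engine API; the yolov5s.xml/yolov5s.bin paths are assumptions based on the Model Optimizer's default output naming:

    import numpy as np
    from openvino.inference_engine import IECore  # installed by openvino-dev

    ie = IECore()
    # IR paths are assumed from `mo`'s default naming; adjust to the actual --output_dir contents
    net = ie.read_network(model='yolov5s_openvino_model/yolov5s.xml',
                          weights='yolov5s_openvino_model/yolov5s.bin')
    exec_net = ie.load_network(network=net, device_name='CPU')

    input_name = next(iter(net.input_info))            # typically 'images' for YOLOv5 ONNX exports
    im = np.zeros((1, 3, 640, 640), dtype=np.float32)  # dummy NCHW input at the export imgsz
    out = exec_net.infer(inputs={input_name: im})      # dict of output name -> ndarray
    print({k: v.shape for k, v in out.items()})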
@@ -317,15 +336,15 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path'
         imgsz=(640, 640),  # image (height, width)
         batch_size=1,  # batch size
         device='cpu',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
-        include=('torchscript', 'onnx', 'coreml'),  # include formats
+        include=('torchscript', 'onnx'),  # include formats
         half=False,  # FP16 half-precision export
         inplace=False,  # set YOLOv5 Detect() inplace=True
         train=False,  # model.train() mode
         optimize=False,  # TorchScript: optimize for mobile
         int8=False,  # CoreML/TF INT8 quantization
         dynamic=False,  # ONNX/TF: dynamic axes
         simplify=False,  # ONNX: simplify model
-        opset=14,  # ONNX: opset version
+        opset=12,  # ONNX: opset version
         verbose=False,  # TensorRT: verbose log
         workspace=4,  # TensorRT: workspace size (GB)
         nms=False,  # TF: add NMS to model
@@ -338,9 +357,12 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path'
     t = time.time()
     include = [x.lower() for x in include]
     tf_exports = list(x in include for x in ('saved_model', 'pb', 'tflite', 'tfjs'))  # TensorFlow exports
-    imgsz *= 2 if len(imgsz) == 1 else 1  # expand
     file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights)
 
+    # Checks
+    imgsz *= 2 if len(imgsz) == 1 else 1  # expand
+    opset = 12 if ('openvino' in include) else opset  # OpenVINO requires opset <= 12
+
     # Load PyTorch model
     device = select_device(device)
     assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. use --device 0'
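
For illustration only (not in the commit): when run() is called programmatically with an OpenVINO target, the check above silently clamps any higher requested opset back to 12. A sketch, assuming the script is imported from the repository root:

    # Hypothetical programmatic use of export.run(); including 'openvino' forces opset back to 12
    import export

    export.run(weights='yolov5s.pt', include=('onnx', 'openvino'), opset=13)  # opset becomes 12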
@@ -372,12 +394,14 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path'
372394
# Exports
373395
if 'torchscript' in include:
374396
export_torchscript(model, im, file, optimize)
375-
if 'onnx' in include:
397+
if ('onnx' in include) or ('openvino' in include): # OpenVINO requires ONNX
376398
export_onnx(model, im, file, opset, train, dynamic, simplify)
377399
if 'engine' in include:
378400
export_engine(model, im, file, train, half, simplify, workspace, verbose)
379401
if 'coreml' in include:
380402
export_coreml(model, im, file)
403+
if 'openvino' in include:
404+
export_openvino(model, im, file)
381405

382406
# TensorFlow Exports
383407
if any(tf_exports):
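
With the dispatch above, requesting only OpenVINO still triggers the ONNX export it depends on. An illustrative invocation in the docstring's command style (exact script path may differ):

    $ python export.py --weights yolov5s.pt --include openvino

This first writes yolov5s.onnx via export_onnx(), then export_openvino() runs `mo` on that file and saves the IR under yolov5s_openvino_model/.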
@@ -413,7 +437,7 @@ def parse_opt():
     parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization')
     parser.add_argument('--dynamic', action='store_true', help='ONNX/TF: dynamic axes')
     parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model')
-    parser.add_argument('--opset', type=int, default=14, help='ONNX: opset version')
+    parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version')
     parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log')
     parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)')
     parser.add_argument('--nms', action='store_true', help='TF: add NMS to model')

requirements.txt (+1)
@@ -27,6 +27,7 @@ seaborn>=0.11.0
 # scikit-learn==0.19.2  # CoreML quantization
 # tensorflow>=2.4.1  # TFLite export
 # tensorflowjs>=3.9.0  # TF.js export
+# openvino-dev  # OpenVINO export
 
 # Extras --------------------------------------
 # albumentations>=1.0.3
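
Because the new requirements.txt entry stays commented out, the dependency is installed on demand: check_requirements(('openvino-dev',)) in export_openvino() attempts an automatic pip install of the missing package at export time, or it can be installed up front, e.g.:

    $ pip install openvino-dev  # provides the OpenVINO runtime and the `mo` Model Optimizer CLI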
