-
Notifications
You must be signed in to change notification settings - Fork 3
/
Copy path conv.py
72 lines (64 loc) · 2.84 KB
/
conv.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
from rknn.api import RKNN
import os
import sys

# Batch-convert YOLOv8 ONNX models to RKNN format for Rockchip NPUs.
# One .rknn file is written per (model size, target SoC, quantization)
# combination under ./output/<soc>/.  Widen the commented-out lists below
# to cover more variants.
for suffix in ["n", "s", "m", "l", "x"]:
    # for soc in ["rk3562", "rk3566", "rk3568", "rk3588"]:
    for soc in ["rk3588"]:
        # for QUANTIZATION in [True, False]:
        for QUANTIZATION in [True]:
            INPUT_MODEL = 'yolov8{}.onnx'.format(suffix)
            # NOTE(review): WIDTH/HEIGHT only affect the output filename here;
            # the actual input shape comes from the ONNX model — confirm they match.
            WIDTH = 320
            HEIGHT = 320
            OUTPUT_MODEL_BASENAME = 'yolov8{}'.format(suffix)
            # Calibration image list used for INT8 quantization.
            DATASET = './datasets/coco20/dataset_coco20.txt'

            # Preprocessing config: raw pixels in [0, 255] scaled to [0, 1];
            # calibration images converted RGB->BGR before quantization.
            MEAN_VALUES = [[0, 0, 0]]
            STD_VALUES = [[255, 255, 255]]
            QUANT_IMG_RGB2BGR = True
            QUANTIZED_DTYPE = "asymmetric_quantized-8"
            QUANTIZED_ALGORITHM = "normal"
            QUANTIZED_METHOD = "channel"
            FLOAT_DTYPE = "float16"  # used when quantization is disabled
            OPTIMIZATION_LEVEL = 2
            TARGET_PLATFORM = soc
            CUSTOM_STRING = None
            REMOVE_WEIGHT = None
            COMPRESS_WEIGHT = False
            SINGLE_CORE_MODE = False
            MODEL_PRUNING = False  # fixed typo: was MODEL_PRUNNING
            OP_TARGET = None
            DYNAMIC_INPUT = None

            quant_suff = "-i8" if QUANTIZATION else ""
            OUTPUT_MODEL_FILE = "./output/{}/{}-{}x{}{}-{}.rknn".format(
                soc, OUTPUT_MODEL_BASENAME, WIDTH, HEIGHT, quant_suff, soc)
            os.makedirs("./output/{}".format(soc), exist_ok=True)

            rknn = RKNN()
            rknn.config(mean_values=MEAN_VALUES,
                        std_values=STD_VALUES,
                        quant_img_RGB2BGR=QUANT_IMG_RGB2BGR,
                        quantized_dtype=QUANTIZED_DTYPE,
                        quantized_algorithm=QUANTIZED_ALGORITHM,
                        quantized_method=QUANTIZED_METHOD,
                        float_dtype=FLOAT_DTYPE,
                        optimization_level=OPTIMIZATION_LEVEL,
                        target_platform=TARGET_PLATFORM,
                        custom_string=CUSTOM_STRING,
                        remove_weight=REMOVE_WEIGHT,
                        compress_weight=COMPRESS_WEIGHT,
                        single_core_mode=SINGLE_CORE_MODE,
                        model_pruning=MODEL_PRUNING,
                        op_target=OP_TARGET,
                        dynamic_input=DYNAMIC_INPUT)
            try:
                # if rknn.load_pytorch("./input/" + INPUT_MODEL, [[HEIGHT, WIDTH, 3]]) != 0:
                if rknn.load_onnx("./input/" + INPUT_MODEL) != 0:
                    print('Error loading model.')
                    # sys.exit(1) instead of exit(): bare exit() reported
                    # status 0, so callers could not detect the failure.
                    sys.exit(1)
                if rknn.build(do_quantization=QUANTIZATION, dataset=DATASET) != 0:
                    print('Error building model.')
                    sys.exit(1)
                if rknn.export_rknn(OUTPUT_MODEL_FILE) != 0:
                    print('Error exporting rknn model.')
                    sys.exit(1)
            finally:
                # Free native toolkit resources between iterations (was leaked).
                rknn.release()