-
Notifications
You must be signed in to change notification settings - Fork 9
/
Copy pathvalidate_from_file.py
193 lines (161 loc) · 5.4 KB
/
validate_from_file.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
"""
Serve as a convenient wrapper for validate.py.
"""
import glob
import os
import re
import IPython
import yaml
from ptsemseg.loader import get_data_path
from utils import validate_parser
from validate import validate, result_root
ROOT = os.getcwd()
M_ROOT = "runs/"
os.chdir(ROOT)
def final_run_dirs(args):
    """Return the run directories to validate for ``args.dataset``.

    The per-dataset lists are intentionally empty placeholders; populate
    them with the run-directory names (relative to ``M_ROOT``) to evaluate.

    :param args: parsed CLI arguments; only ``args.dataset`` is read.
    :return: list of run-directory names.
    :raises ValueError: if ``args.dataset`` is not a supported dataset.
    """
    runs_by_dataset = {
        "drive": [],
        "epfl": [],
        "eyth": [],
        "egohand": [],
        "gtea": [],
        "hofhand": [],
        "road": [],
        "cityscapes": [],
        "epflhand-new": [],
    }
    try:
        return runs_by_dataset[args.dataset]
    except KeyError:
        raise ValueError(f"Dataset not supported {args.dataset}")
def process_args_from_loaded_cfg(cfg, args):
    """
    Reconcile CLI arguments with a loaded run config.

    The hidden size is recovered from a ``-h<N>-`` tag in the logdir name
    when present (and N > 1), otherwise taken from ``args.hidden_size``,
    then mirrored into ``cfg['model']['hidden_size']``.

    :param cfg: config dict loaded from the run directory's YAML file
        (mutated in place: ``eval_out_path``, model fields, data path).
    :param args: parsed CLI arguments (mutated in place).
    :return: the updated ``(cfg, args)`` pair.
    """
    # If calling this, it will use CFG for sure.
    # Raw strings: '\d' in a plain string literal is an invalid escape
    # (DeprecationWarning today, a SyntaxError in future Python versions).
    r = re.compile(r'-h\d+-')
    r_d = re.compile(r'\d+')
    _h = r.findall(cfg['logdir'])
    if len(_h) > 0:
        res = int(r_d.findall(_h[0])[0])
        if res > 1:  # avoid the represent h=init
            args.hidden_size = res
    else:
        res = args.hidden_size
    if cfg['model'].get('hidden_size'):
        assert cfg['model']['hidden_size'] == res
    else:
        cfg['model']['hidden_size'] = res
    args.gate = cfg['model'].get('gate') or args.gate
    if args.is_recurrent is not None:
        # Recurrent mode is implied by the multi-step loss name.
        args.is_recurrent = cfg['training']['loss']['name'] \
            in ['multi_step_cross_entropy']
    out_path = args.out_path or os.path.join(
        'results', cfg['data']['dataset'],
        os.path.basename(os.path.dirname(cfg['logdir'])))
    cfg['eval_out_path'] = out_path
    # Process for unet_level: derive from hidden size when not configured.
    if cfg['model']['arch'] == "runet":
        if cfg['model'].get('unet_level'):
            args.unet_level = cfg['model']['unet_level']
        else:
            args.unet_level = args.hidden_size // 32
        cfg['model']['unet_level'] = args.unet_level
        cfg['model']['recurrent_level'] = args.recurrent_level
    if not os.path.exists(cfg['data']['path']):
        cfg['data']['path'] = get_data_path(
            cfg['data']['path'],
            config_file=f"configs/dataset/{cfg['data']['dataset'].replace('_', '')}.yml")
    # BUG FIX: removed a leftover `IPython.embed()` that dropped into an
    # interactive shell on every call, blocking non-interactive runs.
    return cfg, args
def load_complete_info_from_dir(run_dir, args):
    """
    Load a run's config from the given directory.

    Must work with the complete config YAML stored in the folder; the first
    match among ``config*``, ``*.yaml`` and ``*.yml`` is used.

    :param run_dir: run directory name, relative to ``M_ROOT``.
    :param args: parsed CLI arguments (further mutated by
        ``process_args_from_loaded_cfg``).
    :return: the updated ``(cfg, args)`` pair.
    :raises FileNotFoundError: if no config file or best-model checkpoint
        can be located in ``run_dir``.
    """
    run_dir = os.path.join(M_ROOT, run_dir)
    # Starting from this, should be reused.
    # Try the config file patterns in priority order (flattens the original
    # three nested try/except IndexError blocks).
    for pattern in ('/config*', '/*.yaml', '/*.yml'):
        matches = glob.glob(run_dir + pattern)
        if matches:
            config_path = matches[0]
            break
    else:
        raise FileNotFoundError(f"nothing is found in {run_dir}.")
    with open(config_path, 'r') as f:
        # BUG FIX: yaml.load without a Loader is unsafe and a TypeError on
        # PyYAML >= 6; configs are plain data, so SafeLoader suffices.
        cfg = yaml.load(f, Loader=yaml.SafeLoader)
    if os.path.exists(run_dir):
        cfg['logdir'] = run_dir
    else:
        assert os.path.exists(cfg['logdir'])
    best_path = os.path.join(run_dir, cfg['training']['resume'])
    if not os.path.exists(best_path):
        # Fall back to searching for any best-model checkpoint; raise the
        # intended FileNotFoundError (not IndexError) when none exists.
        candidates = glob.glob(f"{run_dir}/*best_model*")
        if not candidates:
            raise FileNotFoundError(f"No file found at {run_dir}")
        best_path = candidates[0]
    cfg['training']['resume'] = os.path.basename(best_path)
    # for EPFL-hand, set to -1 by force. do not use the croping.
    if cfg['data']['dataset'] == "epfl_hand_roi":
        cfg['data']['dataset'] = 'epfl_hand'
        cfg['data']['void_class'] = -1
    # BUG FIX: the original condition `'road' or 'cityscapes' in ...` is
    # always truthy, so EVERY dataset had its batch sizes forced to 1.
    if 'road' in cfg['data']['dataset'] or 'cityscapes' in cfg['data']['dataset']:
        cfg['training']['validate_batch_size'] = 1
        cfg['training']['batch_size'] = 1
    cfg, args = process_args_from_loaded_cfg(cfg, args)
    cfg['model']['steps'] = args.steps
    return cfg, args
def run_validate(args, run_dirs, result_dir=None, roi_only=False):
    """
    Run validation over multiple run directories, dumping results to YAML.

    :param args: parsed CLI arguments; updated per run directory.
    :param run_dirs: run directory names relative to ``M_ROOT``.
    :param result_dir: output directory; when None, defaults to
        ``results/<dataset>`` of the first run's config (and sticks for
        subsequent runs).
    :param roi_only: forwarded to ``validate``.
    """
    for r_dir in run_dirs:
        cfg, args = load_complete_info_from_dir(r_dir, args)
        result_dir = result_dir or os.path.join('results', cfg['data']['dataset'])
        results = validate(cfg, args, roi_only=roi_only)
        logdir = result_dir
        if not os.path.exists(logdir):
            os.makedirs(logdir)
        # BUG FIX: the original built `logdir + arch + '.yml'` with no path
        # separator, writing e.g. "results/eythrunet.yml" *beside* the
        # results dir instead of inside it.
        result_path = os.path.join(logdir, f"{cfg['model']['arch']}.yml")
        with open(result_path, 'w') as f:
            yaml.dump(results, f, default_flow_style=False)
if __name__ == '__main__':
    # CLI entry point: validate every configured run for the chosen dataset.
    args = validate_parser().parse_args()
    # Fixed evaluation settings for this wrapper script.
    args.eval_flip = True
    args.measure_time = True
    args.structure = 'baseline'
    args.batch_size = 1
    run_dirs = final_run_dirs(args)
    # `dataset` has been resolved into run_dirs; drop it before validation.
    del args.dataset
    run_validate(args, run_dirs, roi_only=False)