forked from cogsys-tuebingen/mobilestereonet
test.py
from __future__ import print_function, division
import os
import time
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from datasets import __datasets__
from models import __models__
from utils import *
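# cuDNN benchmark mode auto-tunes convolution algorithms; it speeds things up when the input size is fixed across batches.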
cudnn.benchmark = True
parser = argparse.ArgumentParser(description='MobileStereoNet')
parser.add_argument('--model', default='MSNet2D', help='select a model structure', choices=__models__.keys())
parser.add_argument('--maxdisp', type=int, default=192, help='maximum disparity')
parser.add_argument('--dataset', required=True, help='dataset name', choices=__datasets__.keys())
parser.add_argument('--datapath', required=True, help='data path')
parser.add_argument('--testlist', required=True, help='testing list')
parser.add_argument('--loadckpt', required=True, help='load the weights from a specific checkpoint')
parser.add_argument('--colored', default=1, help='save colored or save for benchmark submission')
parser.add_argument('--test_batch_size', type=int, default=4, help='testing batch size')
parser.add_argument('--hourglass_size', type=int, default=48, help='hourglass_size')
parser.add_argument('--dres_expanse_ratio', type=int, default=3, help='dres_expanse_ratio')
parser.add_argument('--num_groups', type=int, default=1, help='num_groups')
parser.add_argument('--volume_size', type=int, default=48, help='volume_size')
# parse arguments
args = parser.parse_args()
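# Example invocation (a sketch; the dataset name, file list and checkpoint path are placeholders,
# not files shipped with this snippet):
#   python test.py --model MSNet2D --dataset kitti --datapath /path/to/KITTI2015 \
#       --testlist ./filenames/kitti15_test.txt --loadckpt ./checkpoints/MSNet2D.ckpt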
# dataset, dataloader
StereoDataset = __datasets__[args.dataset]
test_dataset = StereoDataset(args.datapath, args.testlist, False)
TestImgLoader = DataLoader(test_dataset, args.test_batch_size, shuffle=False, num_workers=4, drop_last=False)
# model, optimizer
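# Build the selected network. This script passes extra architecture hyper-parameters
# (hourglass_size, dres_expanse_ratio, num_groups, volume_size) to the model constructor;
# presumably these must match the values used when the checkpoint was trained.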
model = __models__[args.model](args.maxdisp, args.hourglass_size, args.dres_expanse_ratio, args.num_groups, args.volume_size)
model = nn.DataParallel(model)
model.cuda()
# load parameters
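# The checkpoint is assumed to be a dict with the weights stored under the 'model' key, saved from a
# DataParallel-wrapped model (i.e. parameter names carry the 'module.' prefix); adjust the loading
# below if your checkpoint is laid out differently.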
print("Loading model {}".format(args.loadckpt))
state_dict = torch.load(args.loadckpt)
model.load_state_dict(state_dict['model'])
def test(args):
print("Generating the disparity maps...")
os.makedirs('./predictions', exist_ok=True)
avg_test_scalars = AverageMeterDict()
for batch_idx, sample in enumerate(TestImgLoader):
start_time = time.time()
scalar_outputs, image_outputs = test_sample(sample)
avg_test_scalars.update(scalar_outputs)
del scalar_outputs, image_outputs
print('Iter {}/{}, time = {:3f}'.format(batch_idx,
len(TestImgLoader),
time.time() - start_time))
avg_test_scalars = avg_test_scalars.mean()
print("avg_test_scalars", avg_test_scalars)
print("Done!")
# test one sample
@make_nograd_func
def test_sample(sample):
    model.eval()
    imgL, imgR, disp_gt = sample['left'], sample['right'], sample['disparity']
    imgL = imgL.cuda()
    imgR = imgR.cuda()
    disp_gt = disp_gt.cuda()
    disp_ests = model(imgL, imgR)
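    # The network is expected to return a list of disparity estimates (e.g. intermediate and final
    # predictions), hence the list comprehensions below. The mask keeps only pixels whose ground-truth
    # disparity is valid (> 0) and inside the search range (< maxdisp); metrics are computed on these
    # pixels only.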
    mask = (disp_gt < args.maxdisp) & (disp_gt > 0)
    scalar_outputs = {}
    image_outputs = {"disp_est": disp_ests, "disp_gt": disp_gt, "imgL": imgL, "imgR": imgR}
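    # Standard stereo-matching error measures, as implemented in utils: EPE is the mean absolute
    # disparity error over masked pixels, ThresX is the fraction of masked pixels with error > X px,
    # and D1 is the usual KITTI outlier rate (error > 3 px and > 5 % of the ground truth). This
    # description assumes the conventional definitions; check utils for the exact implementations.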
scalar_outputs["D1"] = [D1_metric(disp_est, disp_gt, mask) for disp_est in disp_ests]
scalar_outputs["EPE"] = [EPE_metric(disp_est, disp_gt, mask) for disp_est in disp_ests]
scalar_outputs["Thres1"] = [Thres_metric(disp_est, disp_gt, mask, 1.0) for disp_est in disp_ests]
scalar_outputs["Thres2"] = [Thres_metric(disp_est, disp_gt, mask, 2.0) for disp_est in disp_ests]
scalar_outputs["Thres3"] = [Thres_metric(disp_est, disp_gt, mask, 3.0) for disp_est in disp_ests]
return tensor2float(scalar_outputs), image_outputs
if __name__ == '__main__':
    test(args)