submission.py
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torch.nn.functional as F
import skimage
import skimage.io
import skimage.transform
import numpy as np
import time
import math
from utils import preprocess
from models import *
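# Generates disparity maps for the KITTI 2012/2015 test images with a trained PSMNet
# (stacked-hourglass model) and saves them as 16-bit PNGs (disparity * 256), the format
# expected for KITTI benchmark submissions.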
# 2012 data /media/jiaren/ImageNet/data_scene_flow_2012/testing/
parser = argparse.ArgumentParser(description='PSMNet')
parser.add_argument('--KITTI', default='2015', help='KITTI version')
parser.add_argument('--datapath', default='/home/lzd/DATA/KITTI/2015/training/', help='path to the KITTI data')
parser.add_argument('--loadmodel', default=None, help='path to a pretrained model checkpoint')
parser.add_argument('--maxdisp', type=int, default=192, help='maximum disparity')
parser.add_argument('--no_cuda', action='store_true', default=False, help='disables CUDA')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
if args.KITTI == '2015':
    from dataloader import KITTI_submission_loader as DA
else:
    from dataloader import KITTI_submission_loader2012 as DA
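# DA.dataloader collects the left/right test image file paths under --datapath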
test_left_img, test_right_img = DA.dataloader(args.datapath)
model = stackhourglass(args.maxdisp)
# keep the DataParallel wrapper so checkpoints saved with a 'module.' key prefix load correctly
model = nn.DataParallel(model, device_ids=[0])
if args.cuda:
    model.cuda()

if args.loadmodel is not None:
    state_dict = torch.load(args.loadmodel, map_location='cuda' if args.cuda else 'cpu')
    model.load_state_dict(state_dict['state_dict'])
print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
def test(imgL, imgR):
    model.eval()

    imgL = torch.FloatTensor(imgL)
    imgR = torch.FloatTensor(imgR)
    if args.cuda:
        imgL, imgR = imgL.cuda(), imgR.cuda()

    with torch.no_grad():
        output = model(imgL, imgR)

    output = torch.squeeze(output)
    pred_disp = output.cpu().numpy()

    return pred_disp
def main():
    processed = preprocess.get_transform(augment=False)  # normalization

    # make sure the output directory exists before saving
    out_dir = './disp_image_' + str(args.KITTI)
    os.makedirs(out_dir, exist_ok=True)

    t = 0
    for inx in range(len(test_left_img)):
        imgL_o = skimage.io.imread(test_left_img[inx]).astype('float32')
        imgR_o = skimage.io.imread(test_right_img[inx]).astype('float32')
        imgL = processed(imgL_o).numpy()
        imgR = processed(imgR_o).numpy()
        imgL = np.reshape(imgL, [1, 3, imgL.shape[1], imgL.shape[2]])
        imgR = np.reshape(imgR, [1, 3, imgR.shape[1], imgR.shape[2]])

        # zero-pad to a fixed 384 x 1248 input (larger than any KITTI frame): pad on top and on the right
        top_pad = 384 - imgL.shape[2]
        right_pad = 1248 - imgL.shape[3]
        imgL = np.lib.pad(imgL, ((0, 0), (0, 0), (top_pad, 0), (0, right_pad)), mode='constant', constant_values=0)
        imgR = np.lib.pad(imgR, ((0, 0), (0, 0), (top_pad, 0), (0, right_pad)), mode='constant', constant_values=0)

        start_time = time.time()
        pred_disp = test(imgL, imgR)
        t += time.time() - start_time
        print('time = %.2f' % (time.time() - start_time))

        # crop the padding off so the saved map matches the original image size
        top_pad = 384 - imgL_o.shape[0]
        right_pad = 1248 - imgL_o.shape[1]
        img = pred_disp[top_pad:, :-right_pad]

        # KITTI submission format: 16-bit PNG with disparity values scaled by 256
        skimage.io.imsave(out_dir + '/' + test_left_img[inx].split('/')[-1], (img * 256).astype('uint16'))
        # alternative: save an 8-bit image for quick visualization
        # skimage.io.imsave(out_dir + '/' + test_left_img[inx].split('/')[-1], img.astype('uint8'))

    mean_time = t / len(test_left_img)
    print('mean time = %.2f' % mean_time)
if __name__ == '__main__':
main()
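# Example invocation (paths below are placeholders; adjust to your setup):
#   python submission.py --KITTI 2015 --datapath /path/to/KITTI/2015/testing/ \
#       --loadmodel /path/to/pretrained_model_KITTI2015.tar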