val.py

import numpy as np
import torch
from medpy import metric
from scipy.ndimage import zoom


def calculate_metric_percase(pred, gt):
    """Compute Dice and 95% Hausdorff distance for one binary mask pair."""
    pred[pred > 0] = 1
    gt[gt > 0] = 1
    if pred.sum() > 0 and gt.sum() > 0:
        dice = metric.binary.dc(pred, gt)
        hd95 = metric.binary.hd95(pred, gt)
        return dice, hd95
    else:
        # hd95 is undefined when either mask is empty, so fall back to (0, 0).
        return 0, 0
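

# Quick sanity check (illustrative only, not part of the original file):
# identical non-empty masks should score a Dice of 1.0 and an HD95 of 0.0, e.g.
#   m = np.zeros((8, 8)); m[2:5, 2:5] = 1
#   calculate_metric_percase(m.copy(), m.copy())  # -> (1.0, 0.0)
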

@torch.no_grad()
def test_single_volume(image, label, net, classes, patch_size=[256, 256]):
    """Evaluate one volume slice by slice; return (dice, hd95) per foreground class."""
    image = image.squeeze(0).cpu().detach().numpy()
    label = label.squeeze(0).cpu().detach().numpy()
    net.eval()
    if len(image.shape) == 3:
        # 3D volume: run inference on each 2D slice at the network patch size,
        # then resize the prediction back to the original slice resolution.
        prediction = np.zeros_like(label)
        for ind in range(image.shape[0]):
            img_slice = image[ind, :, :]
            x, y = img_slice.shape[0], img_slice.shape[1]
            img_slice = zoom(
                img_slice, (patch_size[0] / x, patch_size[1] / y), order=0)
            net_input = torch.from_numpy(img_slice).unsqueeze(
                0).unsqueeze(0).float().cuda()
            # Single forward pass; the network returns two auxiliary outputs.
            outputs = net(net_input)
            out_aux1, out_aux2 = outputs[0], outputs[1]
            out_aux1_soft = torch.softmax(out_aux1, dim=1)
            out_aux2_soft = torch.softmax(out_aux2, dim=1)
            # Average the two softmax maps, then take the per-pixel argmax.
            out = torch.argmax(
                (out_aux1_soft + out_aux2_soft) * 0.5, dim=1).squeeze(0)
            out = out.cpu().detach().numpy()
            pred = zoom(
                out, (x / patch_size[0], y / patch_size[1]), order=0)
            prediction[ind] = pred
    else:
        # 2D image: run a single forward pass on the whole input.
        net_input = torch.from_numpy(image).float().cuda()
        outputs = net(net_input)
        out_aux1, out_aux2 = outputs[0], outputs[1]
        out_aux1_soft = torch.softmax(out_aux1, dim=1)
        out_aux2_soft = torch.softmax(out_aux2, dim=1)
        out = torch.argmax(
            (out_aux1_soft + out_aux2_soft) * 0.5, dim=1).squeeze(0)
        prediction = out.cpu().detach().numpy()
    # Compute metrics for each foreground class (class 0 is background).
    metric_list = []
    for i in range(1, classes):
        metric_list.append(calculate_metric_percase(
            prediction == i, label == i))
    return metric_list
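

# --- Minimal usage sketch (not part of the original file) ---
# Rough illustration of how test_single_volume might be called. The dummy
# two-headed network, tensor shapes, and class count below are assumptions for
# demonstration only; a CUDA device is required, matching the .cuda() calls above.
if __name__ == "__main__":
    import torch.nn as nn

    class DummyTwoHeadNet(nn.Module):
        # Hypothetical stand-in for a segmentation net returning two auxiliary logit maps.
        def __init__(self, num_classes=4):
            super().__init__()
            self.head1 = nn.Conv2d(1, num_classes, kernel_size=1)
            self.head2 = nn.Conv2d(1, num_classes, kernel_size=1)

        def forward(self, x):
            return self.head1(x), self.head2(x)

    # Toy volume: one batch, 8 slices of 128 x 128, with an integer label map.
    image = torch.rand(1, 8, 128, 128)
    label = torch.randint(0, 4, (1, 8, 128, 128))
    net = DummyTwoHeadNet(num_classes=4).cuda()

    metrics = test_single_volume(image, label, net, classes=4, patch_size=[256, 256])
    print(metrics)  # one (dice, hd95) pair per foreground class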