model.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class GTSRNet(nn.Module):
    """Convolutional classifier: a stack of conv blocks followed by two FC layers."""

    def __init__(
        self,
        n_classes,
        img_ch=1,
        channels=(16, 32, 64, 128, 200),
        device="cuda" if torch.cuda.is_available() else "cpu",
        kernel=3,
    ):
        super().__init__()
        self.device = device
        self.channels = channels
        # Encoder: block 0 starts with batchconv0 (no leading ReLU),
        # every later block uses the pre-activation batchconv.
        self.Conv = nn.Sequential()
        self.Conv.add_module(
            "conv0",
            convblock(ch_in=img_ch, ch_out=channels[0], kernel_sz=kernel, block=0),
        )
        for k in range(1, len(channels)):
            self.Conv.add_module(
                f"conv{k}",
                convblock(
                    ch_in=channels[k - 1], ch_out=channels[k], kernel_sz=kernel, block=k
                ),
            )
        self.conv_drop = nn.Dropout2d()
        # The head assumes the final feature map is 8x8, i.e. a 128x128
        # input halved by the four max-pool stages in forward().
        self.fc1 = nn.Linear(self.channels[-1] * 8 * 8, 350)
        self.fc2 = nn.Linear(350, n_classes)

    def forward(self, x, normalize=False, verbose=False):
        # encoding path; xout collects each block's activations (currently
        # unused, as are the normalize/verbose flags)
        xout = []
        x = self.Conv[0](x)
        xout.append(x)
        for k in range(1, len(self.Conv)):
            x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
            x = self.Conv[k](x)
            xout.append(x)
        # classifier head: dropout, flatten, two FC layers, log-probabilities
        x = self.conv_drop(x)
        x = x.view(-1, self.channels[-1] * 8 * 8)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)


class convblock(nn.Module):
    """Two batchnorm-conv layers; the very first block skips the leading ReLU."""

    def __init__(self, ch_in, ch_out, kernel_sz, block=-1):
        super().__init__()
        self.conv = nn.Sequential()
        self.block = block
        if self.block != 0:
            self.conv.add_module("conv_0", batchconv(ch_in, ch_out, kernel_sz))
        else:
            self.conv.add_module("conv_0", batchconv0(ch_in, ch_out, kernel_sz))
        self.conv.add_module("conv_1", batchconv(ch_out, ch_out, kernel_sz))

    def forward(self, x):
        return self.conv(x)


def batchconv0(ch_in, ch_out, kernel_sz):
    # BatchNorm -> Conv with "same" padding; no activation in front, so raw
    # inputs are normalized before the first convolution
    return nn.Sequential(
        nn.BatchNorm2d(ch_in, eps=1e-5, momentum=0.1),
        nn.Conv2d(ch_in, ch_out, kernel_sz, padding=kernel_sz // 2, bias=False),
    )


def batchconv(ch_in, ch_out, sz):
    # pre-activation ordering: BatchNorm -> ReLU -> Conv with "same" padding
    return nn.Sequential(
        nn.BatchNorm2d(ch_in, eps=1e-5, momentum=0.1),
        nn.ReLU(inplace=True),
        nn.Conv2d(ch_in, ch_out, sz, padding=sz // 2, bias=False),
    )
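

if __name__ == "__main__":
    # Minimal smoke test (not part of the original file): instantiate the model
    # and check output shapes. n_classes=43 is only an illustrative value
    # (GTSRB, the likely target dataset, has 43 classes); the 128x128 input
    # size follows from fc1's 8x8 flatten assumption.
    model = GTSRNet(n_classes=43, img_ch=1)
    model.eval()  # disable the dropout layers for a deterministic check
    with torch.no_grad():
        dummy = torch.randn(2, 1, 128, 128)  # (batch, channels, height, width)
        logp = model(dummy)  # log-probabilities from log_softmax
    print(logp.shape)  # expected: torch.Size([2, 43])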