#%% packages
import numpy as np
from sklearn.metrics import accuracy_score
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
# %%
transform_super = transforms.Compose(
    [transforms.Resize(32),
     transforms.Grayscale(num_output_channels=1),
     transforms.ToTensor(),
     transforms.Normalize((0.5, ), (0.5, ))])
batch_size = 4
trainset = torchvision.datasets.ImageFolder(root='data/train', transform=transform_super)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)
testset = torchvision.datasets.ImageFolder(root='data/test', transform=transform_super)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=True)
classes = ['healthy', 'sick']
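# %% (optional) label-mapping check: a small added sketch, not part of the original script.
# ImageFolder assigns class indices alphabetically from the folder names, so this should
# print {'healthy': 0, 'sick': 1}, matching the order of classes above.
print(trainset.class_to_idx)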
# %% Neural Network setup
class ImageClassificationNet(nn.Module):
    """Small CNN for binary image classification: two conv/pool blocks and three linear layers."""
    def __init__(self) -> None:
        super().__init__()
        self.conv1 = nn.Conv2d(1, 6, 3)        # 1 input channel (grayscale), 6 filters, 3x3 kernel
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 3)
        self.fc1 = nn.Linear(16 * 6 * 6, 128)  # 16 feature maps of size 6x6 after the second pooling
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 1)            # single output unit for binary classification
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        x = self.pool(x)
        x = self.conv2(x)
        x = self.relu(x)
        x = self.pool(x)
        x = torch.flatten(x, 1)                # flatten everything except the batch dimension
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.relu(x)
        x = self.fc3(x)
        x = self.sigmoid(x)                    # squash the output to a probability in [0, 1]
        return x
#%% init model
model = ImageClassificationNet()
loss_fn = nn.BCELoss()  # binary cross-entropy on the sigmoid output
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.8)
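# %% (optional) shape sanity check: a minimal added sketch, assuming 32x32 grayscale inputs
# as produced by transform_super. The spatial size shrinks 32 -> 30 (conv1, 3x3, no padding)
# -> 15 (pool) -> 13 (conv2) -> 6 (pool), which is why fc1 expects 16 * 6 * 6 input features.
with torch.no_grad():
    dummy = torch.zeros(1, 1, 32, 32)  # one fake grayscale image
    print(model(dummy).shape)          # expected: torch.Size([1, 1])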
# %% training
NUM_EPOCHS = 10
losses = []
for epoch in range(NUM_EPOCHS):
    epoch_loss = 0
    for i, data in enumerate(trainloader, 0):
        print(f'Epoch {epoch+1}/{NUM_EPOCHS}, Step {i+1}/{len(trainloader)}')
        inputs, labels = data
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = loss_fn(outputs, labels.reshape(-1, 1).float())  # labels as (batch, 1) floats for BCELoss
        loss.backward()
        optimizer.step()
        # add loss to epoch loss
        epoch_loss += loss.item()
    losses.append(epoch_loss)
#%% losses
import seaborn as sns
sns.lineplot(x=list(range(NUM_EPOCHS)), y=losses)
# %% test
y_test = []
y_test_hat = []
for i, data in enumerate(testloader, 0):
    inputs, y_test_temp = data
    with torch.no_grad():
        y_test_hat_temp = model(inputs).round()  # round the sigmoid output to a 0/1 prediction
    y_test.extend(y_test_temp.numpy())
    y_test_hat.extend(y_test_hat_temp.numpy())
# %%
acc = accuracy_score(y_test, y_test_hat)
print(f'Accuracy: {acc*100:.2f} %')
# %%
# The data is balanced, so a naive baseline classifier reaches an accuracy of about 50 %.
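# %% (optional) baseline check: a small added sketch, assuming y_test holds the 0/1 labels
# collected above; it computes the majority-class accuracy that the comment above refers to.
baseline_acc = max(np.mean(y_test), 1 - np.mean(y_test))
print(f'Baseline accuracy: {baseline_acc*100:.2f} %')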