# plot_confusion_matrix.py — forked from gaps013/Deep_Doc_Classifier
# (87 lines, 2.68 KB)
from __future__ import print_function
import seaborn as sn
import pandas as pd
from collections import OrderedDict
import numpy as np
import sys, os
from PIL import Image
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torchvision.models as models
import config as cf
from glob import glob
array = [[33,2,0,0,0,0,0,0,0,1,3],
[3,31,0,0,0,0,0,0,0,0,0],
[0,4,41,0,0,0,0,0,0,0,1],
[0,1,0,30,0,6,0,0,0,0,1],
[0,0,0,0,38,10,0,0,0,0,0],
[0,0,0,3,1,39,0,0,0,0,4],
[0,2,2,0,4,1,31,0,0,0,2],
[0,1,0,0,0,0,0,36,0,2,0],
[0,0,0,0,0,0,1,5,37,5,1],
[3,0,0,0,0,0,0,0,0,39,0],
[0,0,0,0,0,0,0,0,0,0,38]]
def to3channels(image_path):
    """Load an image from disk and return it as a 3-channel RGB PIL image.

    A single-channel (grayscale) image is replicated across three channels
    via numpy stacking. An image that already has more than one channel is
    converted with PIL's own mode conversion instead: stacking a (H, W, C)
    array would produce an invalid 4-D array and make Image.fromarray(...,
    'RGB') raise.

    Args:
        image_path: path to the image file on disk.

    Returns:
        A PIL.Image.Image in 'RGB' mode.
    """
    img = Image.open(image_path)
    imarray = np.array(img)
    if imarray.ndim != 2:
        # Already multi-channel (e.g. RGB/RGBA): let PIL do the conversion.
        return img.convert('RGB')
    # Grayscale: replicate the single plane into the last axis -> (H, W, 3).
    stacked_img = np.stack((imarray,) * 3, axis=-1)
    return Image.fromarray(stacked_img, 'RGB')
# Class names are the sub-directories of the test set; sorted so that the
# index of a name is deterministic and matches the scoring order below.
class_vector = os.listdir(r'Data/test/')
class_vector.sort()

# Build an AlexNet whose classifier head is resized to 10 outputs.
# pretrained=False: the ImageNet weights would be completely overwritten by
# the (strict) load_state_dict() from ./checkpoint.pth further down, so
# downloading them is wasted work.
# NOTE(review): the head has 10 outputs but the hard-coded matrix above is
# 11x11 — confirm the actual number of classes in Data/test/.
model = models.alexnet(pretrained=False)
num_input_features = model.classifier[6].in_features
model.classifier[6] = nn.Linear(num_input_features, 10)
model.cuda()
# Wrap in DataParallel so checkpoint keys prefixed with 'module.' will match.
model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
# Load the checkpoint and normalise its parameter-key prefixes so that they
# match a single-level DataParallel wrapper ('module.<param>').
state_dict = torch.load('./checkpoint.pth')
new_state_dict = OrderedDict()
for key, value in state_dict.items():
    if 'module' in key:
        # Key from a (triply-)nested DataParallel save: collapse the prefix.
        fixed_key = key.replace('module.module.module.', 'module.')
    else:
        # Key from a plain (non-DataParallel) save: add the prefix.
        fixed_key = 'module.' + key
    new_state_dict[fixed_key] = value
# Load the remapped weights and switch the model to inference mode.
model.load_state_dict(new_state_dict)
model.eval()
# Preprocessing pipeline: resize to the configured crop size, then to tensor.
# transforms.Resize replaces the long-deprecated transforms.Scale (removed
# from modern torchvision); behaviour is identical.
loader = transforms.Compose([transforms.Resize(cf.crop_size), transforms.ToTensor()])

# Collect every test .jpg, grouped per class sub-directory of Data/test/.
files = []
for class_name in os.listdir(r'Data/test/'):
    files += glob(r'Data/test/{}/*.jpg'.format(class_name))
# Confusion matrix accumulated over the whole test set:
# rows = true class, columns = predicted class.
scores = np.zeros(shape=(len(class_vector), len(class_vector)))
with torch.no_grad():  # inference only — skip autograd bookkeeping
    for file in files:
        img = to3channels(file)                 # load as 3-channel PIL.Image
        x = loader(img)                         # preprocess to a tensor
        x = x.unsqueeze(0)                      # add batch dimension
        output = model(x)                       # forward pass
        pred = torch.argmax(output, 1).item()   # predicted class index (int)
        # The true class is the name of the file's parent directory
        # (Data/test/<class>/<name>.jpg); os.path handles both separators.
        true_class = os.path.basename(os.path.dirname(file))
        scores[class_vector.index(true_class), pred] += 1
# Render the recomputed confusion matrix as an annotated heatmap with the
# class names on both axes.
df_cm = pd.DataFrame(scores, index=list(class_vector), columns=list(class_vector))
plt.figure(figsize=(10, 10))
sn.heatmap(df_cm, annot=True)
plt.yticks(rotation=0)  # keep the y-axis class labels horizontal
plt.show()
print('hold')  # NOTE(review): leftover debug marker — safe to delete