neural_network.py
import numpy as np
from loss import binary_cross_entropy
from adam import Adam
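
# Interface assumptions, inferred from how Sequential uses its collaborators rather
# than confirmed against loss.py / adam.py: binary_cross_entropy(y_true, y_pred) is
# expected to return a (cost, dZ) pair, Adam to expose init_params(layers) and
# optimize(layers, gradients, batch_size, iteration), and every layer to provide
# forward(x), backprop(dZ), update_params(update), and an output_shape attribute.
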
class Sequential:
    """A minimal feed-forward model that chains layers and trains them with mini-batch gradient descent."""

    def __init__(self):
        self.loss_function = None
        self.optimizer = None
        self.epochs = None
        self.layers = []
        self.batch_size = 4       # default; overwritten in fit()
        self.loss = []            # running per-batch training loss
        self.accuracies = []      # running per-batch training accuracy
        self.val_loss = []
        self.val_accuracies = []

    def add(self, layer):
        """Append a layer to the network."""
        self.layers.append(layer)

    def compile(self, loss='binary_cross_entropy', optimizer='adam'):
        """Select the loss function and optimizer, then print a summary of the layer stack."""
        if loss == 'binary_cross_entropy':
            self.loss_function = binary_cross_entropy
        else:
            raise ValueError(f"{loss} has not been added yet!")
        if optimizer == 'adam':
            self.optimizer = Adam()
            self.optimizer.init_params(self.layers)
        else:
            raise ValueError(f"{optimizer} has not been added yet!")
        for idx, layer in enumerate(self.layers):
            print(f"Layer {idx + 1} ; Name: {layer.__class__.__name__} ; Output Shape: {layer.output_shape}")

    def fit(self, x, y, epochs, batch_size, val_x=None, val_y=None):
        """Train on mini-batches of (x, y); optionally report validation metrics after every epoch."""
        self.epochs = epochs
        self.batch_size = batch_size
        iteration = 0  # global step counter, used by Adam for bias correction
        for epoch in range(1, epochs + 1):  # range(1, epochs) would skip the final epoch
            idx = 0
            batch_no = 0
            # Round up so a partial final batch still counts.
            if y.shape[1] % batch_size:
                no_batches = (y.shape[1] // batch_size) + 1
            else:
                no_batches = y.shape[1] // batch_size
            print(f"\nEpoch {epoch}/{epochs}")
            while idx < y.shape[1]:
                batch_no += 1
                x_batch = x[idx:idx + self.batch_size]
                y_batch = y[:, idx:idx + self.batch_size]
                idx += self.batch_size
                # Forward pass.
                activations = self.predict(x_batch.copy())
                cost, dZ = self.loss_function(y_batch, activations)
                self.loss.append(cost)
                y_pred = activations.reshape(1, -1)
                accuracy = np.mean(np.round(y_pred) == y_batch)
                self.accuracies.append(accuracy)
                # Note: these are running averages over every batch seen so far, across epochs.
                print(f"Batch {batch_no}/{no_batches} loss: {sum(self.loss) / len(self.loss)} "
                      f"accuracy: {sum(self.accuracies) / len(self.accuracies)}", end='\r')
                # Backward pass: propagate dZ from the output layer back to the input layer.
                gradients = []
                for layer in reversed(self.layers):
                    dZ, gradient = layer.backprop(dZ)
                    gradients.append(gradient)
                iteration += 1
                gradients.reverse()  # restore input-to-output order to match self.layers
                optimizations = self.optimizer.optimize(self.layers, gradients, batch_size, iteration)
                for i in range(len(self.layers)):
                    # Layers without trainable parameters report None and are skipped.
                    if optimizations[i][0] is None:
                        continue
                    self.layers[i].update_params(optimizations[i])
            print(f"Batch {batch_no}/{no_batches} loss: {sum(self.loss) / len(self.loss)} "
                  f"accuracy: {sum(self.accuracies) / len(self.accuracies)}", end='')
            if val_x is not None:
                val_dict = self.evaluate(val_x, val_y)
                self.val_loss.extend(val_dict['batch_loss'])
                self.val_accuracies.extend(val_dict['batch_accuracy'])
                print(f" val_loss: {sum(self.val_loss) / len(self.val_loss)} "
                      f"val_accuracy: {sum(self.val_accuracies) / len(self.val_accuracies)}")
            else:
                print()  # terminate the progress line when there is no validation data

    def predict(self, x):
        """Run a forward pass through every layer and return the final activations."""
        for layer in self.layers:
            x = layer.forward(x)
        return x

    def evaluate(self, x, y):
        """Compute loss and accuracy over (x, y) in mini-batches, without updating weights."""
        idx = 0
        batch_no = 0
        loss, accuracies = [], []
        while idx < y.shape[1]:
            batch_no += 1
            x_batch = x[idx:idx + self.batch_size]
            y_batch = y[:, idx:idx + self.batch_size]
            idx += self.batch_size
            activations = self.predict(x_batch.copy())
            cost, _ = self.loss_function(y_batch, activations)
            loss.append(cost)
            y_pred = activations.reshape(1, -1)
            accuracy = np.mean(np.round(y_pred) == y_batch)
            accuracies.append(accuracy)
        return {
            'loss': sum(loss) / len(loss),
            'accuracy': sum(accuracies) / len(accuracies),
            'batch_loss': loss,
            'batch_accuracy': accuracies
        }
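
# Minimal usage sketch (illustrative only). It assumes inputs x of shape
# (n_samples, n_features) and labels y of shape (1, n_samples), plus a Dense layer
# implementing the interface described above; the `dense` import path and the
# Dense constructor signature are hypothetical, not taken from this repository.
#
#     from dense import Dense
#
#     model = Sequential()
#     model.add(Dense(units=8, activation='relu'))
#     model.add(Dense(units=1, activation='sigmoid'))
#     model.compile(loss='binary_cross_entropy', optimizer='adam')
#     model.fit(x, y, epochs=10, batch_size=4, val_x=x_val, val_y=y_val)
#     predictions = model.predict(x)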