Neuron.cpp
#include "Neuron.h"

#include <cmath>   // exp, pow, tanh
#include <cstdlib> // rand, RAND_MAX
double Neuron::eta = 0.1; // overall net learning rate, [0.0..1.0]
double Neuron::alpha = 0.5; // momentum, multiplier of last deltaWeight, [0.0..n]
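
// Build one Connection (weight + deltaWeight) per neuron in the next layer,
// each starting with a random weight; m_myIndex is this neuron's index within its own layer.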
Neuron::Neuron(unsigned numOutputs, unsigned myIndex)
{
    for (unsigned c = 0; c < numOutputs; c++) {
        m_outputWeights.push_back(Connection());
        m_outputWeights.back().weight = randomWeight();
    }

    m_myIndex = myIndex;
}
void Neuron::setOutputVal(double val) { m_outputVal = val; }
double Neuron::getOutputVal(void) const { return m_outputVal; }
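
// output = f( sum over the previous layer of (output * weight feeding this neuron) )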
void Neuron::feedForward(const Layer &prevLayer)
{
    double sum = 0.0;

    // Sum the previous layer's outputs (which are our inputs),
    // including the bias node from the previous layer
    for (unsigned n = 0; n < prevLayer.size(); n++) {
        sum += prevLayer[n].getOutputVal() *
               prevLayer[n].m_outputWeights[m_myIndex].weight;
    }

    m_outputVal = Neuron::transferFunction(sum);
}
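
// Output-layer gradient: (target - output) scaled by the transfer function's derivative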
void Neuron::calcOutputGradients(double targetVal)
{
    double delta = targetVal - m_outputVal;
    m_gradient = delta * Neuron::transferFunctionDerivative(m_outputVal);
}
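
// Hidden-layer gradient: this neuron's summed contribution to the next layer's
// gradients (sumDOW), scaled by the transfer function's derivative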
void Neuron::calcHiddenGradients(const Layer &nextLayer)
{
    double dow = Neuron::sumDOW(nextLayer);
    m_gradient = dow * Neuron::transferFunctionDerivative(m_outputVal);
}
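
// Weight update rule: newDeltaWeight = eta * input * gradient + alpha * oldDeltaWeight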
void Neuron::updateInputWeights(Layer &prevLayer)
{
    // The weights to be updated are in the Connection container
    // in the neurons in the preceding layer
    for (unsigned n = 0; n < prevLayer.size(); n++) {
        Neuron &neuron = prevLayer[n];
        double oldDeltaWeight = neuron.m_outputWeights[m_myIndex].deltaWeight;

        double newDeltaWeight =
            // Individual input, magnified by the gradient and train rate:
            eta
            * neuron.getOutputVal()
            * m_gradient
            // Also add momentum = a fraction of the previous delta weight
            + alpha
            * oldDeltaWeight;

        neuron.m_outputWeights[m_myIndex].deltaWeight = newDeltaWeight;
        neuron.m_outputWeights[m_myIndex].weight += newDeltaWeight;
    }
}
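
// Activation (transfer) function applied to the weighted input sum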
double Neuron::transferFunction(double x)
{
    // tanh - output range [-1.0..1.0]
    // return tanh(x);

    // fast sigmoid - output range [-1.0..1.0]
    // return x / (1 + abs(x));

    // logistic sigmoid - output range [0.0..1.0]
    return 1.0 / (1.0 + exp(-x));
}
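
// Derivative of the transfer function. Note that callers pass the neuron's *output*
// value, not the pre-activation sum, so the derivative is expressed in terms of the output.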
double Neuron::transferFunctionDerivative(double x)
{
    // tanh derivative, in terms of the output: return 1 - x*x;
    // fast sigmoid derivative, in terms of the pre-activation sum: return 1 / pow((abs(x) + 1), 2);

    // logistic sigmoid derivative, in terms of the output: f'(sum) = output * (1 - output)
    return x * (1.0 - x);
}
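
// Uniform random weight in [0.0..1.0]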
double Neuron::randomWeight(void) { return rand() / double(RAND_MAX); }
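
// Sum of this neuron's contributions to the errors at the neurons it feeds;
// the next layer's bias neuron is excluded (hence size() - 1)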
double Neuron::sumDOW(const Layer &nextLayer) const
{
    double sum = 0.0;

    // Sum our contribution of the errors at the nodes we feed
    for (unsigned n = 0; n < nextLayer.size() - 1; n++) {
        sum += m_outputWeights[n].weight * nextLayer[n].m_gradient;
    }

    return sum;
}
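
/*
  A minimal sketch of how a driving Net class typically calls these methods
  (hypothetical caller -- Net is not defined in this file):

      // forward pass: feed each non-bias neuron in a layer from the previous layer
      for (unsigned n = 0; n < currentLayer.size() - 1; n++) {
          currentLayer[n].feedForward(prevLayer);
      }

      // backward pass: calcOutputGradients() on the output layer, calcHiddenGradients()
      // on each hidden layer, then updateInputWeights() from the last layer back to the first
*/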