local_srn.m
function [Wt1, Wt2] = local_srn(IN, OUT, nhidnodes, gamma, nepochs, forgettingrate, hidnoise, momentum, beta, trackdevelopment, evolveweights)
% usage: [Wt1, Wt2] = local_srn(IN, OUT, nhidnodes, gamma, nepochs, forgettingrate, hidnoise, momentum, beta, trackdevelopment, evolveweights)
%
% neural net with one hidden layer and Elman-type recurrence
%
% learning with backpropagation
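%
% arguments:
%   IN, OUT          - training patterns, one row per pattern
%   nhidnodes        - number of hidden (and context) units
%   gamma            - learning rate (default 0.05)
%   nepochs          - number of training epochs (default 100)
%   forgettingrate   - decay applied to the copied-back context (default 0.0)
%   hidnoise         - variance of Gaussian noise added to the hidden outputs (default 0.0)
%   momentum         - fraction of the previous weight change carried forward (default 0.005)
%   beta             - gain passed to the activation function (default 1.0)
%   trackdevelopment - if true, return per-epoch weight snapshots as cell arrays
%   evolveweights    - optional struct of starting weights (fields wt1, wt2)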
%
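% example (hypothetical toy data, purely illustrative):
%   IN  = rand(200, 3);   % 200 patterns of 3 inputs
%   OUT = rand(200, 2);   % matching 2-element targets
%   [Wt1, Wt2] = local_srn(IN, OUT, 10, 0.05, 100);
%   % with tracking on, the returns are 1-by-nepochs cell arrays of snapshots:
%   [W1hist, W2hist] = local_srn(IN, OUT, 10, 0.05, 100, 0, 0, 0.005, 1, true);
%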
% default values for any parameters we haven't been given
if nargin < 4, gamma = 0.05; end
if nargin < 5, nepochs = 100; end
if nargin < 6, forgettingrate = 0.0; end
if nargin < 7, hidnoise = 0.0; end
if nargin < 8, momentum = 0.005; end
if nargin < 9, beta = 1.0; end
if nargin < 10, trackdevelopment = false; end % if true, note the weights at the end of each epoch
% get the dimensions of our data sets
[datarows, inelem]=size(IN);
[~, outelem]=size(OUT);
% won't track error for srn
% TotError = zeros(nepochs*datarows,2);
if nargin < 11
% initialise random weight matrices
Wt1 = 0.1* randn(nhidnodes,inelem + nhidnodes + 1); % +1 for bias!
Wt2 = 0.1* randn(outelem, nhidnodes + 1);
else
Wt1 = evolveweights.wt1{1};
Wt2 = evolveweights.wt2{1};
end
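% weight layout: Wt1 maps [input; context; bias] -> hidden, so its columns run
% inelem input weights, then nhidnodes recurrent (context) weights, then one
% bias column; Wt2 maps [hidden; bias] -> output.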
% initial context: the hidden units start with zero activation
LastHiddenActivation = zeros(nhidnodes,1);
old_dWt1 = 0.0; % scalar zeros suffice: MATLAB expands them on the first momentum update
old_dWt2 = 0.0;
for n = 1:nepochs
% fprintf('epoch %d\n', n);
rp = randperm(datarows);
for p = 1:datarows
q = rp(p);
% get appropriate input & target rows
% though we will represent them as col vectors
A = [IN(q,1:inelem)'; LastHiddenActivation];
T = OUT(q,1:outelem)';
% feedforward
% layer 1
B1 = Wt1*[A;1]; % input & bias
O1 = activation(B1,beta,0);
% is there any noise in transmission?
% if so, add zero-mean Gaussian noise with variance hidnoise to the
% outputs of the hidden layer
% (the commented-out variant with exp gives lognormal noise instead)
% O1 = O1 + sqrt(hidnoise)*exp(randn(nhidnodes,1));
if hidnoise > 0
O1 = O1 + sqrt(hidnoise)*randn(nhidnodes,1);
end
% store internal state for next loop
% but multiply each value by (1-forgetting rate)
LastHiddenActivation = (1-forgettingrate) * O1;
d_O1 = d_activation(B1,beta,0);
% layer 2
B2 = Wt2*[O1;1]; % hidden outputs plus a bias node
O2 = activation(B2,beta,0);
d_O2 = d_activation(B2,beta,0);
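% in equation form, the forward pass just computed is (a sketch, assuming the
% activation helpers implement a sigmoid-like f with gain beta):
%   h(t) = f( Wt1 * [x(t); (1-forgettingrate)*h(t-1); 1] )
%   y(t) = f( Wt2 * [h(t); 1] )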
% calculate the delta adjustments for the output layer
% output error signal: elementwise error times activation derivative
Delta2 = (T-O2) .* d_O2;
dWt2 = Delta2 * [O1;1]';
% now back propagate the errors
% interim hidden targets: project the output deltas back through Wt2
% (the bias column of Wt2 has no path back to the hidden units, so drop it)
Thid = Wt2(:,1:nhidnodes)' * Delta2;
% using this target find weight changes
dWt1 = (Thid .* d_O1) * [A;1]';
dWt1 = gamma * dWt1; % apply the learning rate scalar
dWt2 = gamma * dWt2; % apply the learning rate scalar
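% note this is backprop truncated at one time step: the context units are
% treated as fixed inputs, so no gradient flows into earlier steps (the
% standard Elman-network simplification of full backprop through time)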
% shift weight by delta + bit of old delta
Wt1 = Wt1 + dWt1 + momentum * old_dWt1;
Wt2 = Wt2 + dWt2 + momentum * old_dWt2;
% remember these deltas for the next step's momentum term
old_dWt1 = dWt1;
old_dWt2 = dWt2;
end
if trackdevelopment
% snapshot the weights at the end of this epoch
DevWeights1{n} = Wt1;
DevWeights2{n} = Wt2;
end
end
% when tracking development, return the per-epoch snapshots in place of the
% final weights
if trackdevelopment
Wt1 = DevWeights1;
Wt2 = DevWeights2;
end
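
% note: activation(B, beta, type) and d_activation(B, beta, type) are assumed
% to be sibling files on the path. A minimal sketch consistent with how they
% are called here, assuming type 0 selects a logistic sigmoid with gain beta
% (left commented out so it cannot shadow the real implementations):
%
% function O = activation(B, beta, type)
% % logistic sigmoid with gain beta
% O = 1 ./ (1 + exp(-beta * B));
%
% function d = d_activation(B, beta, type)
% % sigmoid derivative, written in terms of the sigmoid's output
% O = 1 ./ (1 + exp(-beta * B));
% d = beta * O .* (1 - O);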