Implemented Decoupled Neural Interface
csxeba committed Jan 16, 2018
1 parent e85d08c commit 5f741e8
Showing 3 changed files with 113 additions and 6 deletions.
15 changes: 9 additions & 6 deletions model/backpropagation.py
@@ -19,18 +19,21 @@ def learn_batch(self, X, Y, w=None):
         delta = self.cost.derivative(preds, Y)
         if w is not None:
             delta *= w[:, None]
-        self.layers.set_weights(
-            self.optimizer.optimize(
-                self.layers.get_weights(unfold=True),
-                self.backpropagate(delta), m
-            )
-        )
+        self.backpropagate(delta)
+        self.update(m)
         return self.cost(self.output, Y) / m
 
+    def update(self, m):
+        W = self.layers.get_weights(unfold=True)
+        gW = self.get_gradients(unfold=True)
+        self.layers.set_weights(self.optimizer.optimize(W, gW, m))
+
     def backpropagate(self, error):
         # TODO: optimize this, skip untrainable layers at the beginning
         for layer in self.layers[-1:0:-1]:
             error = layer.backpropagate(error)
             if error is None:
                 break
         return self.get_gradients(unfold=True)
 
     def get_gradients(self, unfold=True):
82 changes: 82 additions & 0 deletions xperiments/xp_decoupled.py
@@ -0,0 +1,82 @@
from collections import deque

from csxdata.utilities.loader import pull_mnist_data

from brainforge import BackpropNetwork
from brainforge.layers.abstract_layer import LayerBase, NoParamMixin
from brainforge.layers import DenseLayer


class DNI(NoParamMixin, LayerBase):

    def __init__(self, synth: BackpropNetwork=None, **kw):
        super().__init__(**kw)
        self.synth = synth
        self.memory = deque()
        self._predictor = None
        self._previous = None

    def _default_synth(self):
        synth = BackpropNetwork(input_shape=self.inshape, layerstack=[
            DenseLayer(self.inshape[0], activation="tanh"),
            DenseLayer(self.inshape[0], activation="linear"),
        ], cost="mse", optimizer="sgd")
        return synth

    def connect(self, to, inshape):
        super().connect(to, inshape)
        self._previous = to.layers[-1]
        if self.synth is None:
            self.synth = self._default_synth()

    def feedforward(self, X):
        delta = self.synth.predict(X)
        self._previous.backpropagate(delta)
        if self.brain.learning:
            self.memory.append(delta)
        return X

    def backpropagate(self, delta):
        m = self.memory.popleft()
        print(f"\rSynth cost: {self.synth.cost(m, delta).sum():.4f}", end="")
        self.synth.learn_batch(m, delta)

    @property
    def outshape(self):
        return self.inshape

    @classmethod
    def from_capsule(cls, capsule):
        pass

    def __str__(self):
        return "DNI"


def build_decoupled_net(inshape, outshape):
    net = BackpropNetwork(input_shape=inshape, layerstack=[
        DenseLayer(60, activation="tanh"), DNI(),
        DenseLayer(outshape, activation="softmax")
    ], cost="xent", optimizer="adam")
    return net


def build_normal_net(inshape, outshape):
    net = BackpropNetwork(input_shape=inshape, layerstack=[
        DenseLayer(60, activation="tanh"),
        DenseLayer(outshape, activation="softmax")
    ], cost="xent", optimizer="adam")
    return net


def xperiment():
    lX, lY, tX, tY = pull_mnist_data()
    net = build_decoupled_net(lX.shape[1:], lY.shape[1:])
    for epoch in range(30):
        net.fit(lX, lY, batch_size=128, epochs=1, verbose=0)
        cost, acc = net.evaluate(tX, tY)
        print(f"\nEpoch {epoch} done! Network accuracy: {acc:.2%}")


if __name__ == '__main__':
    xperiment()
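
For context (not part of this commit): the DNI layer above follows the synthetic-gradient scheme of Jaderberg et al., "Decoupled Neural Interfaces using Synthetic Gradients" (2016). A small auxiliary model predicts the error gradient at a layer boundary from the activations alone, so the preceding layers can be updated without waiting for the true backward pass, and the synthesizer itself is regressed onto the true gradient once it arrives. Below is a minimal NumPy sketch of that training signal only; every name in it (synth_W, true_grad, ...) is invented for illustration and none of it uses the brainforge API.

import numpy as np

rng = np.random.default_rng(0)

# Toy data: hidden activations h (batch x d) and the "true" gradient dL/dh,
# which in a real network only arrives after the full backward pass.
d = 8
h = rng.normal(size=(32, d))
true_grad = rng.normal(size=(32, d))        # stand-in for the real dL/dh

# Linear gradient synthesizer: predicts dL/dh from h alone.
synth_W = np.zeros((d, d))                  # zero init keeps early synthetic gradients harmless

for step in range(500):
    synthetic_grad = h @ synth_W            # would be used to update upstream layers immediately
    # ...once the true gradient becomes available, regress the synthesizer onto it:
    err = synthetic_grad - true_grad
    synth_W -= 0.1 * (h.T @ err) / len(h)   # one SGD step on the MSE loss

print("synthesizer MSE:", float(np.mean((h @ synth_W - true_grad) ** 2)))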
22 changes: 22 additions & 0 deletions xperiments/xp_dnc.py
@@ -0,0 +1,22 @@
import numpy as np

from brainforge.atomic.activation import OnePlus


oneplus = OnePlus()


class DNC:

    def __init__(self, controller, reads):
        self.ctrl = controller
        self.memory = None
        self.usage = None
        self.link = None
        self.reads = np.zeros(())

    def forward_step(self, x, reads):
        Z = np.concatenate((x, reads.flat))

    def feedforward(self, X):
        pass
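
The stub above only constructs OnePlus without applying it yet. In the DNC paper (Graves et al., 2016, "Hybrid computing using a neural network with dynamic external memory"), oneplus(x) = 1 + log(1 + e^x) keeps interface values such as read strengths in [1, inf). A minimal NumPy sketch of that formula follows, assuming brainforge's OnePlus computes the same function (an assumption, since its implementation is not shown here):

import numpy as np

def oneplus(x):
    # 1 + softplus(x); logaddexp(0, x) == log(1 + e^x), computed stably
    return 1.0 + np.logaddexp(0.0, x)

print(oneplus(np.array([-2.0, 0.0, 2.0])))  # roughly [1.13, 1.69, 3.13]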
