Bugfixes and setup.py
csxeba committed Mar 4, 2020
1 parent 183e860 commit b0ade67
Showing 22 changed files with 104 additions and 62 deletions.
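In short: `BackpropNetwork` is renamed to `Backpropagation` throughout (Readme examples, learners and xperiments scripts included), `fit`, `fit_generator` and `epoch` gain a `validation_steps` argument backed by a new `evaluate_stream()` method so validation can run on a batch stream, and a `setup.py` is added to make the package pip-installable.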
10 changes: 5 additions & 5 deletions Readme.md
@@ -277,7 +277,7 @@ implemented:
```python
import numpy as np

-from brainforge import LayerStack, BackpropNetwork
+from brainforge import LayerStack, Backpropagation
from brainforge.layers import DenseLayer

def input_stream(batchsize=20):
@@ -307,7 +307,7 @@ For more complicated tasks, the use of the dataframe library csxdata is suggested
```python
from csxdata import CData

-from brainforge import BackpropNetwork
+from brainforge import Backpropagation
from brainforge.layers import (
DenseLayer, DropOut, Activation,
PoolLayer, ConvLayer, Flatten
@@ -340,7 +340,7 @@ model.fit(X, Y, batch_size=20, epochs=30, validation=valid,
```python
from csxdata import Sequence

-from brainforge import BackpropNetwork
+from brainforge import Backpropagation
from brainforge.layers import DenseLayer, LSTM

datapath = "path/to/text/file.txt"
@@ -383,7 +383,7 @@ import time
import numpy as np
from matplotlib import pyplot as plt

-from brainforge import BackpropNetwork
+from brainforge import Backpropagation
from brainforge.layers import DenseLayer, DropOut
from brainforge.evolution import Population, to_phenotype

@@ -477,7 +477,7 @@ from collections import deque

import gym

-from brainforge import BackpropNetwork
+from brainforge import Backpropagation
from brainforge.layers import DenseLayer
from brainforge.reinforcement import DQN, agentconfig
from brainforge.optimization import RMSprop
2 changes: 1 addition & 1 deletion brainforge/__init__.py
@@ -1,6 +1,6 @@
from brainforge.config import set_globals
from brainforge.model import LayerStack
-from brainforge.learner import BackpropNetwork
+from brainforge.learner import Backpropagation
from brainforge.learner import NeuroEvolution
from brainforge.learner import DirectFeedbackAlignment
from brainforge.learner import ExtremeLearningMachine
4 changes: 2 additions & 2 deletions brainforge/gradientcheck/gradientcheck.py
@@ -2,10 +2,10 @@

from .raw_gradients import analytical_gradients, numerical_gradients
from .analyze_difference import analyze_difference_matrices, get_results
-from ..learner import BackpropNetwork
+from ..learner import Backpropagation


-def run(network: BackpropNetwork, X=None, Y=None, epsilon=1e-5, throw=False, display=True):
+def run(network: Backpropagation, X=None, Y=None, epsilon=1e-5, throw=False, display=True):
if X is None:
X = np.random.normal(scale=0.1, size=network.input_shape)
if Y is None:
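For reference, here is a minimal sketch of invoking the renamed checker. It assumes `run()` is reachable through the package-level `gradientcheck` import used in xperiments/xp_conv.py; the network, shapes and data are illustrative, not from the repository:

```python
import numpy as np

from brainforge import Backpropagation, gradientcheck
from brainforge.layers import DenseLayer

# A tiny throwaway network; any small differentiable stack works.
net = Backpropagation(input_shape=(8,), layerstack=[
    DenseLayer(16, activation="tanh"),
    DenseLayer(2, activation="linear"),
], cost="mse", optimizer="sgd")

# Fixed random data; run() would sample its own if X or Y were None.
X = np.random.normal(scale=0.1, size=(20, 8))
Y = np.random.normal(scale=0.1, size=(20, 2))

# throw=False reports the verdict instead of raising on failure.
gradientcheck.run(net, X, Y, epsilon=1e-5, throw=False, display=True)
```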
2 changes: 1 addition & 1 deletion brainforge/learner/__init__.py
@@ -1,4 +1,4 @@
-from .backpropagation import BackpropNetwork
+from .backpropagation import Backpropagation
from .neuroevolution import NeuroEvolution
from .abstract_learner import Learner
from .feedback_alignment import DirectFeedbackAlignment
58 changes: 42 additions & 16 deletions brainforge/learner/abstract_learner.py
@@ -15,28 +15,45 @@ def __init__(self, layerstack, cost="mse", name="", **kw):
        self.age = 0
        self.cost = _costs.get(cost)

-    def fit_generator(self, generator, lessons_per_epoch, epochs=30, metrics=(), validation=(), verbose=1, **kw):
+    def fit_generator(self,
+                      generator,
+                      lessons_per_epoch,
+                      epochs=30,
+                      metrics=(),
+                      validation=(),
+                      validation_steps=None,
+                      verbose=1, **kw):

        metrics = [_metrics.get(metric) for metric in metrics]
        history = logging.MetricLogs.from_metric_list(lessons_per_epoch, ("cost",), metrics)
        lstr = len(str(epochs))
        for epoch in range(1, epochs+1):
            if verbose:
                print("Epoch {:>{w}}/{}".format(epoch, epochs, w=lstr))
            epoch_history = self.epoch(generator, updates_per_epoch=lessons_per_epoch, metrics=metrics,
-                                       validation=validation, verbose=verbose, **kw)
+                                       validation=validation, validation_steps=validation_steps, verbose=verbose, **kw)
            history.update(epoch_history)

        return history

-    def fit(self, X, Y, batch_size=20, epochs=30, metrics=(), validation=(), verbose=1, shuffle=True, **kw):
+    def fit(self, X, Y,
+            batch_size=20,
+            epochs=30,
+            metrics=(),
+            validation=(),
+            validation_steps=None,
+            verbose=1,
+            shuffle=True,
+            **kw):

        metrics = [_metrics.get(metric) for metric in metrics]
        datastream = batch_stream(X, Y, m=batch_size, shuffle=shuffle)
-        return self.fit_generator(datastream, len(X) // batch_size, epochs, metrics, validation, verbose, **kw)
+        return self.fit_generator(datastream, len(X) // batch_size, epochs, metrics, validation, validation_steps,
+                                  verbose, **kw)

-    def epoch(self, generator, updates_per_epoch, metrics=(), validation=None, verbose=1, **kw):
+    def epoch(self, generator, updates_per_epoch, metrics=(), validation=None, validation_steps=None, verbose=1, **kw):
        metrics = [_metrics.get(metric) for metric in metrics]
        history = logging.MetricLogs.from_metric_list(updates_per_epoch, ["cost"], metrics)
+        done = 0

        self.layers.learning = True
        batch_size = 0
@@ -50,8 +67,13 @@ def epoch(self, generator, updates_per_epoch, metrics=(), validation=None, verbose=1, **kw):

        self.layers.learning = False
        if verbose and validation:
-            history = self.evaluate(*validation, batch_size=batch_size, metrics=metrics)
-            history.log(prefix=" ", suffix="")
+            if type(validation) in (tuple, list):
+                eval_history = self.evaluate(*validation, batch_size=batch_size, metrics=metrics, verbose=False)
+            else:
+                if validation_steps is None:
+                    raise RuntimeError("If validating on a stream, validation_steps must be set to a positive integer.")
+                eval_history = self.evaluate_stream(validation, validation_steps, metrics, verbose=False)
+            eval_history.log(prefix=" ", suffix="")
        if verbose:
            print()

@@ -70,24 +92,28 @@ def evaluate_batch(self, x, y, metrics=()):
            eval_metrics[str(metric).lower()] = metric(preds, y) / m
        return eval_metrics

-    def evaluate(self, X, Y, batch_size=32, metrics=(), verbose=False):
-        metrics = [_metrics.get(metric) for metric in metrics]
-        N = X.shape[0]
-        batch_size = min(batch_size, N)
-        steps = int(round(N / batch_size))
+    def evaluate_stream(self, stream, steps, metrics=(), verbose=False):
        history = logging.MetricLogs.from_metric_list(steps, ["cost"], metrics)

-        for x, y in batch_stream(X, Y, m=batch_size, shuffle=False, infinite=False):
+        metrics = [_metrics.get(metric) for metric in metrics]
+        for x, y in stream:
            eval_metrics = self.evaluate_batch(x, y, metrics)
            history.record(eval_metrics)
            if verbose:
                history.log("\r", end="")

        if verbose:
            print()
        history.reduce_mean()
        return history

+    def evaluate(self, X, Y, batch_size=32, metrics=(), verbose=False):
+        N = X.shape[0]
+        batch_size = min(batch_size, N)
+        steps = int(round(N / batch_size))
+
+        stream = batch_stream(X, Y, m=batch_size, shuffle=False, infinite=False)
+
+        return self.evaluate_stream(stream, steps, metrics, verbose)
+
    def learn_batch(self, X, Y, metrics=(), **kw) -> dict:
        raise NotImplementedError
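The practical effect of this refactor is that `validation` may now be a finite stream of `(x, y)` batches rather than an in-memory array pair, as long as `validation_steps` is supplied. A minimal sketch under that assumption (network, shapes and data are illustrative):

```python
from itertools import islice

import numpy as np

from brainforge import Backpropagation
from brainforge.layers import DenseLayer

def stream(batchsize=32):
    # Endless stream of random batches, standing in for a real data source.
    while True:
        X = np.random.normal(size=(batchsize, 10))
        Y = np.random.normal(size=(batchsize, 2))
        yield X, Y

net = Backpropagation(input_shape=(10,), layerstack=[
    DenseLayer(16, activation="tanh"),
    DenseLayer(2, activation="linear"),
], cost="mse", optimizer="sgd")

# evaluate_stream() consumes the validation stream to exhaustion, so a
# finite slice is passed; a fresh slice would be needed for each epoch,
# hence the single epoch here.
net.fit_generator(stream(), lessons_per_epoch=100, epochs=1,
                  validation=islice(stream(), 10), validation_steps=10)
```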
2 changes: 1 addition & 1 deletion brainforge/learner/backpropagation.py
@@ -4,7 +4,7 @@
from ..optimizers import optimizers, GradientDescent


-class BackpropNetwork(Learner):
+class Backpropagation(Learner):

def __init__(self, layerstack, cost="mse", optimizer="sgd", name="", **kw):
super().__init__(layerstack, cost, name, **kw)
4 changes: 2 additions & 2 deletions brainforge/learner/feedback_alignment.py
@@ -1,10 +1,10 @@
import numpy as np

-from .backpropagation import BackpropNetwork
+from .backpropagation import Backpropagation
from ..util.typing import white


-class DirectFeedbackAlignment(BackpropNetwork):
+class DirectFeedbackAlignment(Backpropagation):

def __init__(self, layerstack, cost, optimizer, name="", **kw):
super().__init__(layerstack, cost, optimizer, name, **kw)
4 changes: 2 additions & 2 deletions brainforge/learner/local_correlation.py
@@ -1,10 +1,10 @@
import numpy as np

-from .backpropagation import BackpropNetwork
+from .backpropagation import Backpropagation
from ..metrics import mse


-class LocalCorrelationAligment(BackpropNetwork):
+class LocalCorrelationAligment(Backpropagation):

def backpropagate(self, error):
m = len(error)
4 changes: 2 additions & 2 deletions brainforge/util/persistance.py
@@ -50,15 +50,15 @@ def __getitem__(self, item):


def load(capsule):
-    from ..learner import BackpropNetwork
+    from ..learner import Backpropagation
from ..optimizers import optimizers
from ..util.shame import translate_architecture as trsl

if not isinstance(capsule, Capsule):
capsule = Capsule.read(capsule)
c = capsule

-    net = BackpropNetwork(input_shape=c["vlayers"][0][0], name=c["vname"])
+    net = Backpropagation(input_shape=c["vlayers"][0][0], name=c["vname"])

for layer_name, layer_capsule in zip(c["varchitecture"], c["vlayers"]):
if layer_name[:5] == "Input":
16 changes: 16 additions & 0 deletions setup.py
@@ -0,0 +1,16 @@
+from setuptools import setup, find_packages
+
+long_description = open("Readme.md").read()
+
+setup(
+    name='brainforge',
+    version='0.1.0',
+    packages=find_packages(),
+    url='https://github.com/csxeba/brainforge.git',
+    license='GPLv3',
+    author='csxeba',
+    author_email='csxeba@gmail.com',
+    description='Deep Learning with NumPy only!',
+    long_description=long_description,
+    long_description_content_type='text/markdown'
+)
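With this file in place, the package should be installable from the repository root with `pip install .` (or `pip install -e .` for development). Note that `open("Readme.md")` assumes the readme sits next to setup.py when the package is built.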
4 changes: 2 additions & 2 deletions xperiments/xp_conv.py
@@ -1,13 +1,13 @@
from verres.data import MNIST

-from brainforge.learner import BackpropNetwork
+from brainforge.learner import Backpropagation
from brainforge.layers import ConvLayer, PoolLayer, Flatten, Dense, Activation
from brainforge import gradientcheck

X, Y = MNIST().table("train")
X = X[..., 0][:, None, ...]
ins, ous = X.shape[1:], Y.shape[1:]
-net = BackpropNetwork(input_shape=ins, layerstack=[
+net = Backpropagation(input_shape=ins, layerstack=[
ConvLayer(32, 3, 3, compiled=1),
Activation("relu"),
ConvLayer(64, 3, 3, compiled=1),
6 changes: 3 additions & 3 deletions xperiments/xp_decoupled.py
@@ -1,4 +1,4 @@
-from brainforge.learner import BackpropNetwork
+from brainforge.learner import Backpropagation
from brainforge.layers import Dense
from brainforge.optimizers import Momentum
from brainforge.util import etalon
@@ -36,15 +36,15 @@ def udpate(self, true_delta):


def build_net(inshape, outshape):
-    net = BackpropNetwork(input_shape=inshape, layerstack=[
+    net = Backpropagation(input_shape=inshape, layerstack=[
Dense(30, activation="tanh"),
Dense(outshape, activation="softmax")
], cost="cxent", optimizer=Momentum(0.01))
return net


def build_synth(inshape, outshape):
-    synth = BackpropNetwork(input_shape=inshape, layerstack=[
+    synth = Backpropagation(input_shape=inshape, layerstack=[
Dense(outshape)
], cost="mse", optimizer=Momentum(0.01))
return synth
4 changes: 2 additions & 2 deletions xperiments/xp_dense.py
@@ -1,12 +1,12 @@
-from brainforge.learner import BackpropNetwork
+from brainforge.learner import Backpropagation
from brainforge.layers import Dense
from brainforge.gradientcheck import GradientCheck
from brainforge.util import etalon

X, Y = etalon
inshape, outshape = X.shape[1:], Y.shape[1:]

-network = BackpropNetwork(input_shape=inshape, layerstack=[
+network = Backpropagation(input_shape=inshape, layerstack=[
Dense(32, activation="sigmoid", trainable=1),
Dense(32, activation="sigmoid", trainable=1),
Dense(outshape, activation="linear", trainable=1)
4 changes: 2 additions & 2 deletions xperiments/xp_getout.py
@@ -4,13 +4,13 @@

from grund import getout

-from brainforge import BackpropNetwork
+from brainforge import Backpropagation
from brainforge.layers import Dense, Flatten
from brainforge.reinforcement import DQN, AgentConfig

env = getout.GetOut((10, 10))
agent = DQN(
-    BackpropNetwork(input_shape=env.neurons_required[0], layerstack=[
+    Backpropagation(input_shape=env.neurons_required[0], layerstack=[
Flatten(),
Dense(30, activation="tanh"),
Dense(env.neurons_required[-1], activation="linear")
4 changes: 2 additions & 2 deletions xperiments/xp_iris_etalon.py
@@ -1,5 +1,5 @@
from brainforge.util import etalon
-from brainforge import LayerStack, BackpropNetwork
+from brainforge import LayerStack, Backpropagation
from brainforge.layers import Dense, DropOut


@@ -9,5 +9,5 @@
Dense(3, activation="softmax")
])

-net = BackpropNetwork(ls, cost="cxent", optimizer="momentum")
+net = Backpropagation(ls, cost="cxent", optimizer="momentum")
costs = net.fit(*etalon, epochs=300, validation=etalon, verbose=1)
4 changes: 2 additions & 2 deletions xperiments/xp_lstm.py
@@ -1,6 +1,6 @@
import numpy as np

-from brainforge.learner import BackpropNetwork
+from brainforge.learner import Backpropagation
from brainforge.layers import LSTM, Dense
from brainforge.gradientcheck import GradientCheck

@@ -11,7 +11,7 @@
X = np.random.randn(*DSHAPE)
Y = np.random.randn(*OUTSHP)

-net = BackpropNetwork(input_shape=DSHAPE[1:], layerstack=[
+net = Backpropagation(input_shape=DSHAPE[1:], layerstack=[
LSTM(16, activation="tanh", compiled=1),
Dense(OUTSHP[1:], activation="linear", trainable=0)
], cost="mse", optimizer="sgd")
4 changes: 2 additions & 2 deletions xperiments/xp_mnist.py
@@ -1,12 +1,12 @@
from verres.data import inmemory

-from brainforge import BackpropNetwork
+from brainforge import Backpropagation
from brainforge.layers import Dense, Flatten

mnist = inmemory.MNIST()
(lX, lY), (tX, tY) = mnist.table("train", shuffle=True), mnist.table("val", shuffle=False)

-ann = BackpropNetwork(input_shape=lX.shape[1:], layerstack=[
+ann = Backpropagation(input_shape=lX.shape[1:], layerstack=[
Flatten(),
Dense(64, activation="tanh"),
Dense(10, activation="softmax")
4 changes: 2 additions & 2 deletions xperiments/xp_pggymin.py
@@ -5,7 +5,7 @@

from matplotlib import pyplot

-from brainforge.learner import BackpropNetwork
+from brainforge.learner import Backpropagation
from brainforge.layers import Dense
from brainforge.optimizers import Momentum
from brainforge.reinforcement import PG, AgentConfig
@@ -15,7 +15,7 @@


def get_agent():
-    brain = BackpropNetwork(input_shape=env.observation_space.shape, layerstack=[
+    brain = Backpropagation(input_shape=env.observation_space.shape, layerstack=[
Dense(nactions, activation="softmax")
], cost="cxent", optimizer=Momentum(eta=0.001))
return brain
4 changes: 2 additions & 2 deletions xperiments/xp_pong.py
@@ -3,7 +3,7 @@
import gym
import numpy as np

-from brainforge import BackpropNetwork
+from brainforge import Backpropagation
from brainforge.layers import Dense
from brainforge.reinforcement import PG, AgentConfig

@@ -32,7 +32,7 @@ def ds(image):
nactions = env.action_space.n
stateshape = 6400
print("Pong stateshape =", stateshape)
-brain = BackpropNetwork(input_shape=stateshape, layerstack=[
+brain = Backpropagation(input_shape=stateshape, layerstack=[
Dense(200, activation="tanh"),
Dense(nactions, activation="softmax")
], cost="cxent", optimizer="adam")