Commit

- issue #5:
	- Added _eval_split() method to the base Evaluation class
- issue #13:
	- Added evaluation tests
 On branch dev
 Changes to be committed:
	new file:   .gitignore
	new file:   .travis.yaml
	modified:   aawedha/evaluation/base.py
	modified:   aawedha/evaluation/cross_subject.py
	modified:   aawedha/evaluation/single_subject.py
	modified:   aawedha/io/base.py
	modified:   aawedha/io/dummy.py
	modified:   aawedha/models/EEGModels.py
	modified:   requirements.txt
	new file:   tests/evaluations_test.py
okbalefthanded committed Sep 1, 2020
1 parent bb28756 commit 03ec50d
Showing 10 changed files with 173 additions and 69 deletions.
11 changes: 11 additions & 0 deletions .gitignore
@@ -0,0 +1,11 @@
dist/*
build/*
aawedha.egg-info
# test-related
.coverage
.cache
.pytest_cache

# developer environments
.idea
.vscode
13 changes: 13 additions & 0 deletions .travis.yaml
@@ -0,0 +1,13 @@
language: python

python:
- 3.6

before_install:
- pip install -r requirements.txt

install:
- python setup.py install

script:
- pytest tests/
71 changes: 56 additions & 15 deletions aawedha/evaluation/base.py
@@ -1,6 +1,7 @@
from tensorflow.keras.layers.experimental import preprocessing
from aawedha.utils.utils import log, get_gpu_name, init_TPU
from sklearn.metrics import roc_curve, confusion_matrix
from aawedha.utils.evaluation_utils import class_weights
from aawedha.evaluation.checkpoint import CheckPoint
from tensorflow.keras.models import load_model
import tensorflow as tf
@@ -137,8 +138,8 @@ def __init__(self, dataset=None, model=None, partition=None, folds=None,
else:
title = ''
now = datetime.datetime.now().strftime('%c').replace(' ', '_')
f = 'aawedha/logs/'+'_'.join([self.__class__.__name__,
title, now, '.log'])
f = 'aawedha/logs/' + '_'.join([self.__class__.__name__,
title, now, '.log'])
self.logger = log(fname=f, logger_name='eval_log')
else:
self.logger = None
@@ -347,7 +348,7 @@ def save_model(self, folderpath=None):
prdg = self.dataset.paradigm.title
dt = self.dataset.title
filepath = folderpath + '/' + \
'_'.join([self.model.name, prdg, dt, '.h5'])
'_'.join([self.model.name, prdg, dt, '.h5'])
self.model.save(filepath)

def set_model(self, model=None, model_config={}):
@@ -386,12 +387,20 @@ def set_model(self, model=None, model_config={}):
# self.model_config = model_config

self.initial_weights = model.get_weights()
input_shape = model.layers[0].input_shape[0][1:]
if isinstance(model.layers[0].input_shape, list):
# model created using Functional API
input_shape = model.layers[0].input_shape[0][1:]
else:
# model created using Sequential class
input_shape = model.layers[0].input_shape[1:]

model_name = f'{model.name}_norm_'
self.model = tf.keras.models.Sequential([
self.normalizer,
tf.keras.layers.Reshape(input_shape),
model], name=model_name)
model
],
name=model_name)
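# Illustration (not part of the commit): why the list check above is needed.
# A Functional-API model's first layer reports a list of shapes, while a plain
# Sequential model reports a single tuple, e.g. (shapes are hypothetical):
#   functional.layers[0].input_shape  ->  [(None, 14, 512, 1)]
#   sequential.layers[0].input_shape  ->  (None, 14, 512, 1)
# hence the [0][1:] versus [1:] indexing in the two branches.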

def set_config(self, model_config):
"""Setter for model_config
@@ -441,16 +450,16 @@ def log_experiment(self):
if self.dataset:
data = f' Dataset: {self.dataset.title}'
if isinstance(self.dataset.epochs, list):
duration = f' epoch duration:{self.dataset.epochs[0].shape[0]/self.dataset.fs} sec'
duration = f' epoch duration:{self.dataset.epochs[0].shape[0] / self.dataset.fs} sec'
else:
duration = f' epoch duration:{self.dataset.epochs.shape[1]/self.dataset.fs} sec'
duration = f' epoch duration:{self.dataset.epochs.shape[1] / self.dataset.fs} sec'
else:
data = ''
duration = '0'

prt = 'Subjects partition ' + \
', '.join(f'{s[i], self.partition[i]}' for i in range(
len(self.partition)))
', '.join(f'{s[i], self.partition[i]}' for i in range(
len(self.partition)))
model = f'Model: {self.model.name}'
model_config = f'Model config: {self._get_model_configs_info()}'
device = self._get_device()
@@ -584,12 +593,12 @@ def _compile_model(self):
# if not self.model_config:
khsara, optimizer, metrics = self._get_compile_configs()

if device == 'GPU':
if device != 'TPU':
self.model.compile(loss=khsara,
optimizer=optimizer,
metrics=metrics
)
elif device == 'TPU':
else:
strategy = init_TPU()
with strategy.scope():
self.model = tf.keras.models.clone_model(self.model)
@@ -627,7 +636,7 @@ class weights
probs : 2d array (n_examples x n_classes)
model's output on test data as probabilities of belonging to
each class
perf : list
loss and metric values returned by model.evaluate() on the test data
"""
batch, ep, clbs = self._get_fit_configs()

@@ -658,6 +667,33 @@ class weights
perf = self.model.evaluate(X_test, Y_test, verbose=0)
return history, probs, perf

def _eval_split(self, split={}):
"""
Parameters
----------
split
Returns
-------
"""
X_train = split['X_train']
Y_train = split['Y_train']
X_test = split['X_test']
Y_test = split['Y_test']
X_val = split['X_val']
Y_val = split['Y_val']
#
cws = class_weights(Y_train)
# evaluate model on subj on all folds
self.model_history, probs, perf = self._eval_model(X_train, Y_train,
X_val, Y_val,
X_test, Y_test,
cws)
rets = self.measure_performance(Y_test, probs, perf)
return rets
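# Usage sketch (illustrative, not part of the commit): subclasses build the
# split dict in their _split_set() and then delegate to _eval_split(), e.g.:
#   split = {'X_train': X_tr, 'Y_train': Y_tr,
#            'X_val': X_va, 'Y_val': Y_va,
#            'X_test': X_te, 'Y_test': Y_te}
#   rets = self._eval_split(split)
# where the array names are placeholders for one fold's data.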

def _get_compile_configs(self):
"""Returns default model compile configurations as tuple
@@ -839,7 +875,12 @@ def _get_device(self):
str
computer engine for training
"""
device = 'GPU'
# test if env got GPU
device = 'GPU' # default
if 'device' in self.model_config:
device = self.model_config['device']
return device
return self.model_config['device']
else:
devices = [dev.device_type for dev in tf.config.get_visible_devices()]
if 'GPU' not in devices:
device = 'CPU'
return device
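The new _get_device() logic also lets the training device be pinned through the evaluation's model_config instead of being auto-detected; a minimal sketch, assuming an Evaluation subclass instance named evl (only the 'device' key is read by the method, the rest is illustrative):

evl.model_config['device'] = 'CPU'   # skip GPU/TPU even if one is visible
# without this key, _get_device() inspects tf.config.get_visible_devices()
# and falls back to 'CPU' when no GPU is present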
16 changes: 1 addition & 15 deletions aawedha/evaluation/cross_subject.py
@@ -1,6 +1,5 @@
from aawedha.evaluation.base import Evaluation
from aawedha.evaluation.checkpoint import CheckPoint
from aawedha.utils.evaluation_utils import class_weights
import numpy as np


@@ -212,20 +211,7 @@ def _cross_subject(self, fold):
folds performance
"""
split = self._split_set(fold)
X_train = split['X_train']
Y_train = split['Y_train']
X_test = split['X_test']
Y_test = split['Y_test']
X_val = split['X_val']
Y_val = split['Y_val']
#
cws = class_weights(Y_train)
# evaluate model on subj on all folds
self.model_history, probs, perf = self._eval_model(X_train, Y_train,
X_val, Y_val,
X_test, Y_test,
cws)
rets = self.measure_performance(Y_test, probs, perf)
rets = self._eval_split(split)
return rets

def _split_set(self, fold):
17 changes: 1 addition & 16 deletions aawedha/evaluation/single_subject.py
@@ -1,6 +1,5 @@
from aawedha.evaluation.base import Evaluation
from aawedha.evaluation.checkpoint import CheckPoint
from aawedha.utils.evaluation_utils import class_weights
from sklearn.model_selection import KFold, StratifiedKFold
import numpy as np

@@ -251,21 +250,7 @@ def _single_subject(self, subj, indie=False):
for fold in folds_range:
#
split = self._split_set(x, y, subj, fold, indie)
X_train = split['X_train']
Y_train = split['Y_train']
X_test = split['X_test']
Y_test = split['Y_test']
X_val = split['X_val']
Y_val = split['Y_val']
#
cl_weights = class_weights(Y_train)
# evaluate model on subj on all folds
self.model_history, probs, perf = self._eval_model(X_train,
Y_train,
X_val, Y_val,
X_test, Y_test,
cl_weights)
rets.append(self.measure_performance(Y_test, probs, perf))
rets.append(self._eval_split(split))
return rets

def _fuse_data(self):
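Both evaluation classes now share the same per-split flow, which the new tests exercise end to end; a minimal sketch based on tests/evaluations_test.py below (the Dummy shapes, partition and fold counts are simply the ones the test uses):

data = Dummy(train_shape=(5, 500, 10, 100), test_shape=(5, 500, 10, 50), nb_classes=5)
evl = SingleSubject(dataset=data, partition=[2, 1], verbose=0)
evl.generate_split(nfolds=4, strategy='Stratified')
evl.set_model(model=model)   # any Keras model, e.g. the make_model() helper in the tests
evl.run_evaluation()
print(evl.results['accuracy_mean'])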
2 changes: 1 addition & 1 deletion aawedha/io/base.py
@@ -79,7 +79,7 @@ def __str__(self):
f'Epoch length: {epoch_length}'
f'Channels: {self.ch_names}',
f'Trials:{trials}')
return '\n'.joint(info)
return '\n'.join(info)

@abstractmethod
def load_raw(self):
29 changes: 10 additions & 19 deletions aawedha/io/dummy.py
@@ -2,16 +2,15 @@
Dummy dataset for experimenting
'''
from aawedha.io.base import DataSet
from tensorflow.keras.utils import to_categorical
import numpy as np


class Dummy(DataSet):

def __init__(self, train_shape=(5, 512, 14, 100),
test_shape=(5, 512, 14, 50), nb_classes=5, fs=512):
'''
'''
"""
"""
super().__init__(title='Dummy', ch_names=[],
fs=None, doi='')
mu, sigma = 0.0, 1.0
@@ -35,23 +34,15 @@ def load_raw(self):
NotImplementedError

def generate_set(self):
'''
'''
kernels = 1
"""
"""
val_trials = round(self.epochs.shape[3]*0.8)
sbj, samples, channels, trials = self.epochs.shape
self.x_train = self.epochs[:, :, :, :val_trials].transpose(
(0, 3, 2, 1)).reshape(
(sbj, val_trials, kernels, channels, samples))
self.x_val = self.epochs[:, :, :, val_trials:].transpose(
(0, 3, 2, 1)).reshape(
(sbj, trials-val_trials, kernels, channels, samples))
trials = self.test_epochs.shape[3]
self.x_test = self.test_epochs.transpose((0, 3, 2, 1)).reshape(
(sbj, trials, kernels, channels, samples))
self.y_train = to_categorical(self.y[:val_trials])
self.y_val = to_categorical(self.y[val_trials:])
self.y_test = to_categorical(self.test_y)
self.x_train = self.epochs[:, :, :, :val_trials]
self.x_val = self.epochs[:, :, :, val_trials:]
self.x_test = self.test_epochs
self.y_train = self.y[:, :val_trials]
self.y_val = self.y[:, val_trials:]
self.y_test = self.test_y

def get_path(self):
NotImplementedError
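generate_set() no longer reshapes the epochs into the old (subjects, trials, kernels, channels, samples) layout or one-hot encodes the labels; the splits keep the raw epoch axes. A short worked example with the default constructor shapes (values follow directly from the code above; whether the constructor already calls generate_set() itself is not shown in this diff):

data = Dummy()        # defaults: train (5, 512, 14, 100), test (5, 512, 14, 50)
data.generate_set()
# round(100 * 0.8) = 80, so the trial axis is cut 80/20:
# x_train: (5, 512, 14, 80), x_val: (5, 512, 14, 20), x_test: (5, 512, 14, 50)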
1 change: 0 additions & 1 deletion aawedha/models/EEGModels.py
@@ -118,7 +118,6 @@ def EEGNet(nb_classes, Chans=64, Samples=128,
dropoutType : Either SpatialDropout2D or Dropout, passed as a string.
"""

if dropoutType == 'SpatialDropout2D':
dropoutType = SpatialDropout2D
elif dropoutType == 'Dropout':
4 changes: 2 additions & 2 deletions requirements.txt
@@ -2,10 +2,10 @@ numpy
pandas
mne
tensorflow
keras
tensorflow-addons
scikit-learn
scipy
matplotlib
seaborn
pynvml
pytest
pynvml
78 changes: 78 additions & 0 deletions tests/evaluations_test.py
@@ -0,0 +1,78 @@
import pytest
import tensorflow as tf
import tensorflow.keras as keras

from aawedha.io.dummy import Dummy
from aawedha.evaluation.cross_subject import CrossSubject
from aawedha.evaluation.single_subject import SingleSubject
import numpy as np
import random


def seed():
tf.random.set_seed(42)
np.random.seed(42)
random.seed(42)


def make_data():
data = Dummy(train_shape=(5, 500, 10, 100), test_shape=(5, 500, 10, 50), nb_classes=5)
subjects, samples, channels, _ = data.epochs.shape
n_classes = np.unique(data.y[0]).size
return data, (subjects, samples, channels, n_classes)


def make_model(channels, samples, n_classes):
return keras.models.Sequential([
keras.Input(shape=(channels, samples, 1)),
keras.layers.Conv2D(40, (1, 31)),
keras.layers.Conv2D(40, (10, 1)),
keras.layers.BatchNormalization(),
keras.layers.Activation('elu'),
keras.layers.AveragePooling2D(pool_size=(1, 35), strides=(1, 7)),
keras.layers.Activation('elu'),
keras.layers.Dropout(0.5),
keras.layers.Flatten(),
keras.layers.Dense(n_classes, activation='softmax')],
name="dummy")


def process_evaluation(evl, nfolds=4, strategy='Kfold', model=None):
evl.generate_split(nfolds=nfolds, strategy=strategy)
evl.set_model(model=model)
evl.run_evaluation()
return evl.results


def test_single_subject():
# set seeds
seed()
# create random data
data, shapes = make_data()
subjects, samples, channels, n_classes = shapes
# define an evaluation
evl = SingleSubject(dataset=data, partition=[2, 1], verbose=0)
# set model
model = make_model(channels, samples, n_classes)
results = process_evaluation(evl, nfolds=4, strategy='Stratified', model=model)
# test value (assert_allclose raises on mismatch, so no outer assert is needed)
np.testing.assert_allclose(results['accuracy_mean'], 0.18, rtol=0.2)


def test_cross_subject():
# set seeds
seed()
# create random data
data, shapes = make_data()
subjects, samples, channels, n_classes = shapes
# define an evaluation
evl = CrossSubject(dataset=data, partition=[4, 1], verbose=0)
# set model
model = make_model(channels, samples, n_classes)
results = process_evaluation(evl, nfolds=1, strategy='Kfold', model=model)
# test value (assert_allclose raises on mismatch, so no outer assert is needed)
np.testing.assert_allclose(results['accuracy_mean'], 0.2, rtol=0.2)


if __name__ == '__main__':
pytest.main([__file__])
