-
Notifications
You must be signed in to change notification settings - Fork 0
/
main.py
109 lines (86 loc) · 3.38 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
import os

import numpy as np
import pandas as pd
import wandb
from keras.callbacks import ModelCheckpoint
# NOTE: Conv1D is imported from keras.layers like the other layers;
# the old keras.layers.convolutional submodule path is deprecated.
from keras.layers import (
    BatchNormalization,
    Conv1D,
    Dense,
    Dropout,
    Flatten,
    MaxPooling1D,
    PReLU,
)
from keras.models import Sequential
from wandb.keras import WandbCallback

# Start a Weights & Biases run at import time; hyperparameters are
# registered with the run inside main().
wandb.init(project="my-test-project", entity="pattersonlt")

# Directory where trained model checkpoints are written.
output_path = 'models/'
def load_file(filepath):
    """Read a header-less CSV file and return its contents as a 2-D array."""
    return pd.read_csv(filepath, header=None).values
def load_group(directory):
    """
    Load every data file in *directory* into one array of samples.

    The first character of each filename is taken as the sample's class
    label and converted to an integer via ``ord(c) - 97`` — assumes
    lowercase-letter ('a'-'z' -> 0-25) filename prefixes; TODO confirm
    against the dataset's naming scheme.

    Returns:
        tuple: ``(data, labels)`` — ``data`` is an np.array of the
        per-file values, ``labels`` the matching integer class labels.
    """
    data = []
    labels = []
    # sorted() gives a deterministic sample order; os.listdir order is
    # platform/filesystem dependent.
    for filename in sorted(os.listdir(directory)):
        # os.path.join instead of manual '/' concatenation.
        filepath = os.path.join(directory, filename)
        if os.path.isfile(filepath):
            data.append(load_file(filepath))
            labels.append(filename[0])
    data = np.array(data)
    labels = np.array([ord(label) - 97 for label in labels])
    return data, labels
def main():
    """Train a 1-D CNN letter classifier on EMG data and report test accuracy."""
    # Load and merge training data from both subjects (BA and MJ).
    train, train_labels = load_group('BA_data/cleaned_data/train')
    train2, train_labels2 = load_group('MJ_data/cleaned_data/train')
    train = np.concatenate((train, train2))
    train_labels = np.concatenate((train_labels, train_labels2))

    # Load and merge test data; also used as the validation set below.
    test, test_labels = load_group('BA_data/cleaned_data/test')
    test2, test_labels2 = load_group('MJ_data/cleaned_data/test')
    test = np.concatenate((test, test2))
    test_labels = np.concatenate((test_labels, test_labels2))

    # Register hyperparameters with the active W&B run.  config.update()
    # merges into the run's config; directly assigning `wandb.config = {...}`
    # replaces the config proxy object and is not the supported API.
    config = {
        "epochs": 100000,
        "batch_size": 34,
    }
    wandb.config.update(config)

    optimizer = 'Adamax'

    # Assumes each sample is shaped (4, 1000) — presumably 4 channels x
    # 1000 timesteps; TODO confirm against the cleaned-data layout.
    model = Sequential()
    model.add(Conv1D(filters=114, kernel_size=2, activation='relu',
                     input_shape=(4, 1000)))
    model.add(BatchNormalization())
    model.add(PReLU())
    model.add(Dropout(0.46905572800414))
    model.add(MaxPooling1D(padding='same'))
    # input_shape is only honored on a model's first layer, so the
    # redundant input_shape the original passed here is omitted.
    model.add(Conv1D(filters=71, kernel_size=4, activation='relu',
                     padding='same'))
    model.add(BatchNormalization())
    model.add(PReLU())
    model.add(Dropout(0.17185485630307443))
    model.add(MaxPooling1D(padding='same'))
    model.add(Dense(44))
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(Dropout(0.26138249279812387))
    model.add(Flatten())
    # 26 output classes — one per letter, matching ord(label) - 97.
    model.add(Dense(26, activation='softmax'))
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optimizer, metrics=['accuracy'])

    # Checkpoint only models that improve validation accuracy.  Save under
    # the module-level output_path instead of the original hard-coded
    # absolute Windows path (which output_path was defined for but unused).
    model_checkpoint = ModelCheckpoint(
        filepath=os.path.join(output_path, 'model_5-1.h5'),
        save_weights_only=False,
        save_best_only=True,
        monitor='val_accuracy',
        verbose=1)
    callbacks = [model_checkpoint, WandbCallback()]

    # Train with the same hyperparameters that were logged to W&B.  The
    # original fit() call hard-coded epochs=10000000, disagreeing with
    # the logged config value of 100000.
    model.fit(train, train_labels,
              validation_data=(test, test_labels),
              epochs=config["epochs"],
              batch_size=config["batch_size"],
              verbose=1,
              callbacks=callbacks)

    # Final held-out evaluation; evaluate() returns (loss, accuracy).
    _, accuracy = model.evaluate(test, test_labels, batch_size=25, verbose=1)
    print(accuracy)
# Run training only when executed as a script, not when imported.
if __name__ == '__main__':
    main()