# failures_MNIST.py
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
import functools
import sys
import os
import numpy as np
from networks import LinearWeightDropout
from networks import LinearNet2L, LinearNet3L
from networks import ClassifierNet2L, ClassifierNet3L
from training_utils import train_classifier as train
from training_utils import test_classifier as test
from training_utils import append
from stats_utils import run_statistics, load_statistics
from plot_utils import (plot_alignment_layers, plot_singular_values,
plot_loss_accuracy, plot_weights,
plot_hidden_units, plot_covariance)
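
# networks.LinearWeightDropout is used below as the layer type; its actual
# implementation lives in networks.py. What follows is a minimal sketch of a
# weight-dropout linear layer, assuming it zeroes individual weights with
# probability drop_p during training and rescales the survivors by
# 1/(1 - drop_p). Illustrative only -- this class is not used in this script.
class _LinearWeightDropoutSketch(nn.Linear):
    def __init__(self, in_features, out_features, drop_p=0.5, **kwargs):
        super().__init__(in_features, out_features, **kwargs)
        self.drop_p = drop_p

    def forward(self, x):
        if self.training and self.drop_p > 0.:
            # independent Bernoulli mask over individual weights
            mask = (torch.rand_like(self.weight) > self.drop_p).to(self.weight.dtype)
            w = self.weight * mask / (1. - self.drop_p)
        else:
            w = self.weight
        return nn.functional.linear(x, w, self.bias)
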
if __name__ == "__main__":

    training = True
    analysis = True
    plotting = True

    # ==================================================
    #   SETUP PARAMETERS

    # read parameters from the command line
    activation = sys.argv[1]      # hidden layer activation function ("linear" or "relu")
    scaling = sys.argv[2]         # init scaling ("lin" = 1/N or "sqrt" = 1/sqrt(N))
    N = int(sys.argv[3])          # number of units per hidden layer
    n_layers = int(sys.argv[4])   # number of layers (hidden + 1)
    d_output = 10                 # 10 digits in MNIST
    drop_p = float(sys.argv[5])   # probability of dropping a weight
    if not drop_p:
        drop_l = None
    else:
        drop_l = sys.argv[6]      # layer(s) with dropout, combined in a string ("1", "12", "13", etc.)

    assert n_layers in [2, 3], f"Invalid number of layers, {n_layers}"
    assert activation in ["linear", "relu"], f"Invalid activation function, '{activation}'"
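
    # Example invocation (argument values are illustrative):
    #   python failures_MNIST.py relu sqrt 100 2 0.2 12
    # i.e. ReLU activations, 1/sqrt(N) initialisation, N=100 hidden units,
    # 2 layers, weight-drop probability 0.2 on layers 1 and 2.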
    if n_layers == 2:
        if activation == "relu":
            Net = ClassifierNet2L
        elif activation == "linear":
            Net = LinearNet2L
    elif n_layers == 3:
        if activation == "relu":
            Net = ClassifierNet3L
        elif activation == "linear":
            Net = LinearNet3L
    # set (and create) output directory
    out_dir = f"outputs_MNIST/{n_layers}L_{activation}/"
    out_dir += f"{scaling}/"
    out_dir += f"N_{N:04d}/"
    out_dir += f"{drop_l}/"
    out_dir += f"q_{drop_p:.2f}"
    os.makedirs(out_dir, exist_ok=True)
    print(f"Output directory:\n\t{out_dir}\n")
    # find device
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print(f"device = {device}")
    # ==================================================
    #   SETUP TRAINING

    n_epochs = 10000
    n_skip = 100   # epochs to skip when saving data
    lr = 1e-4
    wd = 0.

    train_kwargs = {'batch_size': 100}
    test_kwargs = {'batch_size': 100}
    cuda_kwargs = {'num_workers': 1,
                   'pin_memory': True,
                   'shuffle': True}
    train_kwargs.update(cuda_kwargs)
    test_kwargs.update(cuda_kwargs)
    # ==================================================
    #   TRAINING/TESTING

    if training:
        print("\nTRAINING ...")

        # flatten each 28x28 image into a 784-dimensional vector
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Lambda(lambda x: torch.flatten(x))
        ])
        train_dataset = datasets.MNIST('data', train=True, download=True,
                                       transform=transform)
        test_dataset = datasets.MNIST('data', train=False,
                                      transform=transform)
        train_loader = torch.utils.data.DataLoader(train_dataset, **train_kwargs)
        test_loader = torch.utils.data.DataLoader(test_dataset, **test_kwargs)

        # input covariance of the raw training images (784 x 784 matrix)
        train_data = torch.flatten(train_dataset.data, start_dim=1).numpy()
        covariance_XX = np.cov(train_data.T)
        np.save(f"{out_dir}/covariance_XX.npy", covariance_XX)

        model = Net(d_input=28*28, d_output=d_output, d_hidden=N,
                    layer_type=functools.partial(LinearWeightDropout, drop_p=drop_p),
                    bias=False, scaling=scaling, drop_l=drop_l).to(device)
        optimizer = optim.SGD(model.parameters(), lr=lr, weight_decay=wd)
        model.save(f"{out_dir}/model_init")
        print(model)
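
        # functools.partial above builds a layer "factory": presumably the
        # network classes call layer_type(in_features, out_features) and get
        # a LinearWeightDropout with drop_p already bound, e.g.
        #   layer_type = functools.partial(LinearWeightDropout, drop_p=0.2)
        #   layer = layer_type(784, N)   # == LinearWeightDropout(784, N, drop_p=0.2)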
        # history buffers
        train_loss = []; train_acc = []
        test_loss = []; test_acc = []
        hidden = [np.array([]) for _ in range(n_layers - 1)]
        model_weights = [np.array([]) for _ in range(n_layers)]
        saved_epochs = []

        for epoch in range(n_epochs + 1):
            # train (except on the first epoch)
            train_loss_, train_acc_ = train(model, device, train_loader, optimizer, epoch, log_interval=1000)
            # test
            test_loss_, test_acc_, model_weights_, hidden_ = test(model, device, test_loader)
            train_loss.append(train_loss_); train_acc.append(train_acc_)
            test_loss.append(test_loss_); test_acc.append(test_acc_)

            # collect statistics every n_skip epochs
            if epoch % n_skip == 0:
                model.save(f"{out_dir}/model_trained")
                saved_epochs.append(epoch)
                np.save(f"{out_dir}/saved_epochs.npy", np.array(saved_epochs))
                np.save(f"{out_dir}/train_loss.npy", np.array([train_loss, train_acc]))
                np.save(f"{out_dir}/test_loss.npy", np.array([test_loss, test_acc]))
                for l in range(n_layers - 1):
                    hidden[l] = append(hidden[l], hidden_[l])
                    np.save(f"{out_dir}/hidden_{l+1}.npy", hidden[l])
                for l in range(n_layers):
                    model_weights[l] = append(model_weights[l], model_weights_[l])
                    np.save(f"{out_dir}/weights_{l+1}.npy", model_weights[l])
    # ==================================================
    #   ANALYSIS

    if analysis:
        print("STATISTICS ...")
        run_statistics(out_dir)
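
        # run_statistics / load_statistics are defined in stats_utils; from
        # the way their output is unpacked in the plotting section, they
        # track weight norms, the SVD of each layer's weight matrix over
        # training, and layer-alignment projections. A minimal sketch of the
        # per-matrix step (illustrative, not necessarily the actual code):
        #   U, S, Vt = np.linalg.svd(W, full_matrices=False)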
    # ==================================================
    #   PLOTS

    if plotting:
        print("PLOTTING ...")

        # re-load saved data
        saved_epochs = np.load(f"{out_dir}/saved_epochs.npy")
        train_loss, train_acc = np.load(f"{out_dir}/train_loss.npy")
        test_loss, test_acc = np.load(f"{out_dir}/test_loss.npy")
        hidden = [np.load(f"{out_dir}/hidden_{l+1}.npy") for l in range(n_layers - 1)]
        model_weights = [np.load(f"{out_dir}/weights_{l+1}.npy") for l in range(n_layers)]
        covariance_XX = np.load(f"{out_dir}/covariance_XX.npy")
        weights_norm, (Us, Ss, Vs), projs = load_statistics(out_dir)

        title = f"init {'1/N' if scaling == 'lin' else '1/sqrt(N)'}; L={n_layers}; N={N:04d}; drop {drop_l} wp {drop_p:.2f}"

        plot_covariance(covariance_XX, d_output=d_output, out_dir=out_dir, title=title)
        plot_alignment_layers(projs, d_output=d_output, epochs=saved_epochs, out_dir=out_dir, title=title)
        plot_singular_values(Ss, epochs=saved_epochs, out_dir=out_dir, title=title)
        plot_loss_accuracy(train_loss, test_loss, train_acc, test_acc, test_epochs=saved_epochs, out_dir=out_dir, title=title)
        plot_weights(model_weights, weights_norm, epochs=saved_epochs, out_dir=out_dir, title=title)
        plot_hidden_units(hidden, epochs=saved_epochs, out_dir=out_dir, title=title)