"""
This module runs unit tests to measure dlm performance and correctness.
"""
import itertools
import unittest
import aml
import torch
import dlm
class FunctionalTests(unittest.TestCase):
"""
The following class implements functional tests. Functional correctness
tests verify: (1) model classes are functionally correct (in that they do
not cause errors), (2) special functionalities (determining the maximum
batch size on gpus, performing adversarial training, using validation sets,
loading and saving models, using checkpoints etc.) are correct.
:func:`test_adversarial_training`: tests adversarial training subroutine
:func:`test_checkpoints`: tests checkpoint subroutine correctness
:func:`test_gpu_oom`: tests gpu oom prevention correctness
:func:`test_save_load`: tests saving and loading models
:func:`test_models`: test model class correctness
:func:`verify_cnn`: verifies cnn architectures
:func:`verify_linear`: verifies linear model architectures
:func:`verify_mlp`: verifies mlp architectures
"""
@classmethod
def setUpClass(cls, classes=2, features=16, samples=32):
"""
This function initializes the setup necessary for all test cases within
this module. Specifically, this method: (1) generates data, (2)
instantiates parameters used to configure models, and (3) sets an
attack to be used for adversarial training.
:param classes: number of classes to test with
:type classes: int
:param features: number of features to test with (should be power of 2)
:type features: int
:param samples: number of samples to test with
:type samples: int
:return: None
:rtype: NoneType
"""
cls.classes = classes
cls.features = features
cls.samples = samples
cls.x = torch.rand((samples, features))
cls.y = torch.randint(classes, (samples,))
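        # model templates are built incrementally below: mlp extends linear
        # and cnn extends mlp via the dict union operator (python 3.9+)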
linear = dict(
auto_batch=False,
attack=None,
attack_params=None,
batch_size=samples // 2,
classes=classes,
epochs=3,
learning_rate=1e-2,
loss=torch.nn.CrossEntropyLoss,
optimizer=torch.optim.SGD,
optimizer_params=dict(momentum=0.01, nesterov=True),
scheduler=torch.optim.lr_scheduler.ExponentialLR,
scheduler_params=dict(gamma=0.01),
threads=-1,
verbosity=0.25,
)
mlp = linear | dict(
activation=torch.nn.ReLU,
dropout=0.1,
hidden_layers=(samples // classes, samples // classes),
)
cnn = mlp | dict(
conv_layers=(features // 2, features),
kernel_size=1,
shape=(1, int(features ** (1 / 2)), int(features ** (1 / 2))),
)
cls.model_template_pairs = (
(dlm.LinearClassifier, linear),
(dlm.MLPClassifier, mlp),
(dlm.CNNClassifier, cnn),
)
cls.attack = dict(
attack=aml.bim, attack_params=dict(alpha=0.01, epsilon=0.1, epochs=2)
)
        return None

    def test_adversarial_training(self):
        """
        This method validates the correctness of adversarial training (and
        the use of a validation dataset). Specifically, a model is considered
        to be functionally correct if: (1) performing adversarial training,
        (2) creating validation data from the training set, and (3) using an
        explicitly passed validation set do not cause any errors.

        :return: None
        :rtype: NoneType
        """
for model, template in self.model_template_pairs:
print(f"Testing {model.__name__}...", end="\r")
with self.subTest(Model=f"{model.__name__}"):
model = model(**template | self.attack)
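                # fit with a validation split carved from the training data,
                # then again with an explicitly passed validation set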
model.fit(self.x, self.y, 0.10)
model.fit(self.x, self.y, (self.x, self.y))
        return None

    @unittest.skipUnless(torch.cuda.is_available(), "cuda is unavailable")
    def test_gpu_oom(self):
"""
This method validates the correctness of the the max_batch_size
subroutine. Specifically, this subroutine is considered to be
functionally correct if exceptions are handled properly.
:return: None
:rtype: NoneType
"""
for model, template in self.model_template_pairs:
print(f"Testing {model.__name__}...", end="\r")
with self.subTest(Model=f"{model.__name__}"):
model = model(**template | dict(auto_batch=True, device="cuda"))
model.fit(self.x, self.y, 0.10)
        return None

    def test_models(self):
        """
        This method validates the correctness of model instantiations and
        their operation. Specifically, a model object is considered to be
        functionally correct if: (1) it can be instantiated, (2) its
        architecture matches the arguments provided on initialization, and
        (3) its fit method does not cause any errors.

        :return: None
        :rtype: NoneType
        """
for model, template in self.model_template_pairs:
print(f"Testing {model.__name__}...", end="\r")
with self.subTest(Model=f"{model.__name__}"):
model = model(**template)
model.fit(self.x, self.y)
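                # dispatch to the verifier matching the model architecture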
                if type(model) is dlm.LinearClassifier:
                    self.verify_linear(self.classes, self.features, model.model)
                elif type(model) is dlm.MLPClassifier:
                    self.verify_mlp(
                        self.classes,
                        self.features,
                        model.model,
                        template["activation"],
                        template["dropout"],
                        template["hidden_layers"],
                    )
                else:
                    self.verify_cnn(
                        self.classes,
                        model.model,
                        template["activation"],
                        template["conv_layers"],
                        template["dropout"],
                        template["hidden_layers"],
                        template["kernel_size"],
                        template["shape"],
                    )
        return None

    def test_save_load(self, path="/tmp/"):
        """
        This method validates the correctness of the model saving and loading
        subroutines. Specifically, these subroutines are considered to be
        functionally correct if models can be reinstantiated correctly after
        reading their structure from disk.

        :param path: path to save and load models
        :type path: str
        :return: None
        :rtype: NoneType
        """
        for (model_class, template), slim in itertools.product(
            self.model_template_pairs, (True, False)
        ):
print(f"Testing {model_class.__name__}...", end="\r")
with self.subTest(Model=f"{model_class.__name__}", Slim=slim):
# initialize models at various states
model = model_class(**template)
trained = model_class(**template)
skeleton = model_class(**template)
model.fit(self.x, self.y)
trained.fit(self.x, self.y)
# restore from full model and from state dicts
savepath = f"{path}dlm_test_{model_class.__name__}"
model.save(savepath, slim=slim)
trained.load(savepath)
skeleton.load(savepath)
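                # both restored models should reproduce the reference architecture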
                self.assertMultiLineEqual(
                    repr(model.model) * 2,
                    repr(trained.model) + repr(skeleton.model),
                )
        return None

    def verify_cnn(
self,
classes,
model,
activation,
conv_layers,
dropout,
hidden_layers,
kernel_size,
shape,
):
"""
This method verfies convolutional neural networks. Specifically, it
confirms that: (1) each layer follows a
dropout-convolution-activation-maxpool pattern a
convolutional-layer-number of times, followed by a flatten into a
dropout-linear-activation pattern a hidden-layer-number of times, (2)
dropout rates set to those passed on object instantiation, and (3)
convolutional input & output channels and linear layer input & output
features match input shape, convolutional layers, hidden layers, and
classes.
:param classes: number of classes
:type classes: int
:param model: model to verify
:type model: torch.nn.modules.container Sequential object
:param activation: activation function used
:type activation: torch.nn.modules.activation class
:param conv_layers: number of filters at each convolutional layers
:type conv_layers: tuple of ints
:param dropout: dropout rate
:type dropout: float
:param hidden_layers: number of neurons at each hidden layers
:type hidden_layers: tuple of ints
:param kernel_size: size of the convolving kernel
:type kernel_size: int
:param shape: original input shape (channels, width, height)
:type shape: tuple of ints
:return: None
:rtype: NoneType
"""
# validate cnn portion of the network
cnn_comp = itertools.cycle(
(torch.nn.Dropout, torch.nn.Conv2d, activation, torch.nn.MaxPool2d)
)
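        # each conv block consumes (dropout rate, output channels, None for
        # the activation, kernel size), flattened into one parameter stream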
cnn_params = itertools.chain(
*itertools.product((dropout,), conv_layers, (None,), (kernel_size,))
)
for layer, comp, param in zip(
model[1 : len(conv_layers) * 4 + 1], cnn_comp, cnn_params
):
if comp is torch.nn.Dropout:
self.assertTupleEqual((type(layer), layer.p), (comp, param))
elif comp is torch.nn.Conv2d:
self.assertTupleEqual((type(layer), layer.out_channels), (comp, param))
elif comp is torch.nn.MaxPool2d:
self.assertTupleEqual((type(layer), layer.kernel_size), (comp, param))
else:
self.assertEqual(type(layer), comp)
# validate mlp portion of the network
mlp_comp = itertools.cycle((torch.nn.Dropout, torch.nn.Linear, activation))
mlp_params = itertools.chain(
*itertools.product((dropout,), hidden_layers, (None,))
)
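        # each hidden block consumes (dropout rate, output features, None)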
for layer, comp, param in zip(
model[len(conv_layers) * 4 + 2 : -1], mlp_comp, mlp_params
):
if comp is torch.nn.Dropout:
self.assertTupleEqual((type(layer), layer.p), (comp, param))
elif comp is torch.nn.Linear:
self.assertTupleEqual((type(layer), layer.out_features), (comp, param))
else:
self.assertEqual(type(layer), comp)
# validate input and output features
self.assertTupleEqual(
(model[0].unflattened_size, model[-1].out_features), (shape, classes)
)
        return None

    def verify_linear(self, classes, features, model):
        """
        This method verifies linear models. Specifically, it confirms that
        the number of inputs and outputs matches the number of features and
        classes, respectively.

        :param classes: number of classes
        :type classes: int
        :param features: number of features
        :type features: int
        :param model: model to verify
        :type model: torch.nn.modules.container Sequential object
        :return: None
        :rtype: NoneType
        """
self.assertTupleEqual(
(model[0].in_features, model[0].out_features), (features, classes)
)
        return None

    def verify_mlp(self, classes, features, model, activation, dropout, hidden_layers):
        """
        This method verifies multi-layer perceptrons. Specifically, it
        confirms that: (1) each layer follows a dropout-linear-activation
        pattern, (2) dropout rates are set to those passed on object
        instantiation, and (3) linear layer input and output features match
        the input features, hidden layers, and classes.

        :param classes: number of classes
        :type classes: int
        :param features: number of features
        :type features: int
        :param model: model to verify
        :type model: torch.nn.modules.container Sequential object
        :param activation: activation function used
        :type activation: torch.nn.modules.activation class
        :param dropout: dropout rate
        :type dropout: float
        :param hidden_layers: number of neurons at each hidden layer
        :type hidden_layers: tuple of ints
        :return: None
        :rtype: NoneType
        """
comps = itertools.cycle((torch.nn.Dropout, torch.nn.Linear, activation))
params = itertools.chain(*itertools.product((dropout,), hidden_layers, (None,)))
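        # parameters cycle through (dropout rate, output features, None) for
        # each hidden block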
for layer, comp, param in zip(model[:-1], comps, params):
if comp is torch.nn.Dropout:
self.assertTupleEqual((type(layer), layer.p), (comp, param))
elif comp is torch.nn.Linear:
self.assertTupleEqual((type(layer), layer.out_features), (comp, param))
else:
self.assertEqual(type(layer), comp)
self.assertTupleEqual(
(model[1].in_features, model[-1].out_features), (features, classes)
)
return None
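

# a minimal entry point, assuming these tests may be run directly; the
# original module may instead rely on an external runner such as pytest
if __name__ == "__main__":
    unittest.main(verbosity=2)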