nn_models.py

# Hamiltonian Neural Networks | 2019
# Sam Greydanus, Misko Dzamba, Jason Yosinski

import torch
import numpy as np

from utils import choose_nonlinearity


class MLP(torch.nn.Module):
  '''Just a salt-of-the-earth MLP'''
  def __init__(self, input_dim, hidden_dim, output_dim, nonlinearity='tanh'):
    super(MLP, self).__init__()
    self.linear1 = torch.nn.Linear(input_dim, hidden_dim)
    self.linear2 = torch.nn.Linear(hidden_dim, hidden_dim)
    self.linear3 = torch.nn.Linear(hidden_dim, output_dim, bias=None)  # no bias on the output layer

    for l in [self.linear1, self.linear2, self.linear3]:
      torch.nn.init.orthogonal_(l.weight)  # use a principled initialization

    self.nonlinearity = choose_nonlinearity(nonlinearity)

  def forward(self, x, separate_fields=False):
    # `separate_fields` is unused here; it is kept for interface compatibility.
    h = self.nonlinearity(self.linear1(x))
    h = self.nonlinearity(self.linear2(h))
    return self.linear3(h)
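
# Usage sketch (illustrative only; the sizes below are assumptions, not values
# taken from this file): an MLP mapping a 2-D (q, p) state to a single scalar,
# as one might do when parameterizing a scalar function such as an energy.
#   f = MLP(input_dim=2, hidden_dim=200, output_dim=1)
#   energy = f(torch.randn(64, 2))   # -> shape (64, 1)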

class MLPAutoencoder(torch.nn.Module):
  '''A salt-of-the-earth MLP Autoencoder + some edgy res connections'''
  def __init__(self, input_dim, hidden_dim, latent_dim, nonlinearity='tanh'):
    super(MLPAutoencoder, self).__init__()
    # encoder layers
    self.linear1 = torch.nn.Linear(input_dim, hidden_dim)
    self.linear2 = torch.nn.Linear(hidden_dim, hidden_dim)
    self.linear3 = torch.nn.Linear(hidden_dim, hidden_dim)
    self.linear4 = torch.nn.Linear(hidden_dim, latent_dim)

    # decoder layers
    self.linear5 = torch.nn.Linear(latent_dim, hidden_dim)
    self.linear6 = torch.nn.Linear(hidden_dim, hidden_dim)
    self.linear7 = torch.nn.Linear(hidden_dim, hidden_dim)
    self.linear8 = torch.nn.Linear(hidden_dim, input_dim)

    for l in [self.linear1, self.linear2, self.linear3, self.linear4,
              self.linear5, self.linear6, self.linear7, self.linear8]:
      torch.nn.init.orthogonal_(l.weight)  # use a principled initialization

    self.nonlinearity = choose_nonlinearity(nonlinearity)

  def encode(self, x):
    h = self.nonlinearity(self.linear1(x))
    h = h + self.nonlinearity(self.linear2(h))  # residual connection
    h = h + self.nonlinearity(self.linear3(h))  # residual connection
    return self.linear4(h)

  def decode(self, z):
    h = self.nonlinearity(self.linear5(z))
    h = h + self.nonlinearity(self.linear6(h))  # residual connection
    h = h + self.nonlinearity(self.linear7(h))  # residual connection
    return self.linear8(h)

  def forward(self, x):
    z = self.encode(x)
    x_hat = self.decode(z)
    return x_hat
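
# Minimal smoke test, run only when this file is executed directly. The
# dimensions below are illustrative assumptions (a 2-D (q, p) state and a
# flattened 28x28 observation), not values taken from this repo's training
# scripts.
if __name__ == '__main__':
  x = torch.randn(16, 2)  # batch of 16 two-dimensional states
  mlp = MLP(input_dim=2, hidden_dim=200, output_dim=2)
  print('MLP output:', mlp(x).shape)  # expected: torch.Size([16, 2])

  obs = torch.randn(16, 784)  # batch of 16 flattened 28x28 frames
  ae = MLPAutoencoder(input_dim=784, hidden_dim=200, latent_dim=2)
  print('Autoencoder reconstruction:', ae(obs).shape)  # expected: torch.Size([16, 784])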