-
Notifications
You must be signed in to change notification settings - Fork 30
/
ConvDESOM.py
86 lines (76 loc) · 3.16 KB
/
ConvDESOM.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
"""
Implementation of the Convolutional Deep Embedded Self-Organizing Map model
Model file
@author Florent Forest
@version 2.0
"""
# Tensorflow/Keras
from keras.models import Model
# DESOM components
from SOM import SOMLayer
from AE import conv2d_autoencoder
from DESOM import DESOM
class ConvDESOM(DESOM):
    """Convolutional Deep Embedded Self-Organizing Map (ConvDESOM) model.

    Variant of DESOM that uses a 2-D convolutional autoencoder instead of a
    fully-connected one. A SOM layer is attached to the encoder's latent
    output, and the joint model produces both the reconstruction and the
    distances to the SOM prototypes (training logic lives in the DESOM
    parent class).

    Example
    -------
    ```
    desom = ConvDESOM(input_shape=X_train.shape[1:],
                      latent_dim=32,
                      encoder_filters=[32, 64, 128, 256],
                      filter_size=3,
                      pooling_size=1,
                      map_size=(10, 10))
    ```

    Parameters
    ----------
    input_shape : tuple
        input shape passed to the conv2d autoencoder; given as
        (height, width) per the original docs, but since it is taken from
        ``X_train.shape[1:]`` it presumably includes a channel axis,
        i.e. (height, width, channels) — TODO confirm against
        ``conv2d_autoencoder``
    latent_dim : int
        dimension of latent code (units in hidden dense layer)
    encoder_filters : list
        number of filters in each layer of encoder. The autoencoder is
        symmetric, so the total number of layers is
        2*len(encoder_filters) - 1
    filter_size : int
        size of conv filters
    pooling_size : int
        size of maxpool filters
    map_size : tuple
        size of the rectangular map. Number of prototypes is
        map_size[0] * map_size[1]
    """

    def __init__(self, input_shape, latent_dim, encoder_filters, filter_size, pooling_size, map_size):
        # NOTE(review): super().__init__() is not called; this __init__ sets
        # every attribute itself. Confirm DESOM.__init__ has no other
        # required side effects before relying on inherited methods.
        self.input_shape = input_shape
        self.latent_dim = latent_dim
        self.encoder_filters = encoder_filters
        self.filter_size = filter_size
        self.pooling_size = pooling_size
        self.map_size = map_size
        self.n_prototypes = map_size[0] * map_size[1]
        # Sub-models are built lazily by initialize(), not here.
        self.pretrained = False
        self.autoencoder = None
        self.encoder = None
        self.decoder = None
        self.model = None

    def initialize(self, ae_act='relu', ae_init='glorot_uniform', batchnorm=False):
        """Build the ConvDESOM Keras models (autoencoder, encoder, decoder
        and the joint reconstruction+SOM model).

        Parameters
        ----------
        ae_act : str (default='relu')
            activation for AE intermediate layers
        ae_init : str (default='glorot_uniform')
            initialization of AE layers
        batchnorm : bool (default=False)
            use batch normalization
        """
        # Create the symmetric convolutional autoencoder and grab its
        # encoder/decoder halves.
        self.autoencoder, self.encoder, self.decoder = conv2d_autoencoder(self.input_shape,
                                                                          self.latent_dim,
                                                                          self.encoder_filters,
                                                                          self.filter_size,
                                                                          self.pooling_size,
                                                                          ae_act,
                                                                          ae_init,
                                                                          batchnorm)
        # Attach the SOM layer to the latent code produced by the encoder.
        som_layer = SOMLayer(self.map_size, name='SOM')(self.encoder.output)
        # Joint model: one input, two outputs (reconstruction, SOM distances).
        self.model = Model(inputs=self.autoencoder.input,
                           outputs=[self.autoencoder.output, som_layer])