-
Notifications
You must be signed in to change notification settings - Fork 1
/
model.py
72 lines (59 loc) · 3.37 KB
/
model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
### Model definition for the SVHN multi-digit sequence classifier.
import tensorflow as tf
class Model(object):
    """CNN for multi-digit SVHN recognition.

    Architecture: three conv blocks (conv -> batch-norm -> ReLU -> max-pool
    -> dropout), two fully-connected layers, and five parallel 11-way
    classification heads — one per digit position (classes 0-9 plus a
    "no digit" class).
    """

    @staticmethod
    def inference(x, drop_rate, training=False):
        """Build the forward graph and return per-digit logits.

        Args:
            x: input image batch. The flatten below hard-codes a 7x7x128
               feature map, which assumes the input spatial size reaches 7x7
               after three 2x2 poolings — TODO confirm expected input shape
               against the caller.
            drop_rate: dropout rate applied after each conv block.
            training: when True, dropout is active and batch-norm uses batch
                statistics. Defaults to False, which preserves the original
                behavior (the original code omitted the flag, and
                tf.layers.dropout defaults to training=False, so dropout was
                silently never applied).
                NOTE(review): when training=True, the caller must also run
                the ops in tf.GraphKeys.UPDATE_OPS so batch-norm moving
                averages get updated.

        Returns:
            Tensor of shape (batch, 5, 11): logits for each digit position.
        """
        net = x
        # Three structurally identical conv blocks; only the filter count
        # varies. Scope names hidden1..hidden3 match the original layout so
        # existing checkpoints keep loading.
        for i, filters in enumerate((48, 64, 128), start=1):
            with tf.variable_scope('hidden%d' % i):
                net = tf.layers.conv2d(net, filters=filters, kernel_size=[5, 5], padding='same')
                net = tf.layers.batch_normalization(net, training=training)
                net = tf.nn.relu(net)
                net = tf.layers.max_pooling2d(net, pool_size=[2, 2], strides=2, padding='same')
                net = tf.layers.dropout(net, rate=drop_rate, training=training)
        # Hard-coded flatten size: 7x7 spatial output with 128 channels.
        flatten = tf.reshape(net, [-1, 7 * 7 * 128])
        with tf.variable_scope('hidden4'):
            hidden4 = tf.layers.dense(flatten, units=6272, activation=tf.nn.relu)
        with tf.variable_scope('hidden5'):
            hidden5 = tf.layers.dense(hidden4, units=6272, activation=tf.nn.relu)
        # Five independent 11-way heads, one per digit slot (scopes
        # digit1..digit5 preserved from the original).
        digit_logits = []
        for i in range(1, 6):
            with tf.variable_scope('digit%d' % i):
                digit_logits.append(tf.layers.dense(hidden5, units=11))
        return tf.stack(digit_logits, axis=1)  # (batch, 5, 11)

    @staticmethod
    def loss(digits_logits, digits_batch):
        """Total cross-entropy loss summed over the five digit positions.

        Args:
            digits_logits: (batch, 5, 11) logits from `inference`.
            digits_batch: (batch, 5) integer labels in [0, 10].

        Returns:
            Scalar loss tensor.
        """
        # tf.losses.sparse_softmax_cross_entropy already returns a reduced
        # scalar, so the tf.reduce_mean wrappers in the original were
        # redundant no-ops and are dropped.
        return tf.add_n([
            tf.losses.sparse_softmax_cross_entropy(
                labels=digits_batch[:, i], logits=digits_logits[:, i, :])
            for i in range(5)
        ])