# ops.py (forked from ZZUTK/Face-Aging-CAAE)
from __future__ import division

import tensorflow as tf  # written against the TensorFlow 1.x API (tf.variable_scope / tf.get_variable)
import numpy as np
# imread/imresize/imsave live in scipy.misc only in older SciPy releases (they were removed in later versions)
from scipy.misc import imread, imresize, imsave


def conv2d(input_map, num_output_channels, size_kernel=5, stride=2, name='conv2d'):
    """Strided 2-D convolution with 'SAME' padding and a bias term."""
    with tf.variable_scope(name):
        # stddev = np.sqrt(2.0 / (np.sqrt(input_map.get_shape()[-1].value * num_output_channels) * size_kernel ** 2))
        stddev = 0.02
        kernel = tf.get_variable(
            name='w',
            shape=[size_kernel, size_kernel, input_map.get_shape()[-1], num_output_channels],
            dtype=tf.float32,
            initializer=tf.truncated_normal_initializer(stddev=stddev)
        )
        biases = tf.get_variable(
            name='b',
            shape=[num_output_channels],
            dtype=tf.float32,
            initializer=tf.constant_initializer(0.0)
        )
        conv = tf.nn.conv2d(input_map, kernel, strides=[1, stride, stride, 1], padding='SAME')
        return tf.nn.bias_add(conv, biases)
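
# Usage sketch (illustrative only; shapes and scope names are hypothetical, assuming a TF 1.x graph
# with NHWC inputs):
#   images = tf.placeholder(tf.float32, [16, 64, 64, 3])
#   h1 = lrelu(conv2d(images, num_output_channels=64, name='E_conv1'))  # -> [16, 32, 32, 64]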


def fc(input_vector, num_output_length, name='fc'):
    """Fully connected layer: [batch, n_in] -> [batch, num_output_length]."""
    with tf.variable_scope(name):
        # stddev = np.sqrt(1.0 / (np.sqrt(input_vector.get_shape()[-1].value * num_output_length)))
        stddev = 0.02
        w = tf.get_variable(
            name='w',
            shape=[input_vector.get_shape()[1], num_output_length],
            dtype=tf.float32,
            initializer=tf.random_normal_initializer(stddev=stddev)
        )
        b = tf.get_variable(
            name='b',
            shape=[num_output_length],
            dtype=tf.float32,
            initializer=tf.constant_initializer(0.0)
        )
        return tf.matmul(input_vector, w) + b
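
# Usage sketch (illustrative only; shapes and scope names are hypothetical):
#   flat = tf.reshape(h1, [16, 32 * 32 * 64])
#   z = fc(flat, num_output_length=50, name='E_fc')  # -> [16, 50]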


def deconv2d(input_map, output_shape, size_kernel=5, stride=2, stddev=0.02, name='deconv2d'):
    """Strided transposed convolution ('SAME' padding); the full output_shape is given explicitly."""
    with tf.variable_scope(name):
        # stddev = np.sqrt(1.0 / (np.sqrt(input_map.get_shape()[-1].value * output_shape[-1]) * size_kernel ** 2))
        stddev = 0.02  # note: overrides the stddev argument
        # filter shape: [height, width, output_channels, in_channels]
        kernel = tf.get_variable(
            name='w',
            shape=[size_kernel, size_kernel, output_shape[-1], input_map.get_shape()[-1]],
            dtype=tf.float32,
            initializer=tf.random_normal_initializer(stddev=stddev)
        )
        biases = tf.get_variable(
            name='b',
            shape=[output_shape[-1]],
            dtype=tf.float32,
            initializer=tf.constant_initializer(0.0)
        )
        deconv = tf.nn.conv2d_transpose(input_map, kernel, strides=[1, stride, stride, 1], output_shape=output_shape)
        return tf.nn.bias_add(deconv, biases)
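
# Usage sketch (illustrative only; shapes and scope names are hypothetical): with stride 2 and
# 'SAME' padding the spatial size is typically doubled, but output_shape must be passed explicitly:
#   h2 = deconv2d(h1, output_shape=[16, 64, 64, 32], name='G_deconv1')  # h1: [16, 32, 32, 64]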


def lrelu(logits, leak=0.2):
    """Leaky ReLU: max(x, leak * x)."""
    return tf.maximum(logits, leak * logits)


def concat_label(x, label, duplicate=1):
    """Concatenate a label vector onto x; for 4-D x the label is tiled over the spatial dimensions."""
    x_shape = x.get_shape().as_list()
    if duplicate < 1:
        return x
    # duplicate the label to strengthen its effect (does it really affect the result?)
    label = tf.tile(label, [1, duplicate])
    label_shape = label.get_shape().as_list()
    if len(x_shape) == 2:
        return tf.concat(axis=1, values=[x, label])
    elif len(x_shape) == 4:
        label = tf.reshape(label, [x_shape[0], 1, 1, label_shape[-1]])
        return tf.concat(axis=3, values=[x, label * tf.ones([x_shape[0], x_shape[1], x_shape[2], label_shape[-1]])])
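
# Usage sketch (illustrative only; a one-hot label of width 10 is assumed):
#   y = tf.placeholder(tf.float32, [16, 10])
#   z_y = concat_label(z, y)   # 2-D case: [16, 50] -> [16, 60]
#   h_y = concat_label(h1, y)  # 4-D case: label tiled over H and W, then concatenated on the channel axis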


def load_image(
        image_path,                 # path to an image file
        image_size=64,              # expected size of the image
        image_value_range=(-1, 1),  # expected pixel value range of the image
        is_gray=False,              # grayscale or color image
        ):
    """Load an image, resize it to image_size x image_size, and rescale pixels to image_value_range."""
    if is_gray:
        image = imread(image_path, mode='L').astype(np.float32)
    else:
        image = imread(image_path, mode='RGB').astype(np.float32)
    image = imresize(image, [image_size, image_size])
    image = image.astype(np.float32) * (image_value_range[-1] - image_value_range[0]) / 255.0 + image_value_range[0]
    return image
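
# Usage sketch (illustrative only; the file path is hypothetical):
#   img = load_image('./data/sample.jpg', image_size=128)  # float32 array with values in [-1, 1]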


def save_batch_images(
        batch_images,               # a batch of images
        save_path,                  # path to save the tiled image
        image_value_range=(-1, 1),  # value range of the input batch images
        size_frame=None             # size of the image matrix: number of images per row and column
        ):
    """Tile a batch of images into a single frame and save it to save_path."""
    # transform the pixel values to the range 0~1
    images = (batch_images - image_value_range[0]) / (image_value_range[-1] - image_value_range[0])
    if size_frame is None:
        auto_size = int(np.ceil(np.sqrt(images.shape[0])))
        size_frame = [auto_size, auto_size]
    img_h, img_w = batch_images.shape[1], batch_images.shape[2]
    frame = np.zeros([img_h * size_frame[0], img_w * size_frame[1], 3])
    for ind, image in enumerate(images):
        ind_col = ind % size_frame[1]
        ind_row = ind // size_frame[1]
        frame[(ind_row * img_h):(ind_row * img_h + img_h), (ind_col * img_w):(ind_col * img_w + img_w), :] = image
    imsave(save_path, frame)
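
# Usage sketch (illustrative only; the save path is hypothetical):
#   save_batch_images(batch, './save/sample.png', size_frame=[4, 4])  # 4x4 grid saved as one image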