Test documentation (#511)
* Activation Cleaning Docstring Test

* Requirements Pinned with ranges to ensure tested versions are used. Ranges are used to prevent updating the requirements all the time.

* setup.cfg file added with PEP8 configuration

* activation.py refactored

* docstring fixed - ready for documentation unittest

* Yapf correction for max_line_length: 120

* test yapf refactored

* test documentation added

* Missing requirement added: sphinx

* Allow documentation test to pass on warnings

* Fix travis dependencies install

* Travis install script fixed

* Travis install command fixed

* Requirements conflict solved

* Yapf Style modified and merged into "setup.cfg" (see the configuration sketch after this list)

* Yapf Configuration Updated

* Code Refactored with new YAPF formatting style

* Code Refactored with new YAPF formatting style

* Code Refactored with new YAPF formatting style

* Shorten code

* Various Cleaning

* Trailing Slashes removed

* Recurrent Test Fixed

* Line Width Fix

* docs requirements updated

* fix example docs style

* Codacy Issue Fixed

* Merge Errors fixed

* YAPF Style Applied
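For reference, here is a minimal sketch of what the merged "setup.cfg" configuration might look like, based on the PEP8 configuration and the max_line_length: 120 correction mentioned in the bullets above. The commit's exact keys are not reproduced here; the based_on_style value and the [flake8] section are illustrative assumptions.

[yapf]
# assumed base style; column limit matches the "max_line_length: 120" correction above
based_on_style = pep8
column_limit = 120

[flake8]
# assumed PEP8 checker section with the same line length
max-line-length = 120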
DEKHTIARJonathan authored and zsdonghao committed Apr 21, 2018
1 parent e557e62 commit f4a0572
Showing 34 changed files with 244 additions and 261 deletions.
60 changes: 0 additions & 60 deletions .style.yapf

This file was deleted.

2 changes: 1 addition & 1 deletion .travis.yml
@@ -23,7 +23,7 @@ env:
 install:
   - pip install tensorflow
   - pip install -r requirements.txt
-  - pip install .[test]
+  - pip install -e .[dev,doc,test]


 script:
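The new install line uses pip's extras syntax: "pip install -e .[dev,doc,test]" installs the package in editable mode together with the dev, doc and test extra dependency groups, which setup.py is expected to declare. Below is a minimal sketch of such a declaration, assuming a setuptools extras_require mapping; the package lists are illustrative placeholders, not the project's actual pins.

# sketch of a setup.py declaring the extras consumed by "pip install -e .[dev,doc,test]"
# package lists are illustrative placeholders, not the project's actual requirements
from setuptools import setup, find_packages

setup(
    name="tensorlayer",
    packages=find_packages(),
    extras_require={
        "dev": ["yapf"],                  # formatting tools used by the dev workflow
        "doc": ["sphinx>=1.7,<1.8"],      # matches the docs requirement shown below
        "test": ["pytest"],               # assumed test runner
    },
)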
2 changes: 1 addition & 1 deletion docs/requirements.txt
@@ -5,4 +5,4 @@ progressbar2>=3.37,<3.38
 scikit-image>=0.13,<0.14
 scipy>=1.0,<1.1
 sphinx>=1.7,<1.8
-tensorflow==1.5.0
+tensorflow>=1.7,<1.8
16 changes: 10 additions & 6 deletions example/tutorial_binarynet_cifar10_tfrecord.py
@@ -149,11 +149,13 @@ def read_and_decode(filename, is_train=None):
x_train_, y_train_ = read_and_decode("train.cifar10", True)
x_test_, y_test_ = read_and_decode("test.cifar10", False)
# set the number of threads here
x_train_batch, y_train_batch = tf.train.shuffle_batch([x_train_, y_train_], \
batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32)
x_train_batch, y_train_batch = tf.train.shuffle_batch(
[x_train_, y_train_], batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32
)
# for testing, uses batch instead of shuffle_batch
x_test_batch, y_test_batch = tf.train.batch([x_test_, y_test_], \
batch_size=batch_size, capacity=50000, num_threads=32)
x_test_batch, y_test_batch = tf.train.batch(
[x_test_, y_test_], batch_size=batch_size, capacity=50000, num_threads=32
)

def model(x_crop, y_, reuse):
""" For more simplified CNN APIs, check tensorlayer.org """
@@ -239,8 +241,10 @@ def model(x_crop, y_, reuse):
n_batch += 1

if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
print("Epoch %d : Step %d-%d of %d took %fs" % \
(epoch, step, step + n_step_epoch, n_step, time.time() - start_time))
print(
"Epoch %d : Step %d-%d of %d took %fs" %
(epoch, step, step + n_step_epoch, n_step, time.time() - start_time)
)
print(" train loss: %f" % (train_loss / n_batch))
print(" train acc: %f" % (train_acc / n_batch))

3 changes: 1 addition & 2 deletions example/tutorial_binarynet_mnist_cnn.py
@@ -5,8 +5,7 @@
import tensorflow as tf
import tensorlayer as tl

X_train, y_train, X_val, y_val, X_test, y_test = \
tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
# X_train, y_train, X_test, y_test = tl.files.load_cropped_svhn(include_extra=False)

sess = tf.InteractiveSession()
10 changes: 4 additions & 6 deletions example/tutorial_bipedalwalker_a3c_continuous_action.py
@@ -198,11 +198,14 @@ def work(self):
buffer_r.append(r)

if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net

if done:
v_s_ = 0 # terminal
else:
v_s_ = sess.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]

buffer_v_target = []

for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + GAMMA * v_s_
buffer_v_target.append(v_s_)
@@ -211,12 +214,7 @@ def work(self):
buffer_s, buffer_a, buffer_v_target = (
np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
)

feed_dict = {
self.AC.s: buffer_s,
self.AC.a_his: buffer_a,
self.AC.v_target: buffer_v_target,
}
feed_dict = {self.AC.s: buffer_s, self.AC.a_his: buffer_a, self.AC.v_target: buffer_v_target}
# update gradients on global network
self.AC.update_global(feed_dict)
buffer_s, buffer_a, buffer_r = [], [], []
16 changes: 10 additions & 6 deletions example/tutorial_dorefanet_cifar10_tfrecord.py
@@ -149,11 +149,13 @@ def read_and_decode(filename, is_train=None):
x_train_, y_train_ = read_and_decode("train.cifar10", True)
x_test_, y_test_ = read_and_decode("test.cifar10", False)
# set the number of threads here
x_train_batch, y_train_batch = tf.train.shuffle_batch([x_train_, y_train_], \
batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32)
x_train_batch, y_train_batch = tf.train.shuffle_batch(
[x_train_, y_train_], batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32
)
# for testing, uses batch instead of shuffle_batch
x_test_batch, y_test_batch = tf.train.batch([x_test_, y_test_], \
batch_size=batch_size, capacity=50000, num_threads=32)
x_test_batch, y_test_batch = tf.train.batch(
[x_test_, y_test_], batch_size=batch_size, capacity=50000, num_threads=32
)

def model(x_crop, y_, reuse):
""" For more simplified CNN APIs, check tensorlayer.org """
@@ -235,8 +237,10 @@ def model(x_crop, y_, reuse):
n_batch += 1

if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
print("Epoch %d : Step %d-%d of %d took %fs" % \
(epoch, step, step + n_step_epoch, n_step, time.time() - start_time))
print(
"Epoch %d : Step %d-%d of %d took %fs" %
(epoch, step, step + n_step_epoch, n_step, time.time() - start_time)
)
print(" train loss: %f" % (train_loss / n_batch))
print(" train acc: %f" % (train_acc / n_batch))

3 changes: 1 addition & 2 deletions example/tutorial_dorefanet_mnist_cnn.py
@@ -5,8 +5,7 @@
import tensorflow as tf
import tensorlayer as tl

X_train, y_train, X_val, y_val, X_test, y_test = \
tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
# X_train, y_train, X_test, y_test = tl.files.load_cropped_svhn(include_extra=False)

sess = tf.InteractiveSession()
42 changes: 30 additions & 12 deletions example/tutorial_generate_text.py
@@ -230,10 +230,13 @@ def inference(x, is_train, sequence_length, reuse=None):
rnn_init = tf.random_uniform_initializer(-init_scale, init_scale)
with tf.variable_scope("model", reuse=reuse):
network = EmbeddingInputlayer(x, vocab_size, hidden_size, rnn_init, name='embedding')
network = RNNLayer(network, cell_fn=tf.contrib.rnn.BasicLSTMCell, \
cell_init_args={'forget_bias': 0.0, 'state_is_tuple': True}, \
n_hidden=hidden_size, initializer=rnn_init, n_steps=sequence_length, return_last=False,
return_seq_2d=True, name='lstm1')
network = RNNLayer(
network, cell_fn=tf.contrib.rnn.BasicLSTMCell, cell_init_args={
'forget_bias': 0.0,
'state_is_tuple': True
}, n_hidden=hidden_size, initializer=rnn_init, n_steps=sequence_length, return_last=False,
return_seq_2d=True, name='lstm1'
)
lstm1 = network
network = DenseLayer(network, vocab_size, W_init=rnn_init, b_init=rnn_init, act=tf.identity, name='output')
return network, lstm1
@@ -297,14 +300,21 @@ def loss_fn(outputs, targets, batch_size, sequence_length):
## reset all states at the begining of every epoch
state1 = tl.layers.initialize_rnn_state(lstm1.initial_state)
for step, (x, y) in enumerate(tl.iterate.ptb_iterator(train_data, batch_size, sequence_length)):
_cost, state1, _ = sess.run([cost, lstm1.final_state, train_op], \
feed_dict={input_data: x, targets: y, lstm1.initial_state: state1})
_cost, state1, _ = sess.run(
[cost, lstm1.final_state, train_op], feed_dict={
input_data: x,
targets: y,
lstm1.initial_state: state1
}
)
costs += _cost
iters += sequence_length

if step % (epoch_size // 10) == 1:
print("%.3f perplexity: %.3f speed: %.0f wps" % \
(step * 1.0 / epoch_size, np.exp(costs / iters), iters * batch_size / (time.time() - start_time)))
print(
"%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / epoch_size, np.exp(costs / iters), iters * batch_size / (time.time() - start_time))
)
train_perplexity = np.exp(costs / iters)
# print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
print("Epoch: %d/%d Train Perplexity: %.3f" % (i + 1, max_max_epoch, train_perplexity))
@@ -319,14 +329,22 @@ def loss_fn(outputs, targets, batch_size, sequence_length):
# feed the seed to initialize the state for generation.
for ids in outs_id[:-1]:
a_id = np.asarray(ids).reshape(1, 1)
state1 = sess.run([lstm1_test.final_state], \
feed_dict={input_data_test: a_id, lstm1_test.initial_state: state1})
state1 = sess.run(
[lstm1_test.final_state], feed_dict={
input_data_test: a_id,
lstm1_test.initial_state: state1
}
)
# feed the last word in seed, and start to generate sentence.
a_id = outs_id[-1]
for _ in range(print_length):
a_id = np.asarray(a_id).reshape(1, 1)
out, state1 = sess.run([y_soft, lstm1_test.final_state], \
feed_dict={input_data_test: a_id, lstm1_test.initial_state: state1})
out, state1 = sess.run(
[y_soft, lstm1_test.final_state], feed_dict={
input_data_test: a_id,
lstm1_test.initial_state: state1
}
)
## Without sampling
# a_id = np.argmax(out[0])
## Sample from all words, if vocab_size is large,
32 changes: 13 additions & 19 deletions example/tutorial_imagenet_inceptionV3_distributed.py
@@ -25,8 +25,7 @@
from tensorflow.python.framework.errors_impl import OutOfRangeError
from tensorflow.python.training import session_run_hook
from tensorflow.python.training.basic_session_run_hooks import StopAtStepHook
from tensorflow.python.training.monitored_session import \
SingularMonitoredSession
from tensorflow.python.training.monitored_session import SingularMonitoredSession

import tensorlayer as tl

@@ -294,18 +293,15 @@ def calculate_metrics(predicted_batch, real_batch, threshold=0.5, is_training=Fa
def run_evaluator(task_spec, checkpoints_path, batch_size=32):
with tf.Graph().as_default():
# load dataset
images_input, one_hot_classes, num_classes, _dataset_size = \
load_data(file=VAL_FILE,
task_spec=task_spec,
batch_size=batch_size,
epochs=1)
images_input, one_hot_classes, num_classes, _dataset_size = load_data(
file=VAL_FILE, task_spec=task_spec, batch_size=batch_size, epochs=1
)
_network, predictions = build_network(images_input, num_classes=num_classes, is_training=False)
saver = tf.train.Saver()
# metrics
metrics_init_ops, _, metrics_ops = \
calculate_metrics(predicted_batch=predictions,
real_batch=one_hot_classes,
is_training=False)
metrics_init_ops, _, metrics_ops = calculate_metrics(
predicted_batch=predictions, real_batch=one_hot_classes, is_training=False
)
# tensorboard summary
summary_op = tf.summary.merge_all()
# session hook
@@ -338,12 +334,9 @@ def run_worker(task_spec, checkpoints_path, batch_size=32, epochs=10):
global_step = tf.train.get_or_create_global_step()
with tf.device(device_fn):
# load dataset
images_input, one_hot_classes, num_classes, dataset_size = \
load_data(file=TRAIN_FILE,
task_spec=task_spec,
batch_size=batch_size,
epochs=epochs,
shuffle_size=10000)
images_input, one_hot_classes, num_classes, dataset_size = load_data(
file=TRAIN_FILE, task_spec=task_spec, batch_size=batch_size, epochs=epochs, shuffle_size=10000
)
# network
network, predictions = build_network(images_input, num_classes=num_classes, is_training=True)
# training operations
@@ -390,8 +383,9 @@ def run_worker(task_spec, checkpoints_path, batch_size=32, epochs=10):
last_log_time = time.time()
next_log_time = last_log_time + 60
while not sess.should_stop():
step, loss_val, learning_rate_val, _, metrics = \
sess.run([global_step, loss, learning_rate, train_op, metrics_ops])
step, loss_val, learning_rate_val, _, metrics = sess.run(
[global_step, loss, learning_rate, train_op, metrics_ops]
)
if task_spec is None or task_spec.is_master():
now = time.time()
if now > next_log_time:
3 changes: 1 addition & 2 deletions example/tutorial_imdb_fasttext.py
@@ -110,8 +110,7 @@ def hash_ngram(ngram):

def load_and_preprocess_imdb_data(n_gram=None):
"""Load IMDb data and augment with hashed n-gram features."""
X_train, y_train, X_test, y_test = \
tl.files.load_imdb_dataset(nb_words=VOCAB_SIZE)
X_train, y_train, X_test, y_test = tl.files.load_imdb_dataset(nb_words=VOCAB_SIZE)

if n_gram is not None:
X_train = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_train])
6 changes: 2 additions & 4 deletions example/tutorial_keras.py
@@ -8,8 +8,7 @@
from keras.layers import *
from tensorlayer.layers import *

X_train, y_train, X_val, y_val, X_test, y_test = \
tl.files.load_mnist_dataset(shape=(-1, 784))
X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))

sess = tf.InteractiveSession()

@@ -43,8 +42,7 @@ def keras_block(x):
learning_rate = 0.0001

train_params = network.all_params
train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08,
use_locking=False).minimize(cost, var_list=train_params)
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost, var_list=train_params)

tl.layers.initialize_global_variables(sess)

3 changes: 1 addition & 2 deletions example/tutorial_mlp_dropout1.py
@@ -5,8 +5,7 @@
sess = tf.InteractiveSession()

# prepare data
X_train, y_train, X_val, y_val, X_test, y_test = \
tl.files.load_mnist_dataset(shape=(-1,784))
X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))
# define placeholder
x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
y_ = tf.placeholder(tf.int64, shape=[None], name='y_')
6 changes: 2 additions & 4 deletions example/tutorial_mlp_dropout2.py
@@ -5,8 +5,7 @@
sess = tf.InteractiveSession()

# prepare data
X_train, y_train, X_val, y_val, X_test, y_test = \
tl.files.load_mnist_dataset(shape=(-1,784))
X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))
# define placeholder
x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
y_ = tf.placeholder(tf.int64, shape=[None], name='y_')
@@ -41,8 +40,7 @@ def mlp(x, is_train=True, reuse=False):

# define the optimizer
train_params = tl.layers.get_variables_with_name('MLP', train_only=True, printable=False)
train_op = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.9, beta2=0.999, epsilon=1e-08,
use_locking=False).minimize(cost, var_list=train_params)
train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost, var_list=train_params)

# initialize all variables in the session
tl.layers.initialize_global_variables(sess)