From f4a057201876fd29872d722390e2e13d2a4127ab Mon Sep 17 00:00:00 2001
From: Jonathan DEKHTIAR
Date: Sat, 21 Apr 2018 14:01:13 +0200
Subject: [PATCH] Test documentation (#511)

* Activation Cleaning Docstring Test
* Requirements Pinned with range to ensure tested versions are used. Ranges are used to prevent updating requirements all the time.
* setup.cfg file added with PEP8 configuration
* activation.py refactored
* docstring fixed - ready for documentation unittest
* Yapf correction for max_line_length: 120
* test yapf refactored
* test documentation added
* Missing requirement added: sphinx
* Allow test on documentation to pass on warning
* Fix travis dependencies install
* Travis install script fixed
* Travis install command fixed
* Requirements conflict solved
* Yapf Style modified and merged in file "setup.cfg"
* Yapf Configuration Updated
* Code Refactored with new YAPF formatting style
* Code Refactored with new YAPF formatting style
* Code Refactored with new YAPF formatting style
* shorten codes
* Various Cleaning
* Trailing Slashes removed
* Test Recurrent Fixed
* Line Width Fix
* docs requirements updated
* fix example docs style
* Codacy Issue Fixed
* Merge Errors fixed
* YAPF Style Applied
---
 .style.yapf | 60 ----------------
 .travis.yml | 2 +-
 docs/requirements.txt | 2 +-
 .../tutorial_binarynet_cifar10_tfrecord.py | 16 +++--
 example/tutorial_binarynet_mnist_cnn.py | 3 +-
 ...ial_bipedalwalker_a3c_continuous_action.py | 10 ++-
 .../tutorial_dorefanet_cifar10_tfrecord.py | 16 +++--
 example/tutorial_dorefanet_mnist_cnn.py | 3 +-
 example/tutorial_generate_text.py | 42 +++++++----
 ...torial_imagenet_inceptionV3_distributed.py | 32 ++++-----
 example/tutorial_imdb_fasttext.py | 3 +-
 example/tutorial_keras.py | 6 +-
 example/tutorial_mlp_dropout1.py | 3 +-
 example/tutorial_mlp_dropout2.py | 6 +-
 example/tutorial_mnist.py | 31 ++++----
 example/tutorial_mnist_distributed.py | 3 +-
 example/tutorial_mnist_float16.py | 7 +-
 example/tutorial_mnist_simple.py | 3 +-
 example/tutorial_ptb_lstm_state_is_tuple.py | 63 ++++++++--------
 ...tutorial_ternaryweight_cifar10_tfrecord.py | 16 +++--
 example/tutorial_ternaryweight_mnist_cnn.py | 3 +-
 example/tutorial_tf_dataset_voc.py | 3 +-
 example/tutorial_word2vec_basic.py | 12 ++--
 setup.cfg | 71 +++++++++----------
 tensorlayer/files.py | 4 +-
 tensorlayer/layers/convolution.py | 3 +-
 tensorlayer/layers/core.py | 5 +-
 tensorlayer/layers/recurrent.py | 21 +++---
 .../third_party/roi_pooling/test_roi_layer.py | 3 +-
 tensorlayer/visualize.py | 3 +-
 tests/requirements.txt | 1 +
 tests/test_documentation.py | 44 ++++++++++++
 tests/test_layers_spatial_transformer.py | 1 +
 tests/test_yapf_format.py | 4 +-
 34 files changed, 244 insertions(+), 261 deletions(-)
 delete mode 100644 .style.yapf
 create mode 100644 tests/test_documentation.py

diff --git a/.style.yapf b/.style.yapf
deleted file mode 100644
index 65bdef585..000000000
--- a/.style.yapf
+++ /dev/null
@@ -1,60 +0,0 @@
-[style]
-based_on_style=google
-
-# The number of columns to use for indentation.
-indent_width = 4
-
-# The column limit.
-column_limit=120
-
-# Place each dictionary entry onto its own line.
-each_dict_entry_on_separate_line = True
-
-# Put closing brackets on a separate line, dedented, if the bracketed
-# expression can't fit in a single line. Applies to all kinds of brackets,
-# including function definitions and calls.
For example: -# -# config = { -# 'key1': 'value1', -# 'key2': 'value2', -# } # <--- this bracket is dedented and on a separate line -# -# time_series = self.remote_client.query_entity_counters( -# entity='dev3246.region1', -# key='dns.query_latency_tcp', -# transform=Transformation.AVERAGE(window=timedelta(seconds=60)), -# start_ts=now()-timedelta(days=3), -# end_ts=now(), -# ) # <--- this bracket is dedented and on a separate line -dedent_closing_brackets=True - -# Do not split consecutive brackets. Only relevant when DEDENT_CLOSING_BRACKETS is set -coalesce_brackets = False - -# Align closing bracket with visual indentation. -align_closing_bracket_with_visual_indent = False - -# Split named assignments onto individual lines. -split_before_named_assigns = False - -# If an argument / parameter list is going to be split, then split before the first argument. -split_before_first_argument = True - -# Split before arguments if the argument list is terminated by a comma. -split_arguments_when_comma_terminated = False - -# Insert a space between the ending comma and closing bracket of a list, etc. -space_between_ending_comma_and_closing_bracket = True - -# Join short lines into one line. E.g., single line if statements. -join_multiple_lines = True - -# Do not include spaces around selected binary operators. -# Example: 1 + 2 * 3 - 4 / 5 => 1 + 2*3 - 4/5 -no_spaces_around_selected_binary_operators = True - -# Allow lambdas to be formatted on more than one line. -allow_multiline_lambdas = True - -SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT = 10 -SPLIT_PENALTY_AFTER_OPENING_BRACKET = 500 \ No newline at end of file diff --git a/.travis.yml b/.travis.yml index 3c740fb85..adf459c5c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -23,7 +23,7 @@ env: install: - pip install tensorflow - pip install -r requirements.txt - - pip install .[test] + - pip install -e .[dev,doc,test] script: diff --git a/docs/requirements.txt b/docs/requirements.txt index 80e859ad0..bc48f2523 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -5,4 +5,4 @@ progressbar2>=3.37,<3.38 scikit-image>=0.13,<0.14 scipy>=1.0,<1.1 sphinx>=1.7,<1.8 -tensorflow==1.5.0 +tensorflow>=1.7,<1.8 diff --git a/example/tutorial_binarynet_cifar10_tfrecord.py b/example/tutorial_binarynet_cifar10_tfrecord.py index 7249d4954..ef15fb30b 100644 --- a/example/tutorial_binarynet_cifar10_tfrecord.py +++ b/example/tutorial_binarynet_cifar10_tfrecord.py @@ -149,11 +149,13 @@ def read_and_decode(filename, is_train=None): x_train_, y_train_ = read_and_decode("train.cifar10", True) x_test_, y_test_ = read_and_decode("test.cifar10", False) # set the number of threads here - x_train_batch, y_train_batch = tf.train.shuffle_batch([x_train_, y_train_], \ - batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32) + x_train_batch, y_train_batch = tf.train.shuffle_batch( + [x_train_, y_train_], batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32 + ) # for testing, uses batch instead of shuffle_batch - x_test_batch, y_test_batch = tf.train.batch([x_test_, y_test_], \ - batch_size=batch_size, capacity=50000, num_threads=32) + x_test_batch, y_test_batch = tf.train.batch( + [x_test_, y_test_], batch_size=batch_size, capacity=50000, num_threads=32 + ) def model(x_crop, y_, reuse): """ For more simplified CNN APIs, check tensorlayer.org """ @@ -239,8 +241,10 @@ def model(x_crop, y_, reuse): n_batch += 1 if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - print("Epoch %d : Step %d-%d of %d took %fs" % \ - (epoch, step, step + 
n_step_epoch, n_step, time.time() - start_time)) + print( + "Epoch %d : Step %d-%d of %d took %fs" % + (epoch, step, step + n_step_epoch, n_step, time.time() - start_time) + ) print(" train loss: %f" % (train_loss / n_batch)) print(" train acc: %f" % (train_acc / n_batch)) diff --git a/example/tutorial_binarynet_mnist_cnn.py b/example/tutorial_binarynet_mnist_cnn.py index 3044ed642..ad71b961b 100644 --- a/example/tutorial_binarynet_mnist_cnn.py +++ b/example/tutorial_binarynet_mnist_cnn.py @@ -5,8 +5,7 @@ import tensorflow as tf import tensorlayer as tl -X_train, y_train, X_val, y_val, X_test, y_test = \ - tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1)) +X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1)) # X_train, y_train, X_test, y_test = tl.files.load_cropped_svhn(include_extra=False) sess = tf.InteractiveSession() diff --git a/example/tutorial_bipedalwalker_a3c_continuous_action.py b/example/tutorial_bipedalwalker_a3c_continuous_action.py index 9c2aac04d..387d3faaf 100644 --- a/example/tutorial_bipedalwalker_a3c_continuous_action.py +++ b/example/tutorial_bipedalwalker_a3c_continuous_action.py @@ -198,11 +198,14 @@ def work(self): buffer_r.append(r) if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net + if done: v_s_ = 0 # terminal else: v_s_ = sess.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0] + buffer_v_target = [] + for r in buffer_r[::-1]: # reverse buffer r v_s_ = r + GAMMA * v_s_ buffer_v_target.append(v_s_) @@ -211,12 +214,7 @@ def work(self): buffer_s, buffer_a, buffer_v_target = ( np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target) ) - - feed_dict = { - self.AC.s: buffer_s, - self.AC.a_his: buffer_a, - self.AC.v_target: buffer_v_target, - } + feed_dict = {self.AC.s: buffer_s, self.AC.a_his: buffer_a, self.AC.v_target: buffer_v_target} # update gradients on global network self.AC.update_global(feed_dict) buffer_s, buffer_a, buffer_r = [], [], [] diff --git a/example/tutorial_dorefanet_cifar10_tfrecord.py b/example/tutorial_dorefanet_cifar10_tfrecord.py index f523fedd2..59cd60e42 100644 --- a/example/tutorial_dorefanet_cifar10_tfrecord.py +++ b/example/tutorial_dorefanet_cifar10_tfrecord.py @@ -149,11 +149,13 @@ def read_and_decode(filename, is_train=None): x_train_, y_train_ = read_and_decode("train.cifar10", True) x_test_, y_test_ = read_and_decode("test.cifar10", False) # set the number of threads here - x_train_batch, y_train_batch = tf.train.shuffle_batch([x_train_, y_train_], \ - batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32) + x_train_batch, y_train_batch = tf.train.shuffle_batch( + [x_train_, y_train_], batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32 + ) # for testing, uses batch instead of shuffle_batch - x_test_batch, y_test_batch = tf.train.batch([x_test_, y_test_], \ - batch_size=batch_size, capacity=50000, num_threads=32) + x_test_batch, y_test_batch = tf.train.batch( + [x_test_, y_test_], batch_size=batch_size, capacity=50000, num_threads=32 + ) def model(x_crop, y_, reuse): """ For more simplified CNN APIs, check tensorlayer.org """ @@ -235,8 +237,10 @@ def model(x_crop, y_, reuse): n_batch += 1 if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - print("Epoch %d : Step %d-%d of %d took %fs" % \ - (epoch, step, step + n_step_epoch, n_step, time.time() - start_time)) + print( + "Epoch %d : Step %d-%d of %d took %fs" % + (epoch, step, step + n_step_epoch, n_step, time.time() - start_time) + ) 
print(" train loss: %f" % (train_loss / n_batch)) print(" train acc: %f" % (train_acc / n_batch)) diff --git a/example/tutorial_dorefanet_mnist_cnn.py b/example/tutorial_dorefanet_mnist_cnn.py index d68500ae8..08c8b6035 100644 --- a/example/tutorial_dorefanet_mnist_cnn.py +++ b/example/tutorial_dorefanet_mnist_cnn.py @@ -5,8 +5,7 @@ import tensorflow as tf import tensorlayer as tl -X_train, y_train, X_val, y_val, X_test, y_test = \ - tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1)) +X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1)) # X_train, y_train, X_test, y_test = tl.files.load_cropped_svhn(include_extra=False) sess = tf.InteractiveSession() diff --git a/example/tutorial_generate_text.py b/example/tutorial_generate_text.py index 6d4ce1a61..a17781b2d 100644 --- a/example/tutorial_generate_text.py +++ b/example/tutorial_generate_text.py @@ -230,10 +230,13 @@ def inference(x, is_train, sequence_length, reuse=None): rnn_init = tf.random_uniform_initializer(-init_scale, init_scale) with tf.variable_scope("model", reuse=reuse): network = EmbeddingInputlayer(x, vocab_size, hidden_size, rnn_init, name='embedding') - network = RNNLayer(network, cell_fn=tf.contrib.rnn.BasicLSTMCell, \ - cell_init_args={'forget_bias': 0.0, 'state_is_tuple': True}, \ - n_hidden=hidden_size, initializer=rnn_init, n_steps=sequence_length, return_last=False, - return_seq_2d=True, name='lstm1') + network = RNNLayer( + network, cell_fn=tf.contrib.rnn.BasicLSTMCell, cell_init_args={ + 'forget_bias': 0.0, + 'state_is_tuple': True + }, n_hidden=hidden_size, initializer=rnn_init, n_steps=sequence_length, return_last=False, + return_seq_2d=True, name='lstm1' + ) lstm1 = network network = DenseLayer(network, vocab_size, W_init=rnn_init, b_init=rnn_init, act=tf.identity, name='output') return network, lstm1 @@ -297,14 +300,21 @@ def loss_fn(outputs, targets, batch_size, sequence_length): ## reset all states at the begining of every epoch state1 = tl.layers.initialize_rnn_state(lstm1.initial_state) for step, (x, y) in enumerate(tl.iterate.ptb_iterator(train_data, batch_size, sequence_length)): - _cost, state1, _ = sess.run([cost, lstm1.final_state, train_op], \ - feed_dict={input_data: x, targets: y, lstm1.initial_state: state1}) + _cost, state1, _ = sess.run( + [cost, lstm1.final_state, train_op], feed_dict={ + input_data: x, + targets: y, + lstm1.initial_state: state1 + } + ) costs += _cost iters += sequence_length if step % (epoch_size // 10) == 1: - print("%.3f perplexity: %.3f speed: %.0f wps" % \ - (step * 1.0 / epoch_size, np.exp(costs / iters), iters * batch_size / (time.time() - start_time))) + print( + "%.3f perplexity: %.3f speed: %.0f wps" % + (step * 1.0 / epoch_size, np.exp(costs / iters), iters * batch_size / (time.time() - start_time)) + ) train_perplexity = np.exp(costs / iters) # print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity)) print("Epoch: %d/%d Train Perplexity: %.3f" % (i + 1, max_max_epoch, train_perplexity)) @@ -319,14 +329,22 @@ def loss_fn(outputs, targets, batch_size, sequence_length): # feed the seed to initialize the state for generation. for ids in outs_id[:-1]: a_id = np.asarray(ids).reshape(1, 1) - state1 = sess.run([lstm1_test.final_state], \ - feed_dict={input_data_test: a_id, lstm1_test.initial_state: state1}) + state1 = sess.run( + [lstm1_test.final_state], feed_dict={ + input_data_test: a_id, + lstm1_test.initial_state: state1 + } + ) # feed the last word in seed, and start to generate sentence. 
a_id = outs_id[-1] for _ in range(print_length): a_id = np.asarray(a_id).reshape(1, 1) - out, state1 = sess.run([y_soft, lstm1_test.final_state], \ - feed_dict={input_data_test: a_id, lstm1_test.initial_state: state1}) + out, state1 = sess.run( + [y_soft, lstm1_test.final_state], feed_dict={ + input_data_test: a_id, + lstm1_test.initial_state: state1 + } + ) ## Without sampling # a_id = np.argmax(out[0]) ## Sample from all words, if vocab_size is large, diff --git a/example/tutorial_imagenet_inceptionV3_distributed.py b/example/tutorial_imagenet_inceptionV3_distributed.py index fc9495e74..4d02a72d1 100644 --- a/example/tutorial_imagenet_inceptionV3_distributed.py +++ b/example/tutorial_imagenet_inceptionV3_distributed.py @@ -25,8 +25,7 @@ from tensorflow.python.framework.errors_impl import OutOfRangeError from tensorflow.python.training import session_run_hook from tensorflow.python.training.basic_session_run_hooks import StopAtStepHook -from tensorflow.python.training.monitored_session import \ - SingularMonitoredSession +from tensorflow.python.training.monitored_session import SingularMonitoredSession import tensorlayer as tl @@ -294,18 +293,15 @@ def calculate_metrics(predicted_batch, real_batch, threshold=0.5, is_training=Fa def run_evaluator(task_spec, checkpoints_path, batch_size=32): with tf.Graph().as_default(): # load dataset - images_input, one_hot_classes, num_classes, _dataset_size = \ - load_data(file=VAL_FILE, - task_spec=task_spec, - batch_size=batch_size, - epochs=1) + images_input, one_hot_classes, num_classes, _dataset_size = load_data( + file=VAL_FILE, task_spec=task_spec, batch_size=batch_size, epochs=1 + ) _network, predictions = build_network(images_input, num_classes=num_classes, is_training=False) saver = tf.train.Saver() # metrics - metrics_init_ops, _, metrics_ops = \ - calculate_metrics(predicted_batch=predictions, - real_batch=one_hot_classes, - is_training=False) + metrics_init_ops, _, metrics_ops = calculate_metrics( + predicted_batch=predictions, real_batch=one_hot_classes, is_training=False + ) # tensorboard summary summary_op = tf.summary.merge_all() # session hook @@ -338,12 +334,9 @@ def run_worker(task_spec, checkpoints_path, batch_size=32, epochs=10): global_step = tf.train.get_or_create_global_step() with tf.device(device_fn): # load dataset - images_input, one_hot_classes, num_classes, dataset_size = \ - load_data(file=TRAIN_FILE, - task_spec=task_spec, - batch_size=batch_size, - epochs=epochs, - shuffle_size=10000) + images_input, one_hot_classes, num_classes, dataset_size = load_data( + file=TRAIN_FILE, task_spec=task_spec, batch_size=batch_size, epochs=epochs, shuffle_size=10000 + ) # network network, predictions = build_network(images_input, num_classes=num_classes, is_training=True) # training operations @@ -390,8 +383,9 @@ def run_worker(task_spec, checkpoints_path, batch_size=32, epochs=10): last_log_time = time.time() next_log_time = last_log_time + 60 while not sess.should_stop(): - step, loss_val, learning_rate_val, _, metrics = \ - sess.run([global_step, loss, learning_rate, train_op, metrics_ops]) + step, loss_val, learning_rate_val, _, metrics = sess.run( + [global_step, loss, learning_rate, train_op, metrics_ops] + ) if task_spec is None or task_spec.is_master(): now = time.time() if now > next_log_time: diff --git a/example/tutorial_imdb_fasttext.py b/example/tutorial_imdb_fasttext.py index eecc6c825..5b9a262e3 100644 --- a/example/tutorial_imdb_fasttext.py +++ b/example/tutorial_imdb_fasttext.py @@ -110,8 +110,7 @@ def 
hash_ngram(ngram): def load_and_preprocess_imdb_data(n_gram=None): """Load IMDb data and augment with hashed n-gram features.""" - X_train, y_train, X_test, y_test = \ - tl.files.load_imdb_dataset(nb_words=VOCAB_SIZE) + X_train, y_train, X_test, y_test = tl.files.load_imdb_dataset(nb_words=VOCAB_SIZE) if n_gram is not None: X_train = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_train]) diff --git a/example/tutorial_keras.py b/example/tutorial_keras.py index e3eefaee5..c06ce6b28 100644 --- a/example/tutorial_keras.py +++ b/example/tutorial_keras.py @@ -8,8 +8,7 @@ from keras.layers import * from tensorlayer.layers import * -X_train, y_train, X_val, y_val, X_test, y_test = \ - tl.files.load_mnist_dataset(shape=(-1, 784)) +X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) sess = tf.InteractiveSession() @@ -43,8 +42,7 @@ def keras_block(x): learning_rate = 0.0001 train_params = network.all_params -train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, - use_locking=False).minimize(cost, var_list=train_params) +train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost, var_list=train_params) tl.layers.initialize_global_variables(sess) diff --git a/example/tutorial_mlp_dropout1.py b/example/tutorial_mlp_dropout1.py index 8a8d4e818..029c74eb6 100644 --- a/example/tutorial_mlp_dropout1.py +++ b/example/tutorial_mlp_dropout1.py @@ -5,8 +5,7 @@ sess = tf.InteractiveSession() # prepare data -X_train, y_train, X_val, y_val, X_test, y_test = \ - tl.files.load_mnist_dataset(shape=(-1,784)) +X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) # define placeholder x = tf.placeholder(tf.float32, shape=[None, 784], name='x') y_ = tf.placeholder(tf.int64, shape=[None], name='y_') diff --git a/example/tutorial_mlp_dropout2.py b/example/tutorial_mlp_dropout2.py index 0f70e2225..f3a7bdeeb 100644 --- a/example/tutorial_mlp_dropout2.py +++ b/example/tutorial_mlp_dropout2.py @@ -5,8 +5,7 @@ sess = tf.InteractiveSession() # prepare data -X_train, y_train, X_val, y_val, X_test, y_test = \ - tl.files.load_mnist_dataset(shape=(-1,784)) +X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) # define placeholder x = tf.placeholder(tf.float32, shape=[None, 784], name='x') y_ = tf.placeholder(tf.int64, shape=[None], name='y_') @@ -41,8 +40,7 @@ def mlp(x, is_train=True, reuse=False): # define the optimizer train_params = tl.layers.get_variables_with_name('MLP', train_only=True, printable=False) -train_op = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.9, beta2=0.999, epsilon=1e-08, - use_locking=False).minimize(cost, var_list=train_params) +train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost, var_list=train_params) # initialize all variables in the session tl.layers.initialize_global_variables(sess) diff --git a/example/tutorial_mnist.py b/example/tutorial_mnist.py index f7c915ecc..db3806c1d 100644 --- a/example/tutorial_mnist.py +++ b/example/tutorial_mnist.py @@ -12,13 +12,13 @@ """ import time + import tensorflow as tf import tensorlayer as tl def main_test_layers(model='relu'): - X_train, y_train, X_val, y_val, X_test, y_test = \ - tl.files.load_mnist_dataset(shape=(-1,784)) + X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) print('X_train.shape', X_train.shape) print('y_train.shape', y_train.shape) @@ -156,8 +156,7 @@ def main_test_layers(model='relu'): 
def main_test_denoise_AE(model='relu'): - X_train, y_train, X_val, y_val, X_test, y_test = \ - tl.files.load_mnist_dataset(shape=(-1,784)) + X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) sess = tf.InteractiveSession() @@ -186,8 +185,10 @@ def main_test_denoise_AE(model='relu'): ## pretrain print("Pre-train Layer 1") - recon_layer1.pretrain(sess, x=x, X_train=X_train, X_val=X_val, denoise_name='denoising1', \ - n_epoch=200, batch_size=128, print_freq=10, save=True, save_name='w1pre_') + recon_layer1.pretrain( + sess, x=x, X_train=X_train, X_val=X_val, denoise_name='denoising1', n_epoch=200, batch_size=128, print_freq=10, + save=True, save_name='w1pre_' + ) # You can also disable denoisong by setting denoise_name=None. # recon_layer1.pretrain(sess, x=x, X_train=X_train, X_val=X_val, # denoise_name=None, n_epoch=500, batch_size=128, @@ -203,8 +204,7 @@ def main_test_denoise_AE(model='relu'): def main_test_stacked_denoise_AE(model='relu'): - X_train, y_train, X_val, y_val, X_test, y_test = \ - tl.files.load_mnist_dataset(shape=(-1,784)) + X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) sess = tf.InteractiveSession() @@ -257,11 +257,15 @@ def main_test_stacked_denoise_AE(model='relu'): print("\nAll net Params before pre-train") net.print_params() print("\nPre-train Layer 1") - recon_layer1.pretrain(sess, x=x, X_train=X_train, X_val=X_val, denoise_name='denoising1', \ - n_epoch=100, batch_size=128, print_freq=10, save=True, save_name='w1pre_') + recon_layer1.pretrain( + sess, x=x, X_train=X_train, X_val=X_val, denoise_name='denoising1', n_epoch=100, batch_size=128, print_freq=10, + save=True, save_name='w1pre_' + ) print("\nPre-train Layer 2") - recon_layer2.pretrain(sess, x=x, X_train=X_train, X_val=X_val, denoise_name='denoising1', \ - n_epoch=100, batch_size=128, print_freq=10, save=False) + recon_layer2.pretrain( + sess, x=x, X_train=X_train, X_val=X_val, denoise_name='denoising1', n_epoch=100, batch_size=128, print_freq=10, + save=False + ) print("\nAll net Params after pre-train") net.print_params() @@ -346,8 +350,7 @@ def main_test_cnn_layer(): - For simplified CNN layer see "Convolutional layer (Simplified)" in read the docs website. 
""" - X_train, y_train, X_val, y_val, X_test, y_test = \ - tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1)) + X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1)) sess = tf.InteractiveSession() diff --git a/example/tutorial_mnist_distributed.py b/example/tutorial_mnist_distributed.py index befa9ac41..6f2e8bd67 100644 --- a/example/tutorial_mnist_distributed.py +++ b/example/tutorial_mnist_distributed.py @@ -22,8 +22,7 @@ device_fn = task_spec.device_fn() if task_spec is not None else None # prepare data -X_train, y_train, X_val, y_val, X_test, y_test = \ - tl.files.load_mnist_dataset(shape=(-1,784)) +X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) # create graph with tf.device(device_fn): diff --git a/example/tutorial_mnist_float16.py b/example/tutorial_mnist_float16.py index 307a4f724..1f2df0699 100644 --- a/example/tutorial_mnist_float16.py +++ b/example/tutorial_mnist_float16.py @@ -8,8 +8,7 @@ LayersConfig.tf_dtype = tf.float16 # tf.float32 tf.float16 -X_train, y_train, X_val, y_val, X_test, y_test = \ - tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1)) +X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1)) sess = tf.InteractiveSession() @@ -58,8 +57,8 @@ def model(x, is_train=True, reuse=False): train_params = tl.layers.get_variables_with_name('model', train_only=True, printable=False) # for float16 epsilon=1e-4 see https://stackoverflow.com/questions/42064941/tensorflow-float16-support-is-broken # for float32 epsilon=1e-08 -train_op = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.9, beta2=0.999, \ - epsilon=1e-4, use_locking=False).minimize(cost, var_list=train_params) +train_op = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.9, beta2=0.999, epsilon=1e-4, + use_locking=False).minimize(cost, var_list=train_params) # initialize all variables in the session tl.layers.initialize_global_variables(sess) diff --git a/example/tutorial_mnist_simple.py b/example/tutorial_mnist_simple.py index f4d6c3751..fa54c6310 100644 --- a/example/tutorial_mnist_simple.py +++ b/example/tutorial_mnist_simple.py @@ -7,8 +7,7 @@ sess = tf.InteractiveSession() # prepare data -X_train, y_train, X_val, y_val, X_test, y_test = \ - tl.files.load_mnist_dataset(shape=(-1, 784)) +X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) # define placeholder x = tf.placeholder(tf.float32, shape=[None, 784], name='x') y_ = tf.placeholder(tf.int64, shape=[None], name='y_') diff --git a/example/tutorial_ptb_lstm_state_is_tuple.py b/example/tutorial_ptb_lstm_state_is_tuple.py index 9b649b84d..6eeebc701 100644 --- a/example/tutorial_ptb_lstm_state_is_tuple.py +++ b/example/tutorial_ptb_lstm_state_is_tuple.py @@ -5,11 +5,11 @@ This is a reimpmentation of the TensorFlow official PTB example in : tensorflow/models/rnn/ptb -The batch_size can be seem as how many concurrent computations.\n -As the following example shows, the first batch learn the sequence information by using 0 to 9.\n -The second batch learn the sequence information by using 10 to 19.\n -So it ignores the information from 9 to 10 !\n -If only if we set the batch_size = 1, it will consider all information from 0 to 20.\n +The batch_size can be seem as how many concurrent computations.n +As the following example shows, the first batch learn the sequence information by using 0 to 9.n +The second batch learn the sequence information by using 10 to 19.n +So it ignores the 
information from 9 to 10 !\n +If only if we set the batch_size = 1, it will consider all information from 0 to 20.\n The meaning of batch_size here is not the same with the MNIST example. In MNIST example, batch_size reflects how many examples we consider in each iteration, while in @@ -24,12 +24,12 @@ At the begining of each epoch, we initialize (reset) the 20 RNN states for 20 segments, then go through 20 segments separately. -The training data will be generated as follow:\n +The training data will be generated as follow:\n >>> train_data = [i for i in range(20)] >>> for batch in tl.iterate.ptb_iterator(train_data, batch_size=2, num_steps=3): >>> x, y = batch ->>> print(x, '\n',y) +>>> print(x, '\n',y) ... [[ 0 1 2] <---x 1st subset/ iteration ... [10 11 12]] ... [[ 1 2 3] <---y ... [11 12 13]] @@ -282,7 +282,7 @@ def loss_fn(outputs, targets, batch_size): net.print_layers() tl.layers.print_all_variables() - print("\nStart learning a language model by using PTB dataset") + print("\nStart learning a language model by using PTB dataset") for i in range(max_max_epoch): # decreases the initial learning rate after several # epoachs (defined by ``max_epoch``), by multipling a ``lr_decay``. @@ -309,15 +309,10 @@ def loss_fn(outputs, targets, batch_size): } # For training, enable dropout feed_dict.update(net.all_drop) - _cost, state1_c, state1_h, state2_c, state2_h, _ = \ - sess.run([cost, - lstm1.final_state.c, - lstm1.final_state.h, - lstm2.final_state.c, - lstm2.final_state.h, - train_op], - feed_dict=feed_dict - ) + _cost, state1_c, state1_h, state2_c, state2_h, _ = sess.run( + [cost, lstm1.final_state.c, lstm1.final_state.h, lstm2.final_state.c, lstm2.final_state.h, train_op], + feed_dict=feed_dict + ) state1 = (state1_c, state1_h) state2 = (state2_c, state2_h) @@ -348,15 +343,13 @@ def loss_fn(outputs, targets, batch_size): lstm2_val.initial_state.c: state2[0], lstm2_val.initial_state.h: state2[1], } - _cost, state1_c, state1_h, state2_c, state2_h, _ = \ - sess.run([cost_val, - lstm1_val.final_state.c, - lstm1_val.final_state.h, - lstm2_val.final_state.c, - lstm2_val.final_state.h, - tf.no_op()], - feed_dict=feed_dict - ) + _cost, state1_c, state1_h, state2_c, state2_h, _ = sess.run( + [ + cost_val, lstm1_val.final_state.c, lstm1_val.final_state.h, lstm2_val.final_state.c, + lstm2_val.final_state.h, + tf.no_op() + ], feed_dict=feed_dict + ) state1 = (state1_c, state1_h) state2 = (state2_c, state2_h) costs += _cost @@ -382,15 +375,15 @@ def loss_fn(outputs, targets, batch_size): lstm2_test.initial_state.c: state2[0], lstm2_test.initial_state.h: state2[1], } - _cost, state1_c, state1_h, state2_c, state2_h = \ - sess.run([cost_test, - lstm1_test.final_state.c, - lstm1_test.final_state.h, - lstm2_test.final_state.c, - lstm2_test.final_state.h, - ], - feed_dict=feed_dict - ) + _cost, state1_c, state1_h, state2_c, state2_h = sess.run( + [ + cost_test, + lstm1_test.final_state.c, + lstm1_test.final_state.h, + lstm2_test.final_state.c, + lstm2_test.final_state.h, + ], feed_dict=feed_dict + ) state1 = (state1_c, state1_h) state2 = (state2_c, state2_h) costs += _cost diff --git a/example/tutorial_ternaryweight_cifar10_tfrecord.py b/example/tutorial_ternaryweight_cifar10_tfrecord.py index 0a5ea3c99..67d004f14 100644 --- a/example/tutorial_ternaryweight_cifar10_tfrecord.py +++ b/example/tutorial_ternaryweight_cifar10_tfrecord.py @@ -148,11 +148,13 @@ def read_and_decode(filename, is_train=None): x_train_, y_train_ = read_and_decode("train.cifar10", True) x_test_, y_test_ = read_and_decode("test.cifar10", False) # set the 
number of threads here - x_train_batch, y_train_batch = tf.train.shuffle_batch([x_train_, y_train_], \ - batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32) + x_train_batch, y_train_batch = tf.train.shuffle_batch( + [x_train_, y_train_], batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32 + ) # for testing, uses batch instead of shuffle_batch - x_test_batch, y_test_batch = tf.train.batch([x_test_, y_test_], \ - batch_size=batch_size, capacity=50000, num_threads=32) + x_test_batch, y_test_batch = tf.train.batch( + [x_test_, y_test_], batch_size=batch_size, capacity=50000, num_threads=32 + ) def model(x_crop, y_, reuse): """ For more simplified CNN APIs, check tensorlayer.org """ @@ -234,8 +236,10 @@ def model(x_crop, y_, reuse): n_batch += 1 if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - print("Epoch %d : Step %d-%d of %d took %fs" % \ - (epoch, step, step + n_step_epoch, n_step, time.time() - start_time)) + print( + "Epoch %d : Step %d-%d of %d took %fs" % + (epoch, step, step + n_step_epoch, n_step, time.time() - start_time) + ) print(" train loss: %f" % (train_loss / n_batch)) print(" train acc: %f" % (train_acc / n_batch)) diff --git a/example/tutorial_ternaryweight_mnist_cnn.py b/example/tutorial_ternaryweight_mnist_cnn.py index 23d23b4b5..44dd03d92 100644 --- a/example/tutorial_ternaryweight_mnist_cnn.py +++ b/example/tutorial_ternaryweight_mnist_cnn.py @@ -7,8 +7,7 @@ import tensorlayer as tl -X_train, y_train, X_val, y_val, X_test, y_test = \ - tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1)) +X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1)) # X_train, y_train, X_test, y_test = tl.files.load_cropped_svhn(include_extra=False) sess = tf.InteractiveSession() diff --git a/example/tutorial_tf_dataset_voc.py b/example/tutorial_tf_dataset_voc.py index 5f32305bd..b678380e9 100644 --- a/example/tutorial_tf_dataset_voc.py +++ b/example/tutorial_tf_dataset_voc.py @@ -12,8 +12,7 @@ import tensorflow as tf import tensorlayer as tl -imgs_file_list, _, _, _, classes, _, _,\ - _, objs_info_list, _ = tl.files.load_voc_dataset(dataset="2007") +imgs_file_list, _, _, _, classes, _, _, _, objs_info_list, _ = tl.files.load_voc_dataset(dataset="2007") ann_list = [] for info in objs_info_list: diff --git a/example/tutorial_word2vec_basic.py b/example/tutorial_word2vec_basic.py index bdeab7747..4dc7670b0 100644 --- a/example/tutorial_word2vec_basic.py +++ b/example/tutorial_word2vec_basic.py @@ -133,8 +133,7 @@ def main_word2vec_basic(): dictionary = all_var['dictionary'] reverse_dictionary = all_var['reverse_dictionary'] else: - data, count, dictionary, reverse_dictionary = \ - tl.nlp.build_words_dataset(words, vocabulary_size, True, _UNK) + data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size, True, _UNK) print('Most 5 common words (+UNK)', count[:5]) # [['UNK', 418391], (b'the', 1061396), (b'of', 593677), (b'and', 416629), (b'one', 411764)] @@ -293,8 +292,7 @@ def main_word2vec_basic(): print() # from tensorflow/models/embedding/word2vec.py - analogy_questions = tl.nlp.read_analogies_file( \ - eval_file='questions-words.txt', word2id=dictionary) + analogy_questions = tl.nlp.read_analogies_file(eval_file='questions-words.txt', word2id=dictionary) # The eval feeds three vectors of word ids for a, b, c, each of # which is of size N, where N is the number of analogies we want to # evaluate in one batch. 
@@ -341,8 +339,10 @@ def predict(analogy): # if one of the top 4 answers in correct, win ! if idx[question, j] == sub[question, 3]: # Bingo! We predicted correctly. E.g., [italy, rome, france, paris]. - print(j+1, tl.nlp.word_ids_to_words([idx[question, j]], reverse_dictionary) \ - , ':', tl.nlp.word_ids_to_words(sub[question, :], reverse_dictionary)) + print( + j + 1, tl.nlp.word_ids_to_words([idx[question, j]], reverse_dictionary), ':', + tl.nlp.word_ids_to_words(sub[question, :], reverse_dictionary) + ) correct += 1 break elif idx[question, j] in sub[question, :3]: diff --git a/setup.cfg b/setup.cfg index 96f68c83d..9b5363fbd 100644 --- a/setup.cfg +++ b/setup.cfg @@ -15,49 +15,52 @@ exclude = img [yapf] -based_on_style = pep8 +based_on_style=google # The number of columns to use for indentation. indent_width = 4 -# The column limit (or max line-length) -column_limit = 120 - -# Do not split consecutive brackets. Only relevant when DEDENT_CLOSING_BRACKETS is set -coalesce_brackets = False - -# Put closing brackets on a separate line, dedented, -# if the bracketed expression can't fit in a single line. -dedent_closing_brackets = True +# The column limit. +column_limit=120 # Place each dictionary entry onto its own line. each_dict_entry_on_separate_line = True -# For list comprehensions and generator expressions with multiple clauses -# (e.g multiple "for" calls, "if" filter expressions) -# and which need to be reflowed, split each clause onto its own line -split_complex_comprehension = True +# Put closing brackets on a separate line, dedented, if the bracketed +# expression can't fit in a single line. Applies to all kinds of brackets, +# including function definitions and calls. For example: +# +# config = { +# 'key1': 'value1', +# 'key2': 'value2', +# } # <--- this bracket is dedented and on a separate line +# +# time_series = self.remote_client.query_entity_counters( +# entity='dev3246.region1', +# key='dns.query_latency_tcp', +# transform=Transformation.AVERAGE(window=timedelta(seconds=60)), +# start_ts=now()-timedelta(days=3), +# end_ts=now(), +# ) # <--- this bracket is dedented and on a separate line +dedent_closing_brackets=True -# Split before arguments if the argument list is terminated by a comma. -split_arguments_when_comma_terminated = True +# Do not split consecutive brackets. Only relevant when DEDENT_CLOSING_BRACKETS is set +coalesce_brackets = False + +# Align closing bracket with visual indentation. +align_closing_bracket_with_visual_indent = False # Split named assignments onto individual lines. -split_before_named_assigns = True +split_before_named_assigns = False # If an argument / parameter list is going to be split, then split before the first argument. -split_before_first_argument = False - -# Split after the opening paren which surrounds an expression if it doesn't fit on a single line. -split_before_expression_after_opening_paren = True +split_before_first_argument = True -# Split before the closing bracket if a list or dict literal doesn't fit on a single line. -# split_before_closing_bracket = True - -# Allow lambdas to be formatted on more than one line. -allow_multiline_lambdas = True +# Split before arguments if the argument list is terminated by a comma. +split_arguments_when_comma_terminated = False -# Allow splits before the dictionary value. -allow_split_before_dict_value = False +# Insert a space between the ending comma and closing bracket of a list, etc. +space_between_ending_comma_and_closing_bracket = True # Join short lines into one line. 
E.g., single line if statements. join_multiple_lines = True @@ -66,12 +69,8 @@ join_multiple_lines = True # Example: 1 + 2 * 3 - 4 / 5 => 1 + 2*3 - 4/5 no_spaces_around_selected_binary_operators = True +# Allow lambdas to be formatted on more than one line. +allow_multiline_lambdas = True -SPLIT_PENALTY_AFTER_OPENING_BRACKET = -30 -#SPLIT_PENALTY_AFTER_UNARY_OPERATOR = -5000 -#SPLIT_PENALTY_BEFORE_IF_EXPR = -5000 -SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT = -8 -#SPLIT_PENALTY_IMPORT_NAMES = -5000 -#SPLIT_PENALTY_LOGICAL_OPERATOR = -5000 - - +SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT = 10 +SPLIT_PENALTY_AFTER_OPENING_BRACKET = 500 \ No newline at end of file diff --git a/tensorlayer/files.py b/tensorlayer/files.py index 25c38dcf4..cd505e269 100644 --- a/tensorlayer/files.py +++ b/tensorlayer/files.py @@ -1308,9 +1308,7 @@ def convert_annotation(file_name): data = _recursive_parse_xml_to_dict(xml)['annotation'] objs_info_dicts.update({imgs_file_list[idx]: data}) - return imgs_file_list, imgs_semseg_file_list, imgs_insseg_file_list, imgs_ann_file_list, \ - classes, classes_in_person, classes_dict,\ - n_objs_list, objs_info_list, objs_info_dicts + return imgs_file_list, imgs_semseg_file_list, imgs_insseg_file_list, imgs_ann_file_list, classes, classes_in_person, classes_dict, n_objs_list, objs_info_list, objs_info_dicts def load_mpii_pose_dataset(path='data', is_16_pos_only=False): diff --git a/tensorlayer/layers/convolution.py b/tensorlayer/layers/convolution.py index 0d47f861b..fdbfefe85 100644 --- a/tensorlayer/layers/convolution.py +++ b/tensorlayer/layers/convolution.py @@ -1318,8 +1318,7 @@ def deconv2d_bilinear_upsampling_initializer(shape): center = scale_factor - 0.5 for x in range(filter_size): for y in range(filter_size): - bilinear_kernel[x, y] = (1 - abs(x - center) / scale_factor) * \ - (1 - abs(y - center) / scale_factor) + bilinear_kernel[x, y] = (1 - abs(x - center) / scale_factor) * (1 - abs(y - center) / scale_factor) weights = np.zeros((filter_size, filter_size, num_out_channels, num_in_channels)) for i in range(num_out_channels): weights[:, :, i, i] = bilinear_kernel diff --git a/tensorlayer/layers/core.py b/tensorlayer/layers/core.py index ad5fcc4b7..b502fcc4c 100644 --- a/tensorlayer/layers/core.py +++ b/tensorlayer/layers/core.py @@ -1074,8 +1074,9 @@ def __init__( # ce = cost.cross_entropy(y, x_recon) # : list , list , Error (only be used for softmax output) # ce = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, x_recon)) # : list , list , Error (only be used for softmax output) # ce = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(y, x_recon)) # : list , index , Error (only be used for softmax output) - L2_w = tf.contrib.layers.l2_regularizer(lambda_l2_w)(self.train_params[0]) \ - + tf.contrib.layers.l2_regularizer(lambda_l2_w)(self.train_params[2]) # faster than the code below + L2_w = tf.contrib.layers.l2_regularizer(lambda_l2_w)( + self.train_params[0] + ) + tf.contrib.layers.l2_regularizer(lambda_l2_w)(self.train_params[2]) # faster than the code below # L2_w = lambda_l2_w * tf.reduce_mean(tf.square(self.train_params[0])) + lambda_l2_w * tf.reduce_mean( tf.square(self.train_params[2])) # DropNeuro diff --git a/tensorlayer/layers/recurrent.py b/tensorlayer/layers/recurrent.py index fb32005d9..8db172cea 100644 --- a/tensorlayer/layers/recurrent.py +++ b/tensorlayer/layers/recurrent.py @@ -391,10 +391,9 @@ def __init__( DropoutWrapper_fn = tf.contrib.rnn.DropoutWrapper except Exception: DropoutWrapper_fn = tf.nn.rnn_cell.DropoutWrapper - cell_creator = 
lambda is_last=True: \ - DropoutWrapper_fn(rnn_creator(), - input_keep_prob=in_keep_prob, - output_keep_prob=out_keep_prob if is_last else 1.0) + cell_creator = lambda is_last=True: DropoutWrapper_fn( + rnn_creator(), input_keep_prob=in_keep_prob, output_keep_prob=out_keep_prob if is_last else 1.0 + ) else: cell_creator = rnn_creator self.fw_cell = cell_creator() @@ -1134,10 +1133,9 @@ def __init__( # cell_instance_fn1(), # input_keep_prob=in_keep_prob, # output_keep_prob=out_keep_prob) - cell_creator = lambda is_last=True: \ - DropoutWrapper_fn(rnn_creator(), - input_keep_prob=in_keep_prob, - output_keep_prob=out_keep_prob if is_last else 1.0) + cell_creator = lambda is_last=True: DropoutWrapper_fn( + rnn_creator(), input_keep_prob=in_keep_prob, output_keep_prob=out_keep_prob if is_last else 1.0 + ) else: cell_creator = rnn_creator self.cell = cell_creator() @@ -1400,10 +1398,9 @@ def __init__( # cell_instance_fn1(), # input_keep_prob=in_keep_prob, # output_keep_prob=out_keep_prob) - cell_creator = lambda is_last=True: \ - DropoutWrapper_fn(rnn_creator(), - input_keep_prob=in_keep_prob, - output_keep_prob=out_keep_prob if is_last else 1.0) + cell_creator = lambda is_last=True: DropoutWrapper_fn( + rnn_creator(), input_keep_prob=in_keep_prob, output_keep_prob=out_keep_prob if is_last else 1.0 + ) else: cell_creator = rnn_creator diff --git a/tensorlayer/third_party/roi_pooling/test_roi_layer.py b/tensorlayer/third_party/roi_pooling/test_roi_layer.py index d0e27449a..301b80e92 100644 --- a/tensorlayer/third_party/roi_pooling/test_roi_layer.py +++ b/tensorlayer/third_party/roi_pooling/test_roi_layer.py @@ -1,6 +1,5 @@ from tensorlayer.layers import * -from tensorlayer.third_party.roi_pooling.roi_pooling.roi_pooling_ops import \ - roi_pooling +from tensorlayer.third_party.roi_pooling.roi_pooling.roi_pooling_ops import roi_pooling # from roi_pooling.roi_pooling_ops import roi_pooling diff --git a/tensorlayer/visualize.py b/tensorlayer/visualize.py index 4e7f44133..5b0fbf8ef 100644 --- a/tensorlayer/visualize.py +++ b/tensorlayer/visualize.py @@ -584,8 +584,7 @@ def plot_with_labels(low_dim_embs, labels, figsize=(18, 18), second=5, saveable= # plot_only = 500 low_dim_embs = tsne.fit_transform(embeddings[:plot_only, :]) labels = [reverse_dictionary[i] for i in xrange(plot_only)] - plot_with_labels(low_dim_embs, labels, second=second, saveable=saveable, \ - name=name, fig_idx=fig_idx) + plot_with_labels(low_dim_embs, labels, second=second, saveable=saveable, name=name, fig_idx=fig_idx) except ImportError: logging.info("Please install sklearn and matplotlib to visualize embeddings.") diff --git a/tests/requirements.txt b/tests/requirements.txt index 43144cf40..2af4e6074 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -5,4 +5,5 @@ pytest>=3.4,<3.5 pytest-cache>=1.0,<1.1 pytest-cov>=2.5,<2.6 pytest-xdist>=1.22,<1.23 +sphinx>=1.7,<1.8 yapf>=0.20,<0.21 \ No newline at end of file diff --git a/tests/test_documentation.py b/tests/test_documentation.py new file mode 100644 index 000000000..10f7a64ff --- /dev/null +++ b/tests/test_documentation.py @@ -0,0 +1,44 @@ +import unittest +from sphinx.application import Sphinx + + +class DocTest(unittest.TestCase): + source_dir = u'docs/' + config_dir = u'docs/' + output_dir = u'docs/build' + doctree_dir = u'docs/build/doctrees' + + all_files = True + + def test_html_documentation(self): + app = Sphinx( + self.source_dir, + self.config_dir, + self.output_dir, + self.doctree_dir, + buildername='html', + warningiserror=True, + ) + 
app.build(force_all=self.all_files) + # TODO: additional checks here if needed + + def test_text_documentation(self): + # The same, but with different buildername + app = Sphinx( + self.source_dir, + self.config_dir, + self.output_dir, + self.doctree_dir, + buildername='text', + warningiserror=False, + ) + app.build(force_all=self.all_files) + # TODO: additional checks if needed + + def tearDown(self): + # TODO: clean up the output directory + pass + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_layers_spatial_transformer.py b/tests/test_layers_spatial_transformer.py index b415b5a04..b85442fdd 100644 --- a/tests/test_layers_spatial_transformer.py +++ b/tests/test_layers_spatial_transformer.py @@ -29,6 +29,7 @@ def model(x, is_train, reuse): n = tl.layers.Conv2d( n, n_filter=16, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, padding='SAME', name='conv1' ) + n = tl.layers.Conv2d( n, n_filter=16, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, padding='SAME', name='conv2' ) diff --git a/tests/test_yapf_format.py b/tests/test_yapf_format.py index 1eb6cc497..b51ffa9db 100644 --- a/tests/test_yapf_format.py +++ b/tests/test_yapf_format.py @@ -39,7 +39,7 @@ def test_files_format(self): code = _read_utf_8_file(file) # https://pypi.python.org/pypi/yapf/0.20.2#example-as-a-module - diff, changed = FormatCode(code, filename=file, style_config='.style.yapf', print_diff=True) + diff, changed = FormatCode(code, filename=file, style_config='setup.cfg', print_diff=True) if changed: print(diff) @@ -51,7 +51,7 @@ def test_files_format(self): if self.badly_formatted_files: for filename in self.badly_formatted_files: - str_err += 'yapf -i --style=.style.yapf %s\n' % filename + str_err += 'yapf -i --style=setup.cfg %s\n' % filename str_err = "\n======================================================================================\n" \ "Bad Coding Style: %d file(s) need to be formatted, run the following commands to fix: \n%s" \
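
Usage note for contributors: below is a minimal, hypothetical sketch of how the relocated yapf configuration can be exercised locally. It only mirrors the FormatCode(..., style_config='setup.cfg', print_diff=True) call that tests/test_yapf_format.py makes above and the "yapf -i --style=setup.cfg" command it prints; the target path is an illustration only, and yapf>=0.20,<0.21 (as pinned in tests/requirements.txt) is assumed.

import io

from yapf.yapflib.yapf_api import FormatCode

TARGET = 'example/tutorial_mnist.py'  # hypothetical file to check

# Read the file and ask yapf for a diff against the [yapf] section of setup.cfg,
# which now replaces the deleted .style.yapf as the single style source.
with io.open(TARGET, encoding='utf-8') as f:
    code = f.read()

diff, changed = FormatCode(code, filename=TARGET, style_config='setup.cfg', print_diff=True)

if changed:
    print(diff)  # apply the fix with: yapf -i --style=setup.cfg example/tutorial_mnist.py
else:
    print('%s already matches the project style' % TARGET)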