Use Session instead of InteractiveSession
Avoids weird side effects after the session closes unexpectedly.
drasmuss committed May 11, 2017
1 parent fbeda48 commit 4460a33
Showing 5 changed files with 28 additions and 23 deletions.
CHANGES.rst (2 additions, 0 deletions)

@@ -33,6 +33,8 @@ Release History
 - Fix bug in uneven step_blocks rounding
 - Fix bug in Simulator.print_params
 - Fix bug related to merging of learning rule with different dimensionality
+- Use tf.Session instead of tf.InteractiveSession, to avoid strange side
+  effects if the simulator isn't closed properly


 0.3.0 (April 25, 2017)
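
For context on the entry above, here is a minimal sketch (not part of the commit) of the difference under TensorFlow 1.x, which the project targeted at the time. tf.InteractiveSession installs itself as the process-wide default session when constructed, so its state can linger if it is never closed properly; a plain tf.Session is only the default where explicitly requested.

import tensorflow as tf  # TensorFlow 1.x semantics assumed

graph = tf.Graph()
with graph.as_default():
    c = tf.constant(1.0)

# InteractiveSession registers itself as the global default session on
# construction, so bare Tensor.eval() calls silently depend on it.
sess = tf.InteractiveSession(graph=graph)
print(c.eval())  # works, via hidden global state
sess.close()     # if this is skipped, later code still sees it as default

# A plain Session is only the default inside an explicit block:
sess = tf.Session(graph=graph)
print(sess.run(c))   # explicit, no global state
with sess.as_default():
    print(c.eval())  # default only within this block
sess.close()
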
nengo_dl/simulator.py (9 additions, 7 deletions)

@@ -181,8 +181,7 @@ def reset(self, seed=None):
             log_device_placement=False,
         )

-        self.sess = tf.InteractiveSession(graph=self.tensor_graph.graph,
-                                          config=config)
+        self.sess = tf.Session(graph=self.tensor_graph.graph, config=config)
         self.closed = False

         # initialize variables
@@ -679,7 +678,8 @@ def save_params(self, path):
             raise SimulationError("Simulation has been closed, cannot save "
                                   "parameters")

-        path = tf.train.Saver().save(self.sess, path)
+        with self.tensor_graph.graph.as_default():
+            path = tf.train.Saver().save(self.sess, path)
         logger.info("Model parameters saved to %s", path)

     def load_params(self, path):
@@ -694,7 +694,8 @@ def load_params(self, path):
             raise SimulationError("Simulation has been closed, cannot load "
                                   "parameters")

-        tf.train.Saver().restore(self.sess, path)
+        with self.tensor_graph.graph.as_default():
+            tf.train.Saver().restore(self.sess, path)

     def print_params(self, msg=None):
         """Print current values of trainable network parameters.
@@ -861,9 +862,10 @@ def check_gradients(self, outputs=None, atol=1e-5, rtol=1e-3):
             dx, dy = gradient_checker._compute_dx_and_dy(
                 inp, out, out_shape)

-            analytic = gradient_checker._compute_theoretical_jacobian(
-                inp, inp_shape, np.zeros(inp_shape), dy, out_shape, dx,
-                extra_feed_dict=feed)
+            with self.sess.as_default():
+                analytic = gradient_checker._compute_theoretical_jacobian(
+                    inp, inp_shape, np.zeros(inp_shape), dy, out_shape, dx,
+                    extra_feed_dict=feed)

             if np.any(np.isnan(analytic)) or np.any(np.isnan(numeric)):
                 raise SimulationError("NaNs detected in gradient")
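
The simulator.py hunks above share one theme: without InteractiveSession, the simulator's graph and session are no longer implicitly the defaults, so code that relies on TF 1.x defaults must push them explicitly. tf.train.Saver() builds its save/restore ops in the current default graph, and TensorFlow's gradient-checker helpers call Tensor.eval() internally, which needs a default session. A sketch of the pattern (the graph, variable, and checkpoint path below are illustrative, not from the commit):

import tensorflow as tf  # TensorFlow 1.x semantics assumed

graph = tf.Graph()
with graph.as_default():
    v = tf.Variable([1.0, 2.0], name="v")
    init = tf.global_variables_initializer()

sess = tf.Session(graph=graph)
sess.run(init)

# Saver() adds ops to the *default* graph, which is not `graph` out
# here, so the graph must be made default around its construction:
with graph.as_default():
    saver = tf.train.Saver()
path = saver.save(sess, "/tmp/model.ckpt")  # hypothetical path

# Helpers that call Tensor.eval() internally need a default *session*:
with sess.as_default():
    print(v.eval())  # [1. 2.]
sess.close()
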
nengo_dl/tensor_graph.py (3 additions, 3 deletions)

@@ -464,9 +464,9 @@ def build_loss(self, objective, targets):
             else:
                 raise NotImplementedError

-            # average loss across probes (note: this will also average across
-            # the output of `objective` if it doesn't return a scalar)
-            loss = tf.reduce_mean(loss)
+        # average loss across probes (note: this will also average across
+        # the output of `objective` if it doesn't return a scalar)
+        loss = tf.reduce_mean(loss)

         self.losses[(objective, targets)] = loss
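
An aside on the comment in that hunk: tf.reduce_mean applied to a Python list of per-probe losses autopacks them into a single tensor, then averages over every element, so a non-scalar objective output is averaged away as well. A tiny sketch (the values are illustrative, not from the commit):

import tensorflow as tf  # TensorFlow 1.x semantics assumed

# Two per-probe losses; here each objective returned a vector rather
# than a scalar (shapes must match for the list to be packed):
losses = [tf.constant([2.0, 4.0]), tf.constant([1.0, 3.0])]

with tf.Session() as sess:
    # reduce_mean averages across probes *and* across each loss's
    # elements: (2 + 4 + 1 + 3) / 4 = 2.5
    print(sess.run(tf.reduce_mean(losses)))
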
nengo_dl/tests/test_tensorflow_patch.py (10 additions, 9 deletions)

@@ -38,13 +38,13 @@ def test_dynamic_stitch():


 def test_state_grads():
-    v = tf.Variable([0., 0., 0.])
-    x = tf.ones((3,))
+    with tf.Session() as sess:
+        v = tf.Variable([0., 0., 0.])
+        x = tf.ones((3,))

-    y0 = tf.assign(v, x)
-    y1 = tf.assign_add(v, x)
+        y0 = tf.assign(v, x)
+        y1 = tf.assign_add(v, x)

-    with tf.Session() as sess:
         # TODO: the ._ref() is necessary due to something in tensorflow 1.0.0,
         # can remove if we upgrade requirements
         grad0 = tf.gradients(y0, [v._ref(), x])
@@ -57,11 +57,12 @@ def test_state_grads():
     assert np.allclose(grad_vals[1][0], 1)
     assert np.allclose(grad_vals[1][1], 1)

-    x = tf.ones((1,))
-    y0 = tf.scatter_update(v, [0], x)
-    y1 = tf.scatter_add(v, [0], x)
-
     with tf.Session() as sess:
+        v = tf.Variable([0., 0., 0.])
+        x = tf.ones((1,))
+        y0 = tf.scatter_update(v, [0], x)
+        y1 = tf.scatter_add(v, [0], x)
+
         grad0 = tf.gradients(y0, [v._ref(), x])
         grad1 = tf.gradients(y1, [v._ref(), x])
         grad_vals = sess.run((grad0, grad1))
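
The test restructuring above (and the test_utils.py change below) is the standard TF 1.x replacement for the InteractiveSession style: build and run the ops inside a single with-block, so the session is the default for the duration of the test and is closed automatically on exit. A minimal sketch of the pattern (the test name is hypothetical):

import numpy as np
import tensorflow as tf  # TensorFlow 1.x semantics assumed

def test_assign_pattern():  # hypothetical name, mirrors the tests above
    # Session.__enter__ installs `sess` as the default session and
    # Session.__exit__ closes it, so no state leaks into later tests.
    with tf.Session() as sess:
        v = tf.Variable([0., 0., 0.])
        x = tf.ones((3,))
        y = tf.assign(v, x)
        sess.run(tf.global_variables_initializer())
        assert np.allclose(sess.run(y), 1)
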
nengo_dl/tests/test_utils.py (4 additions, 4 deletions)

@@ -102,11 +102,11 @@ def test_print_and_flush(capsys):


 def test_print_op(capsys):
-    x = tf.constant(0)
-    y = utils.print_op(x, "hello")
-    z = y + 0
-
     with tf.Session() as sess:
+        x = tf.constant(0)
+        y = utils.print_op(x, "hello")
+        z = y + 0
+
         sess.run(z)

     out, _ = capsys.readouterr()
