I successfully installed Jupyter, SciPy, Matplotlib, and all the other dependencies necessary to run the notebooks here:
https://github.com/Hvass-Labs/TensorFlow-Tutorials
I'm not convinced I am actually using the GPU on the Nano board, because everything runs quite slowly. However, tegrastats shows occasional blips of 99% GR3D (GPU) utilization, while CPU usage is more consistent.
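For reference, this is roughly how I checked whether TensorFlow actually sees the GPU (a minimal sketch, assuming the TF 1.x build the tutorials use; I'm only listing devices, nothing fancy):

# Check that TensorFlow sees the Nano's GPU (TF 1.x).
import tensorflow as tf
from tensorflow.python.client import device_lib

# Lists the CPU and GPU devices TensorFlow knows about; a "/device:GPU:0"
# entry should appear if the GPU build is in use.
for device in device_lib.list_local_devices():
    print(device.name, device.device_type)

# Prints True if a CUDA-capable GPU is available to TensorFlow.
print(tf.test.is_gpu_available())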
When I load up lesson 14 (DeepDream), cell 24 begins to execute:
img_result = recursive_optimize(layer_tensor=layer_tensor, image=image,
num_iterations=10, step_size=3.0, rescale_factor=0.7,
num_repeats=4, blend=0.2)
but after recursive level 4, I get this error:
Processing image:
---------------------------------------------------------------------------
ResourceExhaustedError Traceback (most recent call last)
~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1333 try:
-> 1334 return fn(*args)
1335 except errors.OpError as e:
~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(feed_dict, fetch_list, target_list, options, run_metadata)
1318 return self._call_tf_sessionrun(
-> 1319 options, feed_dict, fetch_list, target_list, run_metadata)
1320
~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py in _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list, run_metadata)
1406 self._session, options, feed_dict, fetch_list, target_list,
-> 1407 run_metadata)
1408
ResourceExhaustedError: OOM when allocating tensor with shape[1,77,120,192] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc
[[{{node gradients_6/Square_6_grad/Mul}}]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.
[[{{node gradients_6/conv2d0_pre_relu/conv_grad/Conv2DBackpropInput}}]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.
During handling of the above exception, another exception occurred:
ResourceExhaustedError Traceback (most recent call last)
<ipython-input-24-ab4fa17c6690> in <module>
1 img_result = recursive_optimize(layer_tensor=layer_tensor, image=image,
2 num_iterations=10, step_size=3.0, rescale_factor=0.7,
----> 3 num_repeats=4, blend=0.2)
<ipython-input-17-f3924ae5544f> in recursive_optimize(layer_tensor, image, num_repeats, rescale_factor, blend, num_iterations, step_size, tile_size)
57 num_iterations=num_iterations,
58 step_size=step_size,
---> 59 tile_size=tile_size)
60
61 return img_result
<ipython-input-16-d6efbaa95b77> in optimize_image(layer_tensor, image, num_iterations, step_size, tile_size, show_gradient)
35 # maximize the mean of the given layer-tensor.
36 grad = tiled_gradient(gradient=gradient, image=img,
---> 37 tile_size=tile_size)
38
39 # Blur the gradient with different amounts and add
<ipython-input-15-295b93b138cd> in tiled_gradient(gradient, image, tile_size)
50
51 # Use TensorFlow to calculate the gradient-value.
---> 52 g = session.run(gradient, feed_dict=feed_dict)
53
54 # Normalize the gradient for the tile. This is
~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
927 try:
928 result = self._run(None, fetches, feed_dict, options_ptr,
--> 929 run_metadata_ptr)
930 if run_metadata:
931 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
1150 if final_fetches or final_targets or (handle and feed_dict_tensor):
1151 results = self._do_run(handle, final_targets, final_fetches,
-> 1152 feed_dict_tensor, options, run_metadata)
1153 else:
1154 results = []
~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1326 if handle is None:
1327 return self._do_call(_run_fn, feeds, fetches, targets, options,
-> 1328 run_metadata)
1329 else:
1330 return self._do_call(_prun_fn, handle, feeds, fetches)
~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1346 pass
1347 message = error_interpolation.interpolate(message, self._graph)
-> 1348 raise type(e)(node_def, op, message)
1349
1350 def _extend_graph(self):
ResourceExhaustedError: OOM when allocating tensor with shape[1,77,120,192] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc
[[node gradients_6/Square_6_grad/Mul (defined at /home/stefan/Documents/TensorFlow-Tutorials-master/inception5h.py:167) ]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.
[[node gradients_6/conv2d0_pre_relu/conv_grad/Conv2DBackpropInput (defined at /home/stefan/Documents/TensorFlow-Tutorials-master/inception5h.py:167) ]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.
Caused by op 'gradients_6/Square_6_grad/Mul', defined at:
File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/stefan/.local/lib/python3.6/site-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/home/stefan/.local/lib/python3.6/site-packages/traitlets/config/application.py", line 658, in launch_instance
app.start()
File "/home/stefan/.local/lib/python3.6/site-packages/ipykernel/kernelapp.py", line 505, in start
self.io_loop.start()
File "/home/stefan/.local/lib/python3.6/site-packages/tornado/platform/asyncio.py", line 148, in start
self.asyncio_loop.run_forever()
File "/usr/lib/python3.6/asyncio/base_events.py", line 427, in run_forever
self._run_once()
File "/usr/lib/python3.6/asyncio/base_events.py", line 1440, in _run_once
handle._run()
File "/usr/lib/python3.6/asyncio/events.py", line 145, in _run
self._callback(*self._args)
File "/home/stefan/.local/lib/python3.6/site-packages/tornado/ioloop.py", line 690, in <lambda>
lambda f: self._run_callback(functools.partial(callback, future))
File "/home/stefan/.local/lib/python3.6/site-packages/tornado/ioloop.py", line 743, in _run_callback
ret = callback()
File "/home/stefan/.local/lib/python3.6/site-packages/tornado/gen.py", line 781, in inner
self.run()
File "/home/stefan/.local/lib/python3.6/site-packages/tornado/gen.py", line 742, in run
yielded = self.gen.send(value)
File "/home/stefan/.local/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 357, in process_one
yield gen.maybe_future(dispatch(*args))
File "/home/stefan/.local/lib/python3.6/site-packages/tornado/gen.py", line 209, in wrapper
yielded = next(result)
File "/home/stefan/.local/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 267, in dispatch_shell
yield gen.maybe_future(handler(stream, idents, msg))
File "/home/stefan/.local/lib/python3.6/site-packages/tornado/gen.py", line 209, in wrapper
yielded = next(result)
File "/home/stefan/.local/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 534, in execute_request
user_expressions, allow_stdin,
File "/home/stefan/.local/lib/python3.6/site-packages/tornado/gen.py", line 209, in wrapper
yielded = next(result)
File "/home/stefan/.local/lib/python3.6/site-packages/ipykernel/ipkernel.py", line 294, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/home/stefan/.local/lib/python3.6/site-packages/ipykernel/zmqshell.py", line 536, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/home/stefan/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2848, in run_cell
raw_cell, store_history, silent, shell_futures)
File "/home/stefan/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2874, in _run_cell
return runner(coro)
File "/home/stefan/.local/lib/python3.6/site-packages/IPython/core/async_helpers.py", line 67, in _pseudo_sync_runner
coro.send(None)
File "/home/stefan/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 3049, in run_cell_async
interactivity=interactivity, compiler=compiler, result=result)
File "/home/stefan/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 3214, in run_ast_nodes
if (yield from self.run_code(code, result)):
File "/home/stefan/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 3296, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-24-ab4fa17c6690>", line 3, in <module>
num_repeats=4, blend=0.2)
File "<ipython-input-17-f3924ae5544f>", line 59, in recursive_optimize
tile_size=tile_size)
File "<ipython-input-16-d6efbaa95b77>", line 30, in optimize_image
gradient = model.get_gradient(layer_tensor)
File "/home/stefan/Documents/TensorFlow-Tutorials-master/inception5h.py", line 167, in get_gradient
gradient = tf.gradients(tensor_mean, self.input)[0]
File "/home/stefan/.local/lib/python3.6/site-packages/tensorflow/python/ops/gradients_impl.py", line 664, in gradients
unconnected_gradients)
File "/home/stefan/.local/lib/python3.6/site-packages/tensorflow/python/ops/gradients_impl.py", line 965, in _GradientsHelper
lambda: grad_fn(op, *out_grads))
File "/home/stefan/.local/lib/python3.6/site-packages/tensorflow/python/ops/gradients_impl.py", line 420, in _MaybeCompile
return grad_fn() # Exit early
File "/home/stefan/.local/lib/python3.6/site-packages/tensorflow/python/ops/gradients_impl.py", line 965, in <lambda>
lambda: grad_fn(op, *out_grads))
File "/home/stefan/.local/lib/python3.6/site-packages/tensorflow/python/ops/math_grad.py", line 444, in _SquareGrad
return math_ops.multiply(grad, math_ops.multiply(x, y))
File "/home/stefan/.local/lib/python3.6/site-packages/tensorflow/python/util/dispatch.py", line 180, in wrapper
return target(*args, **kwargs)
File "/home/stefan/.local/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py", line 248, in multiply
return gen_math_ops.mul(x, y, name)
File "/home/stefan/.local/lib/python3.6/site-packages/tensorflow/python/ops/gen_math_ops.py", line 5860, in mul
"Mul", x=x, y=y, name=name)
File "/home/stefan/.local/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 788, in _apply_op_helper
op_def=op_def)
File "/home/stefan/.local/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "/home/stefan/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 3300, in create_op
op_def=op_def)
File "/home/stefan/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1801, in __init__
self._traceback = tf_stack.extract_stack()
...which was originally created as op 'Square_6', defined at:
File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
[elided 28 identical lines from previous traceback]
File "<ipython-input-16-d6efbaa95b77>", line 30, in optimize_image
gradient = model.get_gradient(layer_tensor)
File "/home/stefan/Documents/TensorFlow-Tutorials-master/inception5h.py", line 159, in get_gradient
tensor = tf.square(tensor)
File "/home/stefan/.local/lib/python3.6/site-packages/tensorflow/python/ops/gen_math_ops.py", line 9389, in square
"Square", x=x, name=name)
File "/home/stefan/.local/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 788, in _apply_op_helper
op_def=op_def)
File "/home/stefan/.local/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "/home/stefan/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 3300, in create_op
op_def=op_def)
File "/home/stefan/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1801, in __init__
self._traceback = tf_stack.extract_stack()
ResourceExhaustedError (see above for traceback): OOM when allocating tensor with shape[1,77,120,192] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc
[[node gradients_6/Square_6_grad/Mul (defined at /home/stefan/Documents/TensorFlow-Tutorials-master/inception5h.py:167) ]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.
[[node gradients_6/conv2d0_pre_relu/conv_grad/Conv2DBackpropInput (defined at /home/stefan/Documents/TensorFlow-Tutorials-master/inception5h.py:167) ]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.
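In case it's relevant, this is how I read the hint about report_tensor_allocations_upon_oom; a sketch of what I think I would change inside the notebook's tiled_gradient() (the session/gradient/feed_dict names are the notebook's, the rest is my assumption, and I have not tried it yet):

# Sketch: ask TensorFlow to report allocated tensors when an OOM happens (TF 1.x).
run_options = tf.RunOptions(report_tensor_allocations_upon_oom=True)

# Then fetch the per-tile gradient with those options:
g = session.run(gradient, feed_dict=feed_dict, options=run_options)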
Is this an issue with the TensorFlow implementation, or am I just running out of RAM?
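P.S. One workaround I was considering is telling TensorFlow to allocate GPU memory on demand instead of grabbing it all up front; a minimal sketch of what I mean, assuming the tutorial's TF 1.x session setup with model.graph from inception5h (I have not verified this helps, given that the Nano shares its RAM between CPU and GPU):

# Sketch: create the session with on-demand GPU memory allocation (TF 1.x).
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # allocate GPU memory only as needed
# Alternatively, cap the fraction of GPU memory TensorFlow may use:
# config.gpu_options.per_process_gpu_memory_fraction = 0.5

session = tf.Session(graph=model.graph, config=config)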