diff --git a/dali/python/nvidia/dali/data_node.py b/dali/python/nvidia/dali/data_node.py
index ebd8312dbb..3892a1ce5c 100644
--- a/dali/python/nvidia/dali/data_node.py
+++ b/dali/python/nvidia/dali/data_node.py
@@ -81,6 +81,7 @@ def gpu(self) -> DataNode:
         return self._to_backend("gpu")
 
     def cpu(self) -> DataNode:
+        self._check_gpu2cpu()
         return self._to_backend("cpu")
 
     # Note: Regardless of whether we want the cpu or gpu version
@@ -259,6 +260,32 @@ def process_index(idx, dim):
         else:
             return nvidia.dali.fn.expand_dims(sliced, axes=new_axes, new_axis_names=new_axis_names)
 
+    def shape(self, *, dtype=None, device="cpu"):
+        """Returns the run-time shapes of this DataNode as a new DataNode
+
+        Parameters
+        ----------
+        dtype : DALIDataType, optional
+            If specified, the shape will be converted to this data type; defaults to INT64.
+        device : str, optional
+            The device ("cpu" or "gpu") where the result is returned; defaults to CPU.
+        """
+        from . import fn
+
+        if device == "cpu":
+            self._check_gpu2cpu()
+        return fn.shapes(self, dtype=dtype, device=device)
+
+    def _check_gpu2cpu(self):
+        if self.device == "gpu" and self.source and self.source.pipeline:
+            if not self.source.pipeline._exec_dynamic:
+                raise RuntimeError(
+                    "This pipeline doesn't support transition from GPU to CPU.\n"
+                    'To enable GPU->CPU transitions, use the experimental "dynamic" executor.\n'
+                    "Specify experimental_exec_dynamic=True in your Pipeline constructor or "
+                    "@pipeline_def."
+                )
+
 
 
 not_iterable(DataNode)
diff --git a/dali/test/python/test_pipeline.py b/dali/test/python/test_pipeline.py
index 0a6456666e..a88473a1d9 100644
--- a/dali/test/python/test_pipeline.py
+++ b/dali/test/python/test_pipeline.py
@@ -2257,18 +2257,24 @@ def pdef():
         enc, _ = fn.readers.file(file_root=jpeg_folder)
         img = fn.decoders.image(enc, device="mixed")
         peek = fn.peek_image_shape(enc)
-        return peek, fn.shapes(img, device="cpu"), fn.shapes(img.cpu())
+        shapes_of_gpu = fn.shapes(img, device="cpu")
+        shapes_of_cpu = fn.shapes(img.cpu())
+        return peek, shapes_of_gpu, shapes_of_cpu, img.shape(), img.cpu().shape()
 
     pipe = pdef()
     pipe.build()
     for i in range(10):
-        peek, shape_of_gpu, shape_of_cpu = pipe.run()
+        peek, shape_of_gpu, shape_of_cpu, shape_func_gpu, shape_func_cpu = pipe.run()
         # all results must be CPU tensor lists
         assert isinstance(peek, dali.backend_impl.TensorListCPU)
         assert isinstance(shape_of_gpu, dali.backend_impl.TensorListCPU)
         assert isinstance(shape_of_cpu, dali.backend_impl.TensorListCPU)
+        assert isinstance(shape_func_gpu, dali.backend_impl.TensorListCPU)
+        assert isinstance(shape_func_cpu, dali.backend_impl.TensorListCPU)
         check_batch(shape_of_gpu, peek, bs, 0, 0)
         check_batch(shape_of_cpu, peek, bs, 0, 0)
+        check_batch(shape_func_gpu, peek, bs, 0, 0)
+        check_batch(shape_func_cpu, peek, bs, 0, 0)
 
 
 def test_gpu2cpu_old_exec_error():
@@ -2282,11 +2288,15 @@ def test_gpu2cpu_old_exec_error():
         exec_pipelined=False,
         experimental_exec_dynamic=False,
     )
-    def pdef():
+    def pdef(to_cpu):
         gpu = fn.external_source("input", device="gpu")
-        return gpu.cpu()
+        return to_cpu(gpu)
+
+    with assert_raises(RuntimeError, glob="doesn't support transition from GPU to CPU"):
+        _ = pdef(lambda gpu: gpu.cpu())  # this will raise an error at construction time
+
+    pipe = pdef(lambda gpu: gpu._to_backend("cpu"))  # this will not raise errors until build-time
 
-    pipe = pdef()
     with assert_raises(RuntimeError, glob="doesn't support transition from GPU to CPU"):
         pipe.build()
 