diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml
index 1b657fcd0..d657617b4 100644
--- a/conda/environments/all_cuda-118_arch-x86_64.yaml
+++ b/conda/environments/all_cuda-118_arch-x86_64.yaml
@@ -6,7 +6,6 @@ channels:
 - conda-forge
 - nvidia
 dependencies:
-- GPUtil>=1.4.0
 - c-compiler
 - click
 - cmake>=3.26.4,!=3.30.0
diff --git a/conda/environments/all_cuda-125_arch-x86_64.yaml b/conda/environments/all_cuda-125_arch-x86_64.yaml
index 2f5f50192..a193f5cdd 100644
--- a/conda/environments/all_cuda-125_arch-x86_64.yaml
+++ b/conda/environments/all_cuda-125_arch-x86_64.yaml
@@ -6,7 +6,6 @@ channels:
 - conda-forge
 - nvidia
 dependencies:
-- GPUtil>=1.4.0
 - c-compiler
 - click
 - cmake>=3.26.4,!=3.30.0
diff --git a/dependencies.yaml b/dependencies.yaml
index e8d5f8aaf..62e2ddefe 100644
--- a/dependencies.yaml
+++ b/dependencies.yaml
@@ -295,7 +295,6 @@ dependencies:
     common:
       - output_types: [conda, requirements, pyproject]
         packages:
-          - GPUtil>=1.4.0
           - psutil>=5.8.0
           - pytest>=6.2.4,<8.0.0a0
           - pytest-cov>=2.12.1
diff --git a/python/cucim/pyproject.toml b/python/cucim/pyproject.toml
index 4537f3df6..25248b3b2 100644
--- a/python/cucim/pyproject.toml
+++ b/python/cucim/pyproject.toml
@@ -57,7 +57,6 @@ Tracker = "https://github.com/rapidsai/cucim/issues"
 
 [project.optional-dependencies]
 test = [
-    "GPUtil>=1.4.0",
     "imagecodecs>=2021.6.8; platform_machine=='x86_64'",
     "matplotlib",
     "numpydoc>=1.5",
diff --git a/python/cucim/tests/performance/clara/test_read_region_memory_usage.py b/python/cucim/tests/performance/clara/test_read_region_memory_usage.py
index 8f259b910..9b17b1549 100644
--- a/python/cucim/tests/performance/clara/test_read_region_memory_usage.py
+++ b/python/cucim/tests/performance/clara/test_read_region_memory_usage.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 #
 
+import cupy as cp
 import pytest
 
 from ...util.io import open_image_cucim
@@ -22,23 +23,24 @@
 
 
 def test_read_region_cuda_memleak(testimg_tiff_stripe_4096x4096_256_jpeg):
-    import GPUtil
-
-    gpus = GPUtil.getGPUs()
-
-    if len(gpus) == 0:
+    def get_used_gpu_memory_mib():
+        """Get the used GPU memory in MiB."""
+        dev = cp.cuda.Device()
+        free, total = dev.mem_info
+        memory_used = (total - free) / (2**20)
+        return memory_used
+
+    num_gpus = cp.cuda.runtime.getDeviceCount()
+    if num_gpus == 0:
         pytest.skip("No gpu available")
 
     img = open_image_cucim(testimg_tiff_stripe_4096x4096_256_jpeg)
 
-    gpu = gpus[0]
-    mem_usage_history = [gpu.memoryUsed]
+    mem_usage_history = [get_used_gpu_memory_mib()]
 
     for i in range(10):
         _ = img.read_region(device="cuda")
-        gpus = GPUtil.getGPUs()
-        gpu = gpus[0]
-        mem_usage_history.append(gpu.memoryUsed)
+        mem_usage_history.append(get_used_gpu_memory_mib())
 
     print(mem_usage_history)
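
Note: a minimal standalone sketch (not part of the patch) of the cupy-based query that takes over from GPUtil in the test above. cupy.cuda.Device().mem_info wraps cudaMemGetInfo and returns (free_bytes, total_bytes) for the current device, so (total - free) / 2**20 gives roughly the same used-memory-in-MiB figure that GPUtil's gpu.memoryUsed reported.

    # Sketch only; assumes cupy is installed with a working CUDA runtime.
    import cupy as cp


    def get_used_gpu_memory_mib() -> float:
        # mem_info is (free_bytes, total_bytes) from cudaMemGetInfo
        free, total = cp.cuda.Device().mem_info
        return (total - free) / (2**20)


    if cp.cuda.runtime.getDeviceCount() > 0:
        print(f"Used GPU memory: {get_used_gpu_memory_mib():.1f} MiB")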