diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 45da02a4..700f27ee 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,6 +7,8 @@ ci: autoupdate_schedule: quarterly # submodules: true +exclude: '.*\.ipynb$' + repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 diff --git a/ci/run_premerge_cpu.sh b/ci/run_premerge_cpu.sh index 62ba4390..8215cdc4 100755 --- a/ci/run_premerge_cpu.sh +++ b/ci/run_premerge_cpu.sh @@ -39,6 +39,8 @@ remove_pipenv() { echo "removing pip environment" pipenv --rm rm Pipfile Pipfile.lock + pipenv --clear + df -h } verify_bundle() { diff --git a/ci/unit_tests/test_endoscopic_tool_segmentation.py b/ci/unit_tests/test_endoscopic_tool_segmentation.py new file mode 100644 index 00000000..be74e443 --- /dev/null +++ b/ci/unit_tests/test_endoscopic_tool_segmentation.py @@ -0,0 +1,108 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import tempfile +import unittest + +import numpy as np +from monai.bundle import ConfigWorkflow +from monai.data import PILWriter +from parameterized import parameterized +from utils import check_workflow + +TEST_CASE_1 = [ # train, evaluate + { + "bundle_root": "models/endoscopic_tool_segmentation", + "train#trainer#max_epochs": 2, + "train#dataloader#num_workers": 1, + "validate#dataloader#num_workers": 1, + "train#deterministic_transforms#3#spatial_size": [32, 32], + } +] + +TEST_CASE_2 = [ # inference + { + "bundle_root": "models/endoscopic_tool_segmentation", + "handlers#0#_disabled_": True, + "preprocessing#transforms#2#spatial_size": [32, 32], + } +] + + +class TestEndoscopicSeg(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + dataset_size = 10 + writer = PILWriter(np.uint8) + shape = (736, 480) + for mode in ["train", "val", "test"]: + for sub_folder in ["inbody", "outbody"]: + sample_dir = os.path.join(self.dataset_dir, f"{mode}/{sub_folder}") + os.makedirs(sample_dir) + for s in range(dataset_size): + image = np.random.randint(low=0, high=5, size=(3, *shape)).astype(np.int8) + image_filename = os.path.join(sample_dir, f"{sub_folder}_{s}.jpg") + writer.set_data_array(image, channel_dim=0) + writer.write(image_filename, verbose=True) + if mode != "test": + label = np.random.randint(low=0, high=5, size=shape).astype(np.int8) + label_filename = os.path.join(sample_dir, f"{sub_folder}_{s}_seg.jpg") + writer.set_data_array(label, channel_dim=None) + writer.write(label_filename, verbose=True) + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_train_eval_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + eval_file = os.path.join(bundle_root, "configs/evaluate.json") + + trainer = ConfigWorkflow( + workflow="train", + config_file=train_file, + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + 
meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(trainer, check_properties=True) + + validator = ConfigWorkflow( + # override train.json, thus set the workflow to "train" rather than "eval" + workflow="train", + config_file=[train_file, eval_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(validator, check_properties=True) + + @parameterized.expand([TEST_CASE_2]) + def test_infer_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + + inferrer = ConfigWorkflow( + workflow="infer", + config_file=os.path.join(bundle_root, "configs/inference.json"), + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(inferrer, check_properties=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/ci/unit_tests/test_endoscopic_tool_segmentation_dist.py b/ci/unit_tests/test_endoscopic_tool_segmentation_dist.py new file mode 100644 index 00000000..42a8d0ac --- /dev/null +++ b/ci/unit_tests/test_endoscopic_tool_segmentation_dist.py @@ -0,0 +1,119 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import shutil +import tempfile +import unittest + +import numpy as np +import torch +from monai.data import PILWriter +from parameterized import parameterized +from utils import export_config_and_run_mgpu_cmd + +TEST_CASE_1 = [ + { + "bundle_root": "models/endoscopic_tool_segmentation", + "train#trainer#max_epochs": 1, + "train#dataloader#num_workers": 1, + "validate#dataloader#num_workers": 1, + "train#deterministic_transforms#3#spatial_size": [32, 32], + } +] + +TEST_CASE_2 = [ + { + "bundle_root": "models/endoscopic_tool_segmentation", + "validate#dataloader#num_workers": 4, + "train#deterministic_transforms#3#spatial_size": [32, 32], + } +] + + +def test_order(test_name1, test_name2): + def get_order(name): + if "train" in name: + return 1 + if "eval" in name: + return 2 + if "infer" in name: + return 3 + return 4 + + return get_order(test_name1) - get_order(test_name2) + + +class TestEndoscopicSegMGPU(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + dataset_size = 10 + writer = PILWriter(np.uint8) + shape = (256, 256) + for mode in ["train", "val"]: + for sub_folder in ["inbody", "outbody"]: + sample_dir = os.path.join(self.dataset_dir, f"{mode}/{sub_folder}") + os.makedirs(sample_dir) + for s in range(dataset_size): + image = np.random.randint(low=0, high=5, size=(3, *shape)).astype(np.int8) + image_filename = os.path.join(sample_dir, f"{sub_folder}_{s}.jpg") + writer.set_data_array(image, channel_dim=0) + writer.write(image_filename, verbose=True) + label = np.random.randint(low=0, high=5, size=shape).astype(np.int8) + label_filename = os.path.join(sample_dir, f"{sub_folder}_{s}_seg.jpg") + writer.set_data_array(label, channel_dim=None) + writer.write(label_filename, verbose=True) + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_train_mgpu_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + mgpu_train_file = os.path.join(bundle_root, "configs/multi_gpu_train.json") + output_path = os.path.join(bundle_root, "configs/train_override.json") + n_gpu = torch.cuda.device_count() + export_config_and_run_mgpu_cmd( + config_file=[train_file, mgpu_train_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + override_dict=override, + output_path=output_path, + ngpu=n_gpu, + check_config=True, + ) + + @parameterized.expand([TEST_CASE_2]) + def test_evaluate_mgpu_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + evaluate_file = os.path.join(bundle_root, "configs/evaluate.json") + mgpu_evaluate_file = os.path.join(bundle_root, "configs/multi_gpu_evaluate.json") + output_path = os.path.join(bundle_root, "configs/evaluate_override.json") + n_gpu = torch.cuda.device_count() + export_config_and_run_mgpu_cmd( + config_file=[train_file, evaluate_file, mgpu_evaluate_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + override_dict=override, + output_path=output_path, + ngpu=n_gpu, + check_config=True, + ) + + +if __name__ == "__main__": + loader = unittest.TestLoader() + loader.sortTestMethodsUsing = test_order + unittest.main(testLoader=loader) diff --git 
a/ci/unit_tests/test_lung_nodule_ct_detection.py b/ci/unit_tests/test_lung_nodule_ct_detection.py new file mode 100644 index 00000000..aa091475 --- /dev/null +++ b/ci/unit_tests/test_lung_nodule_ct_detection.py @@ -0,0 +1,126 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +import shutil +import sys +import tempfile +import unittest + +import nibabel as nib +import numpy as np +from monai.bundle import ConfigWorkflow +from monai.data import create_test_image_3d +from monai.utils import set_determinism +from parameterized import parameterized +from utils import check_workflow + +set_determinism(123) + +TEST_CASE_1 = [ # train, evaluate + { + "bundle_root": "models/lung_nodule_ct_detection", + "epochs": 3, + "batch_size": 1, + "val_interval": 2, + "train#dataloader#num_workers": 1, + "validate#dataloader#num_workers": 1, + } +] + +TEST_CASE_2 = [{"bundle_root": "models/lung_nodule_ct_detection"}] # inference + + +def test_order(test_name1, test_name2): + def get_order(name): + if "train" in name: + return 1 + if "eval" in name: + return 2 + if "infer" in name: + return 3 + return 4 + + return get_order(test_name1) - get_order(test_name2) + + +class TestLungNoduleDetection(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + dataset_size = 3 + train_patch_size = (192, 192, 80) + dataset_json = {} + + img, _ = create_test_image_3d(train_patch_size[0], train_patch_size[1], train_patch_size[2], 2) + image_filename = os.path.join(self.dataset_dir, "image.nii.gz") + nib.save(nib.Nifti1Image(img, np.eye(4)), image_filename) + label = [0, 0] + box = [[108, 119, 131, 142, 26, 37], [132, 147, 149, 164, 25, 40]] + data = {"box": box, "image": image_filename, "label": label} + dataset_json["training"] = [data for _ in range(dataset_size)] + dataset_json["validation"] = [data for _ in range(dataset_size)] + + self.ds_file = os.path.join(self.dataset_dir, "dataset.json") + with open(self.ds_file, "w") as fp: + json.dump(dataset_json, fp, indent=2) + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_train_eval_config(self, override): + override["dataset_dir"] = self.dataset_dir + override["data_list_file_path"] = self.ds_file + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + eval_file = os.path.join(bundle_root, "configs/evaluate.json") + + sys.path.append(bundle_root) + trainer = ConfigWorkflow( + workflow="train", + config_file=train_file, + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(trainer, check_properties=False) + + validator = ConfigWorkflow( + # override train.json, thus set the workflow to "train" rather than "eval" + workflow="train", + config_file=[train_file, eval_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, 
"configs/metadata.json"), + **override, + ) + check_workflow(validator, check_properties=False) + + @parameterized.expand([TEST_CASE_2]) + def test_infer_config(self, override): + override["dataset_dir"] = self.dataset_dir + override["data_list_file_path"] = self.ds_file + bundle_root = override["bundle_root"] + + inferrer = ConfigWorkflow( + workflow="infer", + config_file=os.path.join(bundle_root, "configs/inference.json"), + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(inferrer, check_properties=True) + + +if __name__ == "__main__": + loader = unittest.TestLoader() + loader.sortTestMethodsUsing = test_order + unittest.main(testLoader=loader) diff --git a/ci/unit_tests/test_pathology_nuclei_classification.py b/ci/unit_tests/test_pathology_nuclei_classification.py new file mode 100644 index 00000000..736ab579 --- /dev/null +++ b/ci/unit_tests/test_pathology_nuclei_classification.py @@ -0,0 +1,139 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# Copyright (c) MONAI Consortium +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import subprocess +import sys +import tempfile +import unittest + +import numpy as np +from monai.bundle import ConfigWorkflow +from monai.data import create_test_image_2d +from monai.utils import optional_import, set_determinism +from parameterized import parameterized +from utils import check_workflow + +savemat, _ = optional_import("scipy.io", name="savemat") +Image, _ = optional_import("PIL.Image") +set_determinism(10) + +TEST_CASE_1 = [ # train, evaluate + { + "bundle_root": "./models/pathology_nuclei_classification", + "train#trainer#max_epochs": 2, + "train#dataloader#batch_size": 1, + "train#dataloader#num_workers": 1, + "validate#dataloader#num_workers": 1, + "validate#dataloader#batch_size": 1, + } +] + +TEST_CASE_2 = [{"bundle_root": "./models/pathology_nuclei_classification", "handlers#0#_disabled_": True}] # inference + + +def test_order(test_name1, test_name2): + def get_order(name): + if "train" in name: + return 1 + if "eval" in name: + return 2 + if "infer" in name: + return 3 + return 4 + + return get_order(test_name1) - get_order(test_name2) + + +class TestNucleiCls(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + dataset_size = 10 + shape = (1000, 1000) + for sub_folder in ["Train", "Test"]: + sample_dir = os.path.join(self.dataset_dir, sub_folder) + image_dir = os.path.join(sample_dir, "Images") + label_dir = os.path.join(sample_dir, "Labels") + os.makedirs(image_dir) + os.makedirs(label_dir) + for s in range(dataset_size): + img, _ = create_test_image_2d(shape[0], shape[1], 200) + im = Image.fromarray(img * 255).convert("RGB") + image_filename = os.path.join(image_dir, f"{sub_folder}_{s}.png") + im.save(image_filename, "PNG") + + inst_type = np.random.randint(low=0, high=5, size=(100, 1)).astype(np.int8) + inst_centroid = np.random.randint(low=0, high=1000, size=(100, 2)).astype(np.int8) + label = { + "inst_map": (img * 
255).astype(np.int8), + "type_map": (img * 255).astype(np.int8), + "inst_type": inst_type, + "inst_centroid": inst_centroid, + } + label_filename = os.path.join(label_dir, f"{sub_folder}_{s}.mat") + savemat(label_filename, label) + + prepare_datalist_file = "models/pathology_nuclei_classification/scripts/data_process.py" + self.output = os.path.join(self.dataset_dir, "CoNSePNuclei") + cmd = f"python {prepare_datalist_file} --input {self.dataset_dir} --output {self.output}" + call_status = subprocess.run(cmd, shell=True) + call_status.check_returncode() + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_train_eval_config(self, override): + override["dataset_dir"] = self.output + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + eval_file = os.path.join(bundle_root, "configs/evaluate.json") + + trainer = ConfigWorkflow( + workflow="train", + config_file=train_file, + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(trainer, check_properties=True) + + validator = ConfigWorkflow( + # override train.json, thus set the workflow to "train" rather than "eval" + workflow="train", + config_file=[train_file, eval_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(validator, check_properties=True) + + @parameterized.expand([TEST_CASE_2]) + def test_infer_config(self, override): + override["dataset_dir"] = self.output + bundle_root = override["bundle_root"] + + sys.path.append(bundle_root) + inferrer = ConfigWorkflow( + workflow="infer", + config_file=os.path.join(bundle_root, "configs/inference.json"), + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(inferrer, check_properties=True) + + +if __name__ == "__main__": + loader = unittest.TestLoader() + loader.sortTestMethodsUsing = test_order + unittest.main(testLoader=loader) diff --git a/ci/unit_tests/test_pathology_nuclei_classification_dist.py b/ci/unit_tests/test_pathology_nuclei_classification_dist.py new file mode 100644 index 00000000..fc8f58eb --- /dev/null +++ b/ci/unit_tests/test_pathology_nuclei_classification_dist.py @@ -0,0 +1,141 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import shutil +import subprocess +import sys +import tempfile +import unittest + +import numpy as np +import torch +from monai.data import create_test_image_2d +from monai.utils import optional_import, set_determinism +from parameterized import parameterized +from utils import export_config_and_run_mgpu_cmd + +savemat, _ = optional_import("scipy.io", name="savemat") +Image, _ = optional_import("PIL.Image") +set_determinism(123) + +TEST_CASE_1 = [ # mgpu train + { + "bundle_root": "./models/pathology_nuclei_classification", + "train#trainer#max_epochs": 2, + "train#dataloader#batch_size": 1, + "train#dataloader#num_workers": 1, + "validate#dataloader#num_workers": 1, + "validate#dataloader#batch_size": 1, + } +] + +TEST_CASE_2 = [ # mgpu evaluate + { + "bundle_root": "./models/pathology_nuclei_classification", + "train#trainer#max_epochs": 2, + "train#dataloader#batch_size": 1, + } +] + + +def test_order(test_name1, test_name2): + def get_order(name): + if "train" in name: + return 1 + if "eval" in name: + return 2 + return 3 + + return get_order(test_name1) - get_order(test_name2) + + +class TestNucleiClsMGPU(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + self.output = os.path.join(self.dataset_dir, "CoNSePNuclei") + + dataset_size = 10 + shape = (1000, 1000) + for sub_folder in ["Train", "Test"]: + sample_dir = os.path.join(self.dataset_dir, sub_folder) + image_dir = os.path.join(sample_dir, "Images") + label_dir = os.path.join(sample_dir, "Labels") + os.makedirs(image_dir) + os.makedirs(label_dir) + for s in range(dataset_size): + img, _ = create_test_image_2d(shape[0], shape[1], 600) + im = Image.fromarray(img * 255).convert("RGB") + image_filename = os.path.join(image_dir, f"{sub_folder}_{s}.png") + im.save(image_filename, "PNG") + + inst_type = np.random.randint(low=0, high=5, size=(500, 1)).astype(np.int8) + inst_centroid = np.random.randint(low=0, high=1000, size=(500, 2)).astype(np.int8) + label = { + "inst_map": (img * 255).astype(np.int8), + "type_map": (img * 255).astype(np.int8), + "inst_type": inst_type, + "inst_centroid": inst_centroid, + } + label_filename = os.path.join(label_dir, f"{sub_folder}_{s}.mat") + savemat(label_filename, label) + + prepare_datalist_file = "models/pathology_nuclei_classification/scripts/data_process.py" + cmd = f"python {prepare_datalist_file} --input {self.dataset_dir} --output {self.output}" + call_status = subprocess.run(cmd, shell=True) + call_status.check_returncode() + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_train_mgpu_config(self, override): + override["dataset_dir"] = self.output + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + mgpu_train_file = os.path.join(bundle_root, "configs/multi_gpu_train.json") + output_path = os.path.join(bundle_root, "configs/train_override.json") + n_gpu = torch.cuda.device_count() + export_config_and_run_mgpu_cmd( + config_file=[train_file, mgpu_train_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + override_dict=override, + output_path=output_path, + ngpu=n_gpu, + check_config=True, + ) + + @parameterized.expand([TEST_CASE_2]) + def test_evaluate_mgpu_config(self, override): + override["dataset_dir"] = self.output + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + evaluate_file = 
os.path.join(bundle_root, "configs/evaluate.json") + mgpu_evaluate_file = os.path.join(bundle_root, "configs/multi_gpu_evaluate.json") + output_path = os.path.join(bundle_root, "configs/evaluate_override.json") + n_gpu = torch.cuda.device_count() + sys.path.append(bundle_root) + export_config_and_run_mgpu_cmd( + config_file=[train_file, evaluate_file, mgpu_evaluate_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + override_dict=override, + output_path=output_path, + ngpu=n_gpu, + check_config=True, + ) + + +if __name__ == "__main__": + loader = unittest.TestLoader() + loader.sortTestMethodsUsing = test_order + unittest.main(testLoader=loader) diff --git a/ci/unit_tests/test_pathology_nuclei_segmentation_classification.py b/ci/unit_tests/test_pathology_nuclei_segmentation_classification.py new file mode 100644 index 00000000..b7454d95 --- /dev/null +++ b/ci/unit_tests/test_pathology_nuclei_segmentation_classification.py @@ -0,0 +1,124 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import tempfile +import unittest + +import numpy as np +from monai.bundle import ConfigWorkflow +from monai.data import create_test_image_2d +from parameterized import parameterized +from PIL import Image +from utils import check_workflow + +TEST_CASE_1 = [ # train, evaluate + { + "bundle_root": "models/pathology_nuclei_segmentation_classification", + "epochs": 1, + "batch_size": 3, + "train#dataloader#num_workers": 1, + "validate#dataloader#num_workers": 1, + } +] + +TEST_CASE_2 = [ # inference + {"bundle_root": "models/pathology_nuclei_segmentation_classification", "handlers#0#_disabled_": True} +] + + +class TestNucleiSegCls(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + dataset_size = 10 + # train + for mode in ["Train", "Test"]: + train_sample_dir = os.path.join(self.dataset_dir, mode) + os.makedirs(train_sample_dir) + for s in range(dataset_size): + for image_suffix in ["image", "inst_map", "type_map"]: + if image_suffix == "image": + shape = (256, 256, 3) + else: + shape = (256, 256, 1) + test_image = np.random.randint(low=0, high=2, size=shape).astype(np.int8) + image_filename = os.path.join(train_sample_dir, f"{s}_{image_suffix}.npy") + np.save(image_filename, test_image) + + # evaluate + evaluate_sample_dir = os.path.join(self.dataset_dir, "Test") + for s in range(int(dataset_size / 5)): + for image_prefix in ["image", "label"]: + if image_prefix == "image": + shape = (256, 256, 3) + else: + shape = (256, 256, 2) + test_image = np.random.randint(low=0, high=2, size=shape).astype(np.int8) + image_filename = os.path.join(evaluate_sample_dir, f"{image_prefix}_{s}.npy") + np.save(image_filename, test_image) + + # inference + self.infer_sample_dir = os.path.join(self.dataset_dir, "Images") + os.makedirs(self.infer_sample_dir) + for s in range(int(dataset_size / 5)): + img, _ = create_test_image_2d(1000, 1000) + im = 
Image.fromarray(img).convert("RGB") + # img = Image.new("RGB", (1000, 1000)) + image_filename = os.path.join(self.infer_sample_dir, f"image_{s}.png") + im.save(image_filename, "PNG") + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_train_eval_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + eval_file = os.path.join(bundle_root, "configs/evaluate.json") + + trainer = ConfigWorkflow( + workflow="train", + config_file=train_file, + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(trainer, check_properties=False) + + validator = ConfigWorkflow( + # override train.json, thus set the workflow to "train" rather than "eval" + workflow="train", + config_file=[train_file, eval_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(validator, check_properties=False) + + @parameterized.expand([TEST_CASE_2]) + def test_infer_config(self, override): + override["dataset_dir"] = self.infer_sample_dir + bundle_root = override["bundle_root"] + + inferrer = ConfigWorkflow( + workflow="infer", + config_file=os.path.join(bundle_root, "configs/inference.json"), + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(inferrer, check_properties=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/ci/unit_tests/test_pathology_nuclei_segmentation_classification_dist.py b/ci/unit_tests/test_pathology_nuclei_segmentation_classification_dist.py new file mode 100644 index 00000000..274f9ad2 --- /dev/null +++ b/ci/unit_tests/test_pathology_nuclei_segmentation_classification_dist.py @@ -0,0 +1,74 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import shutil +import tempfile +import unittest + +import numpy as np +import torch +from parameterized import parameterized +from utils import export_config_and_run_mgpu_cmd + +TEST_CASE_1 = [ # mgpu train + { + "bundle_root": "models/pathology_nuclei_segmentation_classification", + "epochs": 1, + "batch_size": 3, + "train#dataloader#num_workers": 1, + "validate#dataloader#num_workers": 1, + } +] + + +class TestNucleiSegClsMGPU(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + dataset_size = 10 + # train + for mode in ["Train", "Test"]: + train_sample_dir = os.path.join(self.dataset_dir, mode) + os.makedirs(train_sample_dir) + for s in range(dataset_size): + for image_suffix in ["image", "inst_map", "type_map"]: + if image_suffix == "image": + shape = (256, 256, 3) + else: + shape = (256, 256, 1) + test_image = np.random.randint(low=0, high=2, size=shape).astype(np.int8) + image_filename = os.path.join(train_sample_dir, f"{s}_{image_suffix}.npy") + np.save(image_filename, test_image) + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_train_mgpu_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + mgpu_train_file = os.path.join(bundle_root, "configs/multi_gpu_train.json") + output_path = os.path.join(bundle_root, "configs/train_override.json") + n_gpu = torch.cuda.device_count() + export_config_and_run_mgpu_cmd( + config_file=[train_file, mgpu_train_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + override_dict=override, + output_path=output_path, + ngpu=n_gpu, + check_config=True, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/ci/unit_tests/test_pathology_nuclick_annotation.py b/ci/unit_tests/test_pathology_nuclick_annotation.py new file mode 100644 index 00000000..34cd9a45 --- /dev/null +++ b/ci/unit_tests/test_pathology_nuclick_annotation.py @@ -0,0 +1,139 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import shutil +import subprocess +import sys +import tempfile +import unittest + +import numpy as np +from monai.bundle import ConfigWorkflow +from monai.data import create_test_image_2d +from monai.utils import optional_import, set_determinism +from parameterized import parameterized +from utils import check_workflow + +savemat, _ = optional_import("scipy.io", name="savemat") +Image, _ = optional_import("PIL.Image") +set_determinism(10) + +TEST_CASE_1 = [ # train, evaluate + { + "bundle_root": "./models/pathology_nuclick_annotation", + "train#trainer#max_epochs": 2, + "train#dataloader#batch_size": 1, + "train#dataloader#num_workers": 1, + "validate#dataloader#num_workers": 1, + "validate#dataloader#batch_size": 1, + } +] + +TEST_CASE_2 = [{"bundle_root": "./models/pathology_nuclick_annotation"}] # inference + + +def test_order(test_name1, test_name2): + def get_order(name): + if "train" in name: + return 1 + if "eval" in name: + return 2 + if "infer" in name: + return 3 + return 4 + + return get_order(test_name1) - get_order(test_name2) + + +class TestNuclickAnnotation(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + dataset_size = 10 + shape = (1000, 1000) + for sub_folder in ["Train", "Test"]: + sample_dir = os.path.join(self.dataset_dir, sub_folder) + image_dir = os.path.join(sample_dir, "Images") + label_dir = os.path.join(sample_dir, "Labels") + os.makedirs(image_dir) + os.makedirs(label_dir) + for s in range(dataset_size): + img, _ = create_test_image_2d(shape[0], shape[1], 200) + im = Image.fromarray(img * 255).convert("RGB") + image_filename = os.path.join(image_dir, f"{sub_folder}_{s}.png") + im.save(image_filename, "PNG") + + inst_type = np.random.randint(low=0, high=5, size=(100, 1)).astype(np.int8) + inst_centroid = np.random.randint(low=0, high=1000, size=(100, 2)).astype(np.int8) + label = { + "inst_map": (img * 255).astype(np.int8), + "type_map": (img * 255).astype(np.int8), + "inst_type": inst_type, + "inst_centroid": inst_centroid, + } + label_filename = os.path.join(label_dir, f"{sub_folder}_{s}.mat") + savemat(label_filename, label) + + prepare_datalist_file = "models/pathology_nuclick_annotation/scripts/data_process.py" + self.output = os.path.join(self.dataset_dir, "CoNSePNuclei") + cmd = f"python {prepare_datalist_file} --input {self.dataset_dir} --output {self.output}" + call_status = subprocess.run(cmd, shell=True) + call_status.check_returncode() + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_train_eval_config(self, override): + override["dataset_dir"] = self.output + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + eval_file = os.path.join(bundle_root, "configs/evaluate.json") + + trainer = ConfigWorkflow( + workflow="train", + config_file=train_file, + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(trainer, check_properties=True) + + validator = ConfigWorkflow( + # override train.json, thus set the workflow to "train" rather than "eval" + workflow="train", + config_file=[train_file, eval_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(validator, check_properties=True) + + @parameterized.expand([TEST_CASE_2]) + def test_infer_config(self, override): + 
override["dataset_dir"] = self.output + bundle_root = override["bundle_root"] + + sys.path.append(bundle_root) + inferrer = ConfigWorkflow( + workflow="infer", + config_file=os.path.join(bundle_root, "configs/inference.json"), + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(inferrer, check_properties=True) + + +if __name__ == "__main__": + loader = unittest.TestLoader() + loader.sortTestMethodsUsing = test_order + unittest.main(testLoader=loader) diff --git a/ci/unit_tests/test_pathology_nuclick_annotation_dist.py b/ci/unit_tests/test_pathology_nuclick_annotation_dist.py new file mode 100644 index 00000000..74496e2c --- /dev/null +++ b/ci/unit_tests/test_pathology_nuclick_annotation_dist.py @@ -0,0 +1,141 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import subprocess +import sys +import tempfile +import unittest + +import numpy as np +import torch +from monai.data import create_test_image_2d +from monai.utils import optional_import, set_determinism +from parameterized import parameterized +from utils import export_config_and_run_mgpu_cmd + +savemat, _ = optional_import("scipy.io", name="savemat") +Image, _ = optional_import("PIL.Image") +set_determinism(123) + +TEST_CASE_1 = [ # mgpu train + { + "bundle_root": "./models/pathology_nuclick_annotation", + "train#trainer#max_epochs": 2, + "train#dataloader#batch_size": 1, + "train#dataloader#num_workers": 1, + "validate#dataloader#num_workers": 1, + "validate#dataloader#batch_size": 1, + } +] + +TEST_CASE_2 = [ # mgpu evaluate + { + "bundle_root": "./models/pathology_nuclick_annotation", + "train#trainer#max_epochs": 2, + "train#dataloader#batch_size": 1, + } +] + + +def test_order(test_name1, test_name2): + def get_order(name): + if "train" in name: + return 1 + if "eval" in name: + return 2 + return 3 + + return get_order(test_name1) - get_order(test_name2) + + +class TestNuclickAnnotationMGPU(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + self.output = os.path.join(self.dataset_dir, "CoNSePNuclei") + + dataset_size = 10 + shape = (1000, 1000) + for sub_folder in ["Train", "Test"]: + sample_dir = os.path.join(self.dataset_dir, sub_folder) + image_dir = os.path.join(sample_dir, "Images") + label_dir = os.path.join(sample_dir, "Labels") + os.makedirs(image_dir) + os.makedirs(label_dir) + for s in range(dataset_size): + img, _ = create_test_image_2d(shape[0], shape[1], 600) + im = Image.fromarray(img * 255).convert("RGB") + image_filename = os.path.join(image_dir, f"{sub_folder}_{s}.png") + im.save(image_filename, "PNG") + + inst_type = np.random.randint(low=0, high=5, size=(500, 1)).astype(np.int8) + inst_centroid = np.random.randint(low=0, high=1000, size=(500, 2)).astype(np.int8) + label = { + "inst_map": (img * 255).astype(np.int8), + "type_map": (img * 255).astype(np.int8), + "inst_type": inst_type, + "inst_centroid": inst_centroid, + } 
+ label_filename = os.path.join(label_dir, f"{sub_folder}_{s}.mat") + savemat(label_filename, label) + + prepare_datalist_file = "models/pathology_nuclick_annotation/scripts/data_process.py" + cmd = f"python {prepare_datalist_file} --input {self.dataset_dir} --output {self.output}" + call_status = subprocess.run(cmd, shell=True) + call_status.check_returncode() + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_train_mgpu_config(self, override): + override["dataset_dir"] = self.output + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + mgpu_train_file = os.path.join(bundle_root, "configs/multi_gpu_train.json") + output_path = os.path.join(bundle_root, "configs/train_override.json") + n_gpu = torch.cuda.device_count() + export_config_and_run_mgpu_cmd( + config_file=[train_file, mgpu_train_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + override_dict=override, + output_path=output_path, + ngpu=n_gpu, + check_config=True, + ) + + @parameterized.expand([TEST_CASE_2]) + def test_evaluate_mgpu_config(self, override): + override["dataset_dir"] = self.output + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + evaluate_file = os.path.join(bundle_root, "configs/evaluate.json") + mgpu_evaluate_file = os.path.join(bundle_root, "configs/multi_gpu_evaluate.json") + output_path = os.path.join(bundle_root, "configs/evaluate_override.json") + n_gpu = torch.cuda.device_count() + sys.path.append(bundle_root) + export_config_and_run_mgpu_cmd( + config_file=[train_file, evaluate_file, mgpu_evaluate_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + override_dict=override, + output_path=output_path, + ngpu=n_gpu, + check_config=True, + ) + + +if __name__ == "__main__": + loader = unittest.TestLoader() + loader.sortTestMethodsUsing = test_order + unittest.main(testLoader=loader) diff --git a/ci/unit_tests/test_wholeBody_ct_segmentation.py b/ci/unit_tests/test_wholeBody_ct_segmentation.py new file mode 100644 index 00000000..0001b8f3 --- /dev/null +++ b/ci/unit_tests/test_wholeBody_ct_segmentation.py @@ -0,0 +1,126 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import shutil +import tempfile +import unittest + +import numpy as np +from monai.bundle import ConfigWorkflow +from monai.data import ITKWriter +from parameterized import parameterized +from utils import check_workflow + +TEST_CASE_1 = [ # train, evaluate + { + "bundle_root": "models/wholeBody_ct_segmentation", + "images": "$list(sorted(glob.glob(@dataset_dir + '/image_*.nii.gz')))", + "labels": "$list(sorted(glob.glob(@dataset_dir + '/label_*.nii.gz')))", + "val_interval": 1, + "train#trainer#max_epochs": 1, + "train#dataset#cache_rate": 0.0, + "train#dataloader#batch_size": 1, + } +] + +TEST_CASE_2 = [ # inference + { + "bundle_root": "models/wholeBody_ct_segmentation", + "datalist": "$list(sorted(glob.glob(@dataset_dir + '/image_*.nii.gz')))", + } +] + + +def test_order(test_name1, test_name2): + def get_order(name): + if "train" in name: + return 1 + if "eval" in name: + return 2 + if "infer" in name: + return 3 + return 4 + + return get_order(test_name1) - get_order(test_name2) + + +class TestWholeBodySeg(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + dataset_size = 12 + input_shape = (96, 96, 96) + writer = ITKWriter(output_dtype=np.uint8) + for s in range(dataset_size): + test_image = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8) + test_label = np.random.randint(low=0, high=14, size=input_shape).astype(np.int8) + image_filename = os.path.join(self.dataset_dir, f"image_{s}.nii.gz") + label_filename = os.path.join(self.dataset_dir, f"label_{s}.nii.gz") + writer.set_data_array(test_image, channel_dim=None) + writer.set_metadata({"affine": np.eye(4), "original_affine": np.eye(4)}) + writer.write(image_filename) + writer.set_data_array(test_label, channel_dim=None) + writer.set_metadata({"affine": np.eye(4), "original_affine": np.eye(4)}) + writer.write(label_filename) + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_train_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + + trainer = ConfigWorkflow( + workflow="train", + config_file=os.path.join(bundle_root, "configs/train.json"), + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(trainer, check_properties=True) + + @parameterized.expand([TEST_CASE_1]) + def test_eval_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + eval_file = os.path.join(bundle_root, "configs/evaluate.json") + + validator = ConfigWorkflow( + # override train.json, thus set the workflow to "train" rather than "eval" + workflow="train", + config_file=[train_file, eval_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(validator, check_properties=True) + + @parameterized.expand([TEST_CASE_2]) + def test_infer_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + + inferrer = ConfigWorkflow( + workflow="infer", + config_file=os.path.join(bundle_root, "configs/inference.json"), + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(inferrer, check_properties=True) + + 
+if __name__ == "__main__": + loader = unittest.TestLoader() + loader.sortTestMethodsUsing = test_order + unittest.main(testLoader=loader) diff --git a/ci/unit_tests/test_wholeBody_ct_segmentation_dist.py b/ci/unit_tests/test_wholeBody_ct_segmentation_dist.py new file mode 100644 index 00000000..fe9c9168 --- /dev/null +++ b/ci/unit_tests/test_wholeBody_ct_segmentation_dist.py @@ -0,0 +1,96 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import tempfile +import unittest + +import numpy as np +import torch +from monai.data import ITKWriter +from parameterized import parameterized +from utils import export_config_and_run_mgpu_cmd + +TEST_CASE_1 = [ + { + "bundle_root": "models/wholeBody_ct_segmentation", + "images": "$list(sorted(glob.glob(@dataset_dir + '/image_*.nii.gz')))", + "labels": "$list(sorted(glob.glob(@dataset_dir + '/label_*.nii.gz')))", + "val_interval": 1, + "train#trainer#max_epochs": 1, + "train#dataset#cache_rate": 0.0, + "train#dataloader#batch_size": 1, + } +] + + +class TestWholeBodySegMGPU(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + dataset_size = 12 + input_shape = (96, 96, 96) + writer = ITKWriter(output_dtype=np.uint8) + for s in range(dataset_size): + test_image = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8) + test_label = np.random.randint(low=0, high=14, size=input_shape).astype(np.int8) + image_filename = os.path.join(self.dataset_dir, f"image_{s}.nii.gz") + label_filename = os.path.join(self.dataset_dir, f"label_{s}.nii.gz") + writer.set_data_array(test_image, channel_dim=None) + writer.set_metadata({"affine": np.eye(4), "original_affine": np.eye(4)}) + writer.write(image_filename) + writer.set_data_array(test_label, channel_dim=None) + writer.set_metadata({"affine": np.eye(4), "original_affine": np.eye(4)}) + writer.write(label_filename) + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_train_mgpu_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + mgpu_train_file = os.path.join(bundle_root, "configs/multi_gpu_train.json") + output_path = os.path.join(bundle_root, "configs/train_override.json") + n_gpu = torch.cuda.device_count() + export_config_and_run_mgpu_cmd( + config_file=[train_file, mgpu_train_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + override_dict=override, + output_path=output_path, + ngpu=n_gpu, + check_config=True, + ) + + @parameterized.expand([TEST_CASE_1]) + def test_evaluate_mgpu_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + evaluate_file = os.path.join(bundle_root, "configs/evaluate.json") + mgpu_evaluate_file = os.path.join(bundle_root, 
"configs/multi_gpu_evaluate.json") + output_path = os.path.join(bundle_root, "configs/evaluate_override.json") + n_gpu = torch.cuda.device_count() + export_config_and_run_mgpu_cmd( + config_file=[train_file, evaluate_file, mgpu_evaluate_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + override_dict=override, + output_path=output_path, + ngpu=n_gpu, + check_config=True, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/models/model_info.json b/models/model_info.json index ca845f9c..ffaaa093 100644 --- a/models/model_info.json +++ b/models/model_info.json @@ -1210,5 +1210,13 @@ "spleen_deepedit_annotation_v0.4.8": { "checksum": "dec1cfc238bb9c362fe10ff0c38d6f05e49fd5bc", "source": "https://api.ngc.nvidia.com/v2/models/nvidia/monaihosting/spleen_deepedit_annotation/versions/0.4.8/files/spleen_deepedit_annotation_v0.4.8.zip" + }, + "prostate_mri_anatomy_v0.3.3": { + "checksum": "dcefa44bb2ed0e027f6f49ba5ea98e8093c8ef20", + "source": "https://api.ngc.nvidia.com/v2/models/nvidia/monaihosting/prostate_mri_anatomy/versions/0.3.3/files/prostate_mri_anatomy_v0.3.3.zip" + }, + "pathology_nuclei_segmentation_classification_v0.2.2": { + "checksum": "30b7ac6aafe10f7fffb417b6f4f959a03debee6f", + "source": "https://api.ngc.nvidia.com/v2/models/nvidia/monaihosting/pathology_nuclei_segmentation_classification/versions/0.2.2/files/pathology_nuclei_segmentation_classification_v0.2.2.zip" } } diff --git a/models/pathology_nuclei_segmentation_classification/configs/metadata.json b/models/pathology_nuclei_segmentation_classification/configs/metadata.json index 886490ff..dbff4483 100644 --- a/models/pathology_nuclei_segmentation_classification/configs/metadata.json +++ b/models/pathology_nuclei_segmentation_classification/configs/metadata.json @@ -1,7 +1,8 @@ { "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_hovernet_20221124.json", - "version": "0.2.1", + "version": "0.2.2", "changelog": { + "0.2.2": "add requiremnts for torchvision", "0.2.1": "fix the wrong GPU index issue of multi-node", "0.2.0": "Update README for how to download dataset", "0.1.9": "add RAM warning", @@ -20,6 +21,7 @@ "numpy_version": "1.22.2", "optional_packages_version": { "scikit-image": "0.19.3", + "torchvision": "0.14.1", "scipy": "1.8.1", "tqdm": "4.64.1", "pillow": "9.0.1" diff --git a/models/prostate_mri_anatomy/configs/inference.json b/models/prostate_mri_anatomy/configs/inference.json index efdc02c5..d43ead87 100644 --- a/models/prostate_mri_anatomy/configs/inference.json +++ b/models/prostate_mri_anatomy/configs/inference.json @@ -5,8 +5,8 @@ ], "bundle_root": "/workspace/data/prostate_mri_anatomy", "output_dir": "$@bundle_root + '/eval'", - "dataset_dir": "/workspace/data/prostate158/prostate158_train/", - "datalist": "$list(@dataset_dir + pd.read_csv(@dataset_dir + 'valid.csv').t2)", + "dataset_dir": "/workspace/data/prostate158/prostate158_test/", + "datalist": "$list(@dataset_dir + pd.read_csv(@dataset_dir + 'test.csv').t2)", "device": "$torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')", "network_def": { "_target_": "UNet", @@ -114,6 +114,15 @@ 2 ] }, + { + "_target_": "Invertd", + "keys": "pred", + "transform": "@preprocessing", + "orig_keys": "image", + "meta_key_postfix": "meta_dict", + "nearest_interp": false, + "to_tensor": true + }, { "_target_": "SaveImaged", "keys": "pred", @@ -127,6 +136,10 @@ { "_target_": "CheckpointLoader", "load_path": 
"$@bundle_root + '/models/model.pt'", + "map_location": { + "_target_": "torch.device", + "device": "@device" + }, "load_dict": { "model": "@network" } diff --git a/models/prostate_mri_anatomy/configs/metadata.json b/models/prostate_mri_anatomy/configs/metadata.json index e8d2978a..ac024c80 100644 --- a/models/prostate_mri_anatomy/configs/metadata.json +++ b/models/prostate_mri_anatomy/configs/metadata.json @@ -1,7 +1,8 @@ { "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_20220324.json", - "version": "0.3.2", + "version": "0.3.3", "changelog": { + "0.3.3": "add invertd transformation", "0.3.2": "add name tag", "0.3.1": "fix license Copyright error", "0.3.0": "update license files", @@ -14,7 +15,7 @@ "numpy_version": "1.22.3", "optional_packages_version": { "nibabel": "3.2.2", - "itk": "5.2.1", + "itk": "5.3", "pytorch-ignite": "0.4.9", "pandas": "1.4.2" },