diff --git a/.github/workflows/premerge-cpu.yml b/.github/workflows/premerge-cpu.yml
index 4bd1215f..e46de1ca 100644
--- a/.github/workflows/premerge-cpu.yml
+++ b/.github/workflows/premerge-cpu.yml
@@ -17,10 +17,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
     - uses: actions/checkout@v2
-    - name: Set up Python 3.8
+    - name: Set up Python 3.9
       uses: actions/setup-python@v2
       with:
-        python-version: 3.8
+        python-version: 3.9
     - name: cache weekly timestamp
       id: pip-cache
       run: |
@@ -34,8 +34,6 @@ jobs:
     - name: Install dependencies
       run: |
         python -m pip install --upgrade pip wheel
-        python -m pip install -r requirements-dev.txt
-        python -m pip install cffi && python -m pip install pipenv
     - name: check
       run: |
         # clean up temporary files
diff --git a/ci/get_bundle_list.py b/ci/get_bundle_list.py
index 3e6d5393..440aeb54 100644
--- a/ci/get_bundle_list.py
+++ b/ci/get_bundle_list.py
@@ -15,7 +15,7 @@ from utils import get_sub_folders


 # new added bundles should temporarily be added to this list, and remove until they can be downloaded successfully
-EXCLUDE_LIST = ["segmentation_template", "classification_template"]
+EXCLUDE_LIST = []


 def main(models_path):
diff --git a/ci/run_premerge_cpu.sh b/ci/run_premerge_cpu.sh
index 4950bd7d..11a233fe 100755
--- a/ci/run_premerge_cpu.sh
+++ b/ci/run_premerge_cpu.sh
@@ -30,7 +30,11 @@ elif [[ $# -gt 1 ]]; then
 fi

 verify_bundle() {
-    rm -rf /opt/hostedtoolcache
+    for dir in /opt/hostedtoolcache/*; do
+        if [[ $dir != "/opt/hostedtoolcache/Python" ]]; then
+            rm -rf "$dir"
+        fi
+    done
     echo 'Run verify bundle...'
     pip install -r requirements.txt
     head_ref=$(git rev-parse HEAD)
diff --git a/ci/unit_tests/test_spleen_deepedit_annotation.py b/ci/unit_tests/test_spleen_deepedit_annotation.py
index fa5da6fa..4210d92b 100644
--- a/ci/unit_tests/test_spleen_deepedit_annotation.py
+++ b/ci/unit_tests/test_spleen_deepedit_annotation.py
@@ -124,9 +124,9 @@ def test_infer_config(self, override):
     @parameterized.expand([TEST_CASE_2])
     def test_infer_click_config(self, override):
         override["dataset_dir"] = self.dataset_dir
-        override[
-            "dataset#data"
-        ] = "$[{'image': i, 'background': [], 'spleen': [[6, 6, 6], [8, 8, 8]]} for i in @datalist]"
+        override["dataset#data"] = (
+            "$[{'image': i, 'background': [], 'spleen': [[6, 6, 6], [8, 8, 8]]} for i in @datalist]"
+        )
         bundle_root = override["bundle_root"]
         sys.path = [bundle_root] + sys.path

diff --git a/models/renalStructures_UNEST_segmentation/configs/metadata.json b/models/renalStructures_UNEST_segmentation/configs/metadata.json
index 06c84f20..c2cf410a 100644
--- a/models/renalStructures_UNEST_segmentation/configs/metadata.json
+++ b/models/renalStructures_UNEST_segmentation/configs/metadata.json
@@ -1,7 +1,8 @@
 {
     "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_20220324.json",
-    "version": "0.2.3",
+    "version": "0.2.4",
     "changelog": {
+        "0.2.4": "fix black 24.1 format error",
         "0.2.3": "update AddChanneld with EnsureChannelFirstd and remove meta_dict",
         "0.2.2": "add name tag",
         "0.2.1": "fix license Copyright error",
diff --git a/models/renalStructures_UNEST_segmentation/scripts/networks/nest/utils.py b/models/renalStructures_UNEST_segmentation/scripts/networks/nest/utils.py
index 70828c04..211283d3 100755
--- a/models/renalStructures_UNEST_segmentation/scripts/networks/nest/utils.py
+++ b/models/renalStructures_UNEST_segmentation/scripts/networks/nest/utils.py
@@ -37,9 +37,7 @@ def drop_block_2d(
     total_size = w * h
     clipped_block_size = min(block_size, min(w, h))
     # seed_drop_rate, the gamma parameter
-    gamma = (
-        gamma_scale * drop_prob * total_size / clipped_block_size**2 / ((w - block_size + 1) * (h - block_size + 1))
-    )
+    gamma = gamma_scale * drop_prob * total_size / clipped_block_size**2 / ((w - block_size + 1) * (h - block_size + 1))

     # Forces the block to be inside the feature map.
     w_i, h_i = torch.meshgrid(torch.arange(w).to(x.device), torch.arange(h).to(x.device))
@@ -89,9 +87,7 @@ def drop_block_fast_2d(
     b, c, h, w = x.shape
     total_size = w * h
     clipped_block_size = min(block_size, min(w, h))
-    gamma = (
-        gamma_scale * drop_prob * total_size / clipped_block_size**2 / ((w - block_size + 1) * (h - block_size + 1))
-    )
+    gamma = gamma_scale * drop_prob * total_size / clipped_block_size**2 / ((w - block_size + 1) * (h - block_size + 1))

     block_mask = torch.empty_like(x).bernoulli_(gamma)
     block_mask = F.max_pool2d(
diff --git a/models/wholeBrainSeg_Large_UNEST_segmentation/configs/metadata.json b/models/wholeBrainSeg_Large_UNEST_segmentation/configs/metadata.json
index 69cb257b..9489b50b 100644
--- a/models/wholeBrainSeg_Large_UNEST_segmentation/configs/metadata.json
+++ b/models/wholeBrainSeg_Large_UNEST_segmentation/configs/metadata.json
@@ -1,7 +1,8 @@
 {
     "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_20220324.json",
-    "version": "0.2.3",
+    "version": "0.2.4",
     "changelog": {
+        "0.2.4": "fix black 24.1 format error",
     "0.2.3": "fix PYTHONPATH in readme.md",
         "0.2.2": "add name tag",
         "0.2.1": "fix license Copyright error",
diff --git a/models/wholeBrainSeg_Large_UNEST_segmentation/scripts/networks/nest/utils.py b/models/wholeBrainSeg_Large_UNEST_segmentation/scripts/networks/nest/utils.py
index 70828c04..211283d3 100755
--- a/models/wholeBrainSeg_Large_UNEST_segmentation/scripts/networks/nest/utils.py
+++ b/models/wholeBrainSeg_Large_UNEST_segmentation/scripts/networks/nest/utils.py
@@ -37,9 +37,7 @@ def drop_block_2d(
     total_size = w * h
     clipped_block_size = min(block_size, min(w, h))
     # seed_drop_rate, the gamma parameter
-    gamma = (
-        gamma_scale * drop_prob * total_size / clipped_block_size**2 / ((w - block_size + 1) * (h - block_size + 1))
-    )
+    gamma = gamma_scale * drop_prob * total_size / clipped_block_size**2 / ((w - block_size + 1) * (h - block_size + 1))

     # Forces the block to be inside the feature map.
     w_i, h_i = torch.meshgrid(torch.arange(w).to(x.device), torch.arange(h).to(x.device))
@@ -89,9 +87,7 @@ def drop_block_fast_2d(
     b, c, h, w = x.shape
     total_size = w * h
     clipped_block_size = min(block_size, min(w, h))
-    gamma = (
-        gamma_scale * drop_prob * total_size / clipped_block_size**2 / ((w - block_size + 1) * (h - block_size + 1))
-    )
+    gamma = gamma_scale * drop_prob * total_size / clipped_block_size**2 / ((w - block_size + 1) * (h - block_size + 1))

     block_mask = torch.empty_like(x).bernoulli_(gamma)
     block_mask = F.max_pool2d(
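Note on the repeated gamma hunks: in both nest/utils.py files the change only collapses Black's parenthesized layout into one line; the value itself is unchanged. It is the DropBlock seed probability, scaled so that after each Bernoulli "seed" is grown into a block_size x block_size block, roughly drop_prob of the activations end up dropped. Below is a minimal standalone sketch of that computation and of the mask expansion visible in drop_block_fast_2d. The example sizes are illustrative, and the max_pool2d arguments are an assumption (the call is truncated in the diff above), so treat this as a sketch rather than the bundle's exact code.

import torch
import torch.nn.functional as F

# Illustrative values; in the bundle code these come from the input tensor
# and the function arguments (hypothetical numbers, not taken from the diff).
w, h = 32, 32
block_size = 7
drop_prob, gamma_scale = 0.1, 1.0

total_size = w * h
clipped_block_size = min(block_size, min(w, h))
# Seed probability (the one-line form from the diff): normalized by the block
# area and by the number of valid block-center positions.
gamma = gamma_scale * drop_prob * total_size / clipped_block_size**2 / ((w - block_size + 1) * (h - block_size + 1))

x = torch.randn(1, 3, h, w)
# Fast variant: sample seeds over the whole map, then dilate each seed into a
# block with max pooling. Pooling arguments are assumed for illustration.
block_mask = torch.empty_like(x).bernoulli_(gamma)
block_mask = F.max_pool2d(block_mask, kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2)
print(block_mask.mean().item())  # ~drop_prob: fraction of positions marked for dropping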