From e8dfe5f5eef84b8ea6c6c2603f04ba107b6b665a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 29 Jul 2024 06:16:06 +0000
Subject: [PATCH 1/2] Bump torch from 2.3.1+cpu to 2.4.0+cpu

Bumps torch from 2.3.1+cpu to 2.4.0+cpu.

---
updated-dependencies:
- dependency-name: torch
  dependency-type: direct:development
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
---
 poetry.lock | 70 +++++++++--------------------------------------------
 1 file changed, 12 insertions(+), 58 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index c905bf59..2eae69ba 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -466,20 +466,6 @@ files = [
     {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
 ]
 
-[[package]]
-name = "intel-openmp"
-version = "2021.4.0"
-description = "Intel OpenMP* Runtime Library"
-optional = false
-python-versions = "*"
-files = [
-    {file = "intel_openmp-2021.4.0-py2.py3-none-macosx_10_15_x86_64.macosx_11_0_x86_64.whl", hash = "sha256:41c01e266a7fdb631a7609191709322da2bbf24b252ba763f125dd651bcc7675"},
-    {file = "intel_openmp-2021.4.0-py2.py3-none-manylinux1_i686.whl", hash = "sha256:3b921236a38384e2016f0f3d65af6732cf2c12918087128a9163225451e776f2"},
-    {file = "intel_openmp-2021.4.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:e2240ab8d01472fed04f3544a878cda5da16c26232b7ea1b59132dbfb48b186e"},
-    {file = "intel_openmp-2021.4.0-py2.py3-none-win32.whl", hash = "sha256:6e863d8fd3d7e8ef389d52cf97a50fe2afe1a19247e8c0d168ce021546f96fc9"},
-    {file = "intel_openmp-2021.4.0-py2.py3-none-win_amd64.whl", hash = "sha256:eef4c8bcc8acefd7f5cd3b9384dbf73d59e2c99fc56545712ded913f43c4a94f"},
-]
-
 [[package]]
 name = "jinja2"
 version = "3.1.4"
@@ -593,24 +579,6 @@ files = [
     {file = "MarkupSafe-2.0.1.tar.gz", hash = "sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a"},
 ]
 
-[[package]]
-name = "mkl"
-version = "2021.4.0"
-description = "Intel® oneAPI Math Kernel Library"
-optional = false
-python-versions = "*"
-files = [
-    {file = "mkl-2021.4.0-py2.py3-none-macosx_10_15_x86_64.macosx_11_0_x86_64.whl", hash = "sha256:67460f5cd7e30e405b54d70d1ed3ca78118370b65f7327d495e9c8847705e2fb"},
-    {file = "mkl-2021.4.0-py2.py3-none-manylinux1_i686.whl", hash = "sha256:636d07d90e68ccc9630c654d47ce9fdeb036bb46e2b193b3a9ac8cfea683cce5"},
-    {file = "mkl-2021.4.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:398dbf2b0d12acaf54117a5210e8f191827f373d362d796091d161f610c1ebfb"},
-    {file = "mkl-2021.4.0-py2.py3-none-win32.whl", hash = "sha256:439c640b269a5668134e3dcbcea4350459c4a8bc46469669b2d67e07e3d330e8"},
-    {file = "mkl-2021.4.0-py2.py3-none-win_amd64.whl", hash = "sha256:ceef3cafce4c009dd25f65d7ad0d833a0fbadc3d8903991ec92351fe5de1e718"},
-]
-
-[package.dependencies]
-intel-openmp = "==2021.*"
-tbb = "==2021.*"
-
 [[package]]
 name = "mpmath"
 version = "1.3.0"
@@ -1513,19 +1481,6 @@ files = [
 [package.dependencies]
 mpmath = ">=0.19"
 
-[[package]]
-name = "tbb"
-version = "2021.12.0"
-description = "Intel® oneAPI Threading Building Blocks (oneTBB)"
-optional = false
-python-versions = "*"
-files = [
-    {file = "tbb-2021.12.0-py2.py3-none-manylinux1_i686.whl", hash = "sha256:f2cc9a7f8ababaa506cbff796ce97c3bf91062ba521e15054394f773375d81d8"},
-    {file = "tbb-2021.12.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:a925e9a7c77d3a46ae31c34b0bb7f801c4118e857d137b68f68a8e458fcf2bd7"},
-    {file = "tbb-2021.12.0-py3-none-win32.whl", hash = "sha256:b1725b30c174048edc8be70bd43bb95473f396ce895d91151a474d0fa9f450a8"},
-    {file = "tbb-2021.12.0-py3-none-win_amd64.whl", hash = "sha256:fc2772d850229f2f3df85f1109c4844c495a2db7433d38200959ee9265b34789"},
-]
-
 [[package]]
 name = "toml"
 version = "0.10.2"
@@ -1550,35 +1505,34 @@ files = [
 [[package]]
 name = "torch"
-version = "2.3.1+cpu"
+version = "2.4.0+cpu"
 description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration"
 optional = false
 python-versions = ">=3.8.0"
 files = [
-    {file = "torch-2.3.1+cpu-cp310-cp310-linux_x86_64.whl", hash = "sha256:d679e21d871982b9234444331a26350902cfd2d5ca44ce6f49896af8b3a3087d"},
-    {file = "torch-2.3.1+cpu-cp310-cp310-win_amd64.whl", hash = "sha256:500bf790afc2fd374a15d06213242e517afccc50a46ea5955d321a9a68003335"},
-    {file = "torch-2.3.1+cpu-cp311-cp311-linux_x86_64.whl", hash = "sha256:a272defe305dbd944aa28a91cc3db0f0149495b3ebec2e39723a7224fa05dc57"},
-    {file = "torch-2.3.1+cpu-cp311-cp311-win_amd64.whl", hash = "sha256:d2965eb54d3c8818e2280a54bd53e8246a6bb34e4b10bd19c59f35b611dd9f05"},
-    {file = "torch-2.3.1+cpu-cp312-cp312-linux_x86_64.whl", hash = "sha256:2141a6cb7021adf2f92a0fd372cfeac524ba460bd39ce3a641d30a561e41f69a"},
-    {file = "torch-2.3.1+cpu-cp312-cp312-win_amd64.whl", hash = "sha256:6acdca2530462611095c44fd95af75ecd5b9646eac813452fe0adf31a9bc310a"},
-    {file = "torch-2.3.1+cpu-cp38-cp38-linux_x86_64.whl", hash = "sha256:cab92d5101e6db686c5525e04d87cedbcf3a556073d71d07fbe7d1ce09630ffb"},
-    {file = "torch-2.3.1+cpu-cp38-cp38-win_amd64.whl", hash = "sha256:dbc784569a367fd425158cf4ae82057dd3011185ba5fc68440432ba0562cb5b2"},
-    {file = "torch-2.3.1+cpu-cp39-cp39-linux_x86_64.whl", hash = "sha256:a3cb8e61ba311cee1bb7463cbdcf3ebdfd071e2091e74c5785e3687eb02819f9"},
-    {file = "torch-2.3.1+cpu-cp39-cp39-win_amd64.whl", hash = "sha256:df68668056e62c0332e03f43d9da5d4278b39df1ba58d30ec20d34242070955d"},
+    {file = "torch-2.4.0+cpu-cp310-cp310-linux_x86_64.whl", hash = "sha256:0e59377b27823dda6d26528febb7ca06fc5b77816eaa58b4420cc8785e33d4ce"},
+    {file = "torch-2.4.0+cpu-cp310-cp310-win_amd64.whl", hash = "sha256:53c3f75fa4ef0726e494ebef003b17d8a61c3c9fa4630b465610b462bf06c3de"},
+    {file = "torch-2.4.0+cpu-cp311-cp311-linux_x86_64.whl", hash = "sha256:14a7a8b595347dddca594f9e448b93ce68ce4f871acbd32cf04bda7c03664c0c"},
+    {file = "torch-2.4.0+cpu-cp311-cp311-win_amd64.whl", hash = "sha256:3b3cb9a6c17b5a4cea42bb37a243bfbad7659cef6d9b4ee29cb793bdf20f482c"},
+    {file = "torch-2.4.0+cpu-cp312-cp312-linux_x86_64.whl", hash = "sha256:78dbf5f2789933a7ea2dabeead4daa44679b1e0d8eb35ddb7071c8ab7b181eb3"},
+    {file = "torch-2.4.0+cpu-cp312-cp312-win_amd64.whl", hash = "sha256:f59c53a1c3247efb3700f9f78bdd289712177037a85b5519b9ecdef7c77c1fee"},
+    {file = "torch-2.4.0+cpu-cp38-cp38-linux_x86_64.whl", hash = "sha256:08753c3d776ae49dc9ddbae02e26720a513a4dc7997e41d95392bca71623a0cd"},
+    {file = "torch-2.4.0+cpu-cp38-cp38-win_amd64.whl", hash = "sha256:9f376f5a14eb04a44974c3a9dfd857a68090acb435b98e62bbf523baeefac85e"},
+    {file = "torch-2.4.0+cpu-cp39-cp39-linux_x86_64.whl", hash = "sha256:040abaee8affa1bb0f3ca14ca693ba81d0d90d88df5b8a839af96933a7fa2d29"},
+    {file = "torch-2.4.0+cpu-cp39-cp39-win_amd64.whl", hash = "sha256:441fbf517c46fee6782a4289ffe49f701d0a52e3533ab5397ce395da165d921d"},
 ]
 
 [package.dependencies]
 filelock = "*"
 fsspec = "*"
 jinja2 = "*"
-mkl = {version = ">=2021.1.1,<=2021.4.0", markers = "platform_system == \"Windows\""}
 networkx = "*"
 sympy = "*"
 typing-extensions = ">=4.8.0"
 
 [package.extras]
 opt-einsum = ["opt-einsum (>=3.3)"]
-optree = ["optree (>=0.9.1)"] +optree = ["optree (>=0.11.0)"] [package.source] type = "legacy" From 38c604e868c215d1a74a242c4d99d5a5a352175e Mon Sep 17 00:00:00 2001 From: Thomas M Kehrenberg Date: Thu, 1 Aug 2024 15:42:18 +0200 Subject: [PATCH 2/2] Adapt to new type hints in pytorch --- ranzen/hydra/utils.py | 3 ++- ranzen/torch/optimizers/adafactor.py | 2 +- ranzen/torch/schedulers.py | 4 ++-- tests/optimizers_test.py | 2 +- tests/scheduler_test.py | 9 +++++---- 5 files changed, 11 insertions(+), 9 deletions(-) diff --git a/ranzen/hydra/utils.py b/ranzen/hydra/utils.py index 1d017040..7319a9eb 100644 --- a/ranzen/hydra/utils.py +++ b/ranzen/hydra/utils.py @@ -202,10 +202,11 @@ class Config: if (group := groups.get(entry.name)) is not None: for var_name, var_class in group.items(): if not issubclass(var_class, typ): # type: ignore + typ_name = typ.__name__ # type: ignore raise ValueError( f"All variants should be subclasses of their entry's type: type" f" `{var_class.__name__}` of variant `{entry.name}={var_name}` " - f"is not a subclass of `{typ.__name__}`." + f"is not a subclass of `{typ_name}`." ) else: raise ValueError( diff --git a/ranzen/torch/optimizers/adafactor.py b/ranzen/torch/optimizers/adafactor.py index 830c2597..546ee2ad 100644 --- a/ranzen/torch/optimizers/adafactor.py +++ b/ranzen/torch/optimizers/adafactor.py @@ -6,7 +6,7 @@ import torch from torch import Tensor -from torch.optim import Optimizer +from torch.optim.optimizer import Optimizer from .common import LossClosure diff --git a/ranzen/torch/schedulers.py b/ranzen/torch/schedulers.py index 7fc81621..6cd9383c 100644 --- a/ranzen/torch/schedulers.py +++ b/ranzen/torch/schedulers.py @@ -6,8 +6,8 @@ import torch from torch import Tensor -from torch.optim import Optimizer from torch.optim.lr_scheduler import CosineAnnealingLR, _LRScheduler +from torch.optim.optimizer import Optimizer __all__ = [ "CosineLRWithLinearWarmup", @@ -128,7 +128,7 @@ def scheduler(self) -> Union[LinearWarmupLR, CosineAnnealingLR]: self._scheduler = CosineAnnealingLR( optimizer=self.optimizer, T_max=self.total_iters - self.warmup_iters + 1, - eta_min=self.lr_min, + eta_min=self.lr_min, # type: ignore ) return self._scheduler diff --git a/tests/optimizers_test.py b/tests/optimizers_test.py index 800461b9..5565edb1 100644 --- a/tests/optimizers_test.py +++ b/tests/optimizers_test.py @@ -1,7 +1,7 @@ import pytest import torch from torch import Tensor -from torch.optim import AdamW +from torch.optim.adamw import AdamW from ranzen.torch.optimizers import Adafactor, LAMB, SAM diff --git a/tests/scheduler_test.py b/tests/scheduler_test.py index b625971f..2ea78a34 100644 --- a/tests/scheduler_test.py +++ b/tests/scheduler_test.py @@ -1,6 +1,7 @@ import pytest import torch -from torch import optim +from torch.optim.adamw import AdamW +from torch.optim.sgd import SGD from ranzen.torch.schedulers import LinearWarmupLR @@ -9,7 +10,7 @@ def test_linear_warmup_lr() -> None: params = (torch.randn(1, 1, requires_grad=True),) base_lr = 1.0 lr_start = 1.0e-1 - optimizer = optim.SGD(params, lr=base_lr) + optimizer = SGD(params, lr=base_lr) scheduler = LinearWarmupLR(optimizer=optimizer, lr_start=lr_start, warmup_iters=1) for group in optimizer.param_groups: assert group["lr"] == lr_start @@ -22,7 +23,7 @@ def _step(): for group in optimizer.param_groups: assert group["lr"] == base_lr - optimizer = optim.AdamW(params, lr=base_lr) + optimizer = AdamW(params, lr=base_lr) scheduler = LinearWarmupLR(optimizer=optimizer, lr_start=lr_start, warmup_iters=0) for group in 
optimizer.param_groups: assert group["lr"] == base_lr @@ -31,7 +32,7 @@ def _step(): for group in optimizer.param_groups: assert group["lr"] == base_lr - optimizer = optim.AdamW(params, lr=base_lr) + optimizer = AdamW(params, lr=base_lr) scheduler = LinearWarmupLR(optimizer=optimizer, lr_start=lr_start, warmup_iters=2) _step() expected_lr_after_one_step = lr_start + 0.5 * (base_lr - lr_start)
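
As context for [PATCH 2/2]: the adaptation consists entirely of importing optimizer classes from the submodules that define them (`torch.optim.optimizer`, `torch.optim.sgd`, `torch.optim.adamw`) rather than from the `torch.optim` re-exports, which torch 2.4's reworked type hints no longer guarantee to satisfy strict type checkers; both forms still run. Below is a minimal sketch of the adapted style, assuming torch 2.4; the module paths are taken from the diff above, while the toy parameter tuple and learning rate are illustrative values, not taken from the test suite:

    import torch
    # torch 2.4 style: import each class from its defining submodule, matching
    # the new type stubs (the old `from torch.optim import ...` still runs).
    from torch.optim.adamw import AdamW
    from torch.optim.optimizer import Optimizer
    from torch.optim.sgd import SGD

    # Illustrative usage: both concrete optimizers still satisfy the
    # Optimizer interface imported from torch.optim.optimizer.
    params = (torch.randn(1, 1, requires_grad=True),)
    opt: Optimizer = SGD(params, lr=0.1)
    opt = AdamW(params, lr=0.1)
    opt.zero_grad()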