From 77fb6b02cc883409191f41a518c759ea63495665 Mon Sep 17 00:00:00 2001
From: EricLBuehler
Date: Wed, 3 Jul 2024 12:12:30 -0400
Subject: [PATCH] Prevent inf recursion as per 1892

---
 src/peft/tuners/lora/model.py  | 2 +-
 src/peft/tuners/xlora/model.py | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/peft/tuners/lora/model.py b/src/peft/tuners/lora/model.py
index 2ba92af494..692dda7b9a 100644
--- a/src/peft/tuners/lora/model.py
+++ b/src/peft/tuners/lora/model.py
@@ -905,4 +905,4 @@ def subtract_mutated_init(self, output_state_dict: dict[str, torch.Tensor], adap
                     [output_state_dict[name], -mutated_init_state_dict[".".join(name.split(".")[1:])]], dim=1
                 )
 
-        return tensors_lora
\ No newline at end of file
+        return tensors_lora
diff --git a/src/peft/tuners/xlora/model.py b/src/peft/tuners/xlora/model.py
index 48101482d3..c7f83d1497 100644
--- a/src/peft/tuners/xlora/model.py
+++ b/src/peft/tuners/xlora/model.py
@@ -277,6 +277,8 @@ def __getattr__(self, name: str):
         try:
             return super().__getattr__(name)  # defer to nn.Module's logic
         except AttributeError:
+            if name == "base_model":  # see #1892: prevent infinite recursion if class is not initialized
+                raise
             return getattr(self.lora_model, name)
 
     @staticmethod
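
For context, here is a minimal standalone sketch (plain Python/PyTorch, not the PEFT implementation) of the failure mode the xlora hunk guards against: if __getattr__ falls back to a wrapped attribute that has not been assigned yet (for example before __init__ finishes, or during unpickling), looking up that attribute re-enters __getattr__ and recurses until RecursionError; re-raising for that sentinel name turns the crash into a clean AttributeError. The class name and the guarded attribute name used below (lora_model; the patch guards base_model) are illustrative, not taken from PEFT.

import torch.nn as nn


class GuardedWrapper(nn.Module):
    """Illustrative wrapper (not the PEFT class): forwards unknown attributes to self.lora_model."""

    def __getattr__(self, name: str):
        try:
            return super().__getattr__(name)  # defer to nn.Module's logic
        except AttributeError:
            # Without this guard, a missing `lora_model` would make the fallback below
            # trigger __getattr__("lora_model") again and recurse forever.
            if name == "lora_model":
                raise
            return getattr(self.lora_model, name)


if __name__ == "__main__":
    w = GuardedWrapper()  # lora_model was never assigned
    try:
        w.some_attribute  # fallback needs self.lora_model, which now re-raises cleanly
    except AttributeError as err:
        print("AttributeError instead of RecursionError:", err)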