
Commit 77fb6b0

Prevent inf recursion as per 1892

EricLBuehler committed Jul 3, 2024
1 parent 13edc61
Showing 2 changed files with 3 additions and 1 deletion.
2 changes: 1 addition & 1 deletion src/peft/tuners/lora/model.py

@@ -905,4 +905,4 @@ def subtract_mutated_init(self, output_state_dict: dict[str, torch.Tensor], adap
                     [output_state_dict[name], -mutated_init_state_dict[".".join(name.split(".")[1:])]], dim=1
                 )
 
-        return tensors_lora
\ No newline at end of file
+        return tensors_lora

(The only change in this file is the addition of a trailing newline at the end of the file; the return statement itself is unchanged.)
2 changes: 2 additions & 0 deletions src/peft/tuners/xlora/model.py

@@ -277,6 +277,8 @@ def __getattr__(self, name: str):
         try:
             return super().__getattr__(name)  # defer to nn.Module's logic
         except AttributeError:
+            if name == "lora_model":  # see #1892: prevent infinite recursion if class is not initialized
+                raise
             return getattr(self.lora_model, name)
 
     @staticmethod
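
For context, here is a minimal, self-contained sketch of the recursion this guard breaks. The names Wrapper and inner are illustrative stand-ins for XLoraModel and lora_model, not PEFT code: on an instance whose __init__ never completed, the delegated attribute is missing, so a delegating __getattr__ would otherwise call itself forever.

# Minimal sketch of the failure mode behind this commit. `Wrapper` and
# `inner` are illustrative stand-ins for XLoraModel and lora_model.
from types import SimpleNamespace


class Wrapper:
    def __init__(self):
        self.inner = SimpleNamespace(weight=42)

    def __getattr__(self, name):
        # Called only when normal lookup fails. If __init__ never ran (or
        # raised before assigning `inner`), the `self.inner` access below
        # triggers __getattr__("inner") again, recursing forever -- unless
        # we re-raise for that one name, as the commit does for "lora_model".
        if name == "inner":
            raise AttributeError(name)
        return getattr(self.inner, name)  # delegate to the wrapped object


w = Wrapper()
print(w.weight)  # 42, resolved via delegation

broken = object.__new__(Wrapper)  # an instance whose __init__ never ran
try:
    broken.weight
except AttributeError as exc:
    print("clean failure instead of RecursionError:", exc)

Without the guard, the broken.weight lookup above dies with a RecursionError, which matches the "class is not initialized" scenario the added code comment describes.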
