Commit

Update megatron_gpt_dpo_model.py
Undo changes.
akoumpa authored Nov 8, 2024
1 parent 6ac0099 commit 6ddddd2
Showing 1 changed file with 0 additions and 10 deletions.
nemo_aligner/models/nlp/gpt/megatron_gpt_dpo_model.py (0 additions, 10 deletions)
@@ -145,16 +145,6 @@ def fwd_output_and_loss_func(dataloader_iter, model, checkpoint_activations_all_
             # position_ids = batch["position_ids"][0:1]
             attention_mask = batch["attention_mask"][0:1]
 
-            pad_len = 0
-            world_size = torch.distributed.get_world_size()
-            if tokens.shape[1] % world_size != 0:
-                pad_len = world_size - tokens.shape[1] % world_size
-            with torch.no_grad():
-                import torch.nn.functional as F
-
-                tokens = F.pad(tokens, [0, pad_len, 0, 0], value=0)
-                labels = F.pad(labels, [0, pad_len, 0, 0], value=0)
-
             # Model forward pass
             forward_args = {
                 "input_ids": tokens,
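
For context, the block deleted by this commit zero-padded the sequence dimension of tokens and labels up to a multiple of the distributed world size before the model forward pass. Below is a minimal standalone sketch of that padding pattern; the helper name and the example shapes are illustrative and not part of NeMo-Aligner.

# Illustrative sketch: right-pad 2-D [batch, seq] tensors so that seq_len
# becomes a multiple of `multiple` (e.g. the distributed world size),
# mirroring the F.pad calls in the removed block.
import torch
import torch.nn.functional as F

def pad_seq_to_multiple(tokens: torch.Tensor, labels: torch.Tensor, multiple: int):
    pad_len = (-tokens.shape[1]) % multiple  # 0 when already aligned
    if pad_len:
        # F.pad's pad list runs last-dim first: (left, right) for dim -1, then dim -2
        tokens = F.pad(tokens, [0, pad_len, 0, 0], value=0)
        labels = F.pad(labels, [0, pad_len, 0, 0], value=0)
    return tokens, labels, pad_len

# Example: a [2, 10] batch padded to a multiple of 8 gains 6 zero columns.
tokens = torch.ones(2, 10, dtype=torch.long)
labels = torch.ones(2, 10, dtype=torch.long)
tokens, labels, pad_len = pad_seq_to_multiple(tokens, labels, multiple=8)
assert tokens.shape == (2, 16) and pad_len == 6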
