From fc565d7294f804ef2b6d1947897b34659e017192 Mon Sep 17 00:00:00 2001
From: svlandeg
Date: Tue, 30 Dec 2025 11:12:37 +0100
Subject: [PATCH] refactor part 2

---
 scripts/chat_sft.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/scripts/chat_sft.py b/scripts/chat_sft.py
index 3bf38ae..76ce822 100644
--- a/scripts/chat_sft.py
+++ b/scripts/chat_sft.py
@@ -225,9 +225,6 @@ for step in range(num_iterations):
     train_loss = loss.detach() # for logging
     loss = loss / grad_accum_steps # each .backward() is a grad sum => normalize loss here
     loss.backward() # accumulate the gradient
-    num_tokens += (train_targets >= 0).sum()
-    if ddp:
-        dist.all_reduce(num_tokens, op=dist.ReduceOp.SUM) # sum over ranks

     # learning rate scheduler
     lrm = get_lr_multiplier(step)