Add guard against division by zero in chat_sft when num_tokens is 0

kibitzing 2025-10-15 13:24:00 +00:00
parent f5001141ec
commit 42b05eea7e


@@ -221,10 +221,16 @@ for step in range(num_iterations):
         dist.all_reduce(total_loss_sum, op=dist.ReduceOp.SUM)
         dist.all_reduce(num_tokens, op=dist.ReduceOp.SUM) # sum over ranks
-    # Scale gradients by total number of tokens
+    # scale gradients by total number of tokens
+    num_tokens_item = num_tokens.item()
+    if num_tokens_item == 0:
+        print0(f"Warning: the number of valid tokens in train targets is 0 at step {step}, skipping model update")
+        model.zero_grad(set_to_none=True)
+        continue
     for param in model.parameters():
         if param.grad is not None:
-            param.grad.div_(num_tokens.item())
+            param.grad.div_(num_tokens_item)
     # learning rate scheduler
     lrm = get_lr_multiplier(step)
@@ -238,7 +244,6 @@ for step in range(num_iterations):
     model.zero_grad(set_to_none=True)
     # logging
-    num_tokens_item = num_tokens.item()
     train_loss_item = total_loss_sum.item() / num_tokens_item
     print0(f"Step {step:05d}/{num_iterations:05d} | Training loss: {train_loss_item:.6f}| lrm: {lrm:.6f}| num_tokens: {num_tokens_item:,}")
     wandb_run.log({
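
For context, a minimal standalone sketch of the pattern this diff applies (hypothetical helper and variable names, not the actual chat_sft code): because num_tokens is all-reduced with SUM before the check, every rank sees the same total, so either all ranks skip the update together or none do, keeping the collective calls in sync.

import torch
import torch.distributed as dist

def scale_and_step(model, optimizer, total_loss_sum, num_tokens, step):
    # hypothetical helper; the real script inlines this inside its training loop
    dist.all_reduce(total_loss_sum, op=dist.ReduceOp.SUM)
    dist.all_reduce(num_tokens, op=dist.ReduceOp.SUM)  # sum valid target tokens over ranks
    num_tokens_item = num_tokens.item()
    if num_tokens_item == 0:
        # no rank produced any valid target tokens: clear grads and skip this update
        print(f"Warning: 0 valid tokens at step {step}, skipping model update")
        model.zero_grad(set_to_none=True)
        return None
    for param in model.parameters():
        if param.grad is not None:
            param.grad.div_(num_tokens_item)  # normalize gradients by total token count
    optimizer.step()
    model.zero_grad(set_to_none=True)
    return total_loss_sum.item() / num_tokens_item  # per-token training loss

Bailing out before the parameter loop, as the diff does with continue, also skips the learning-rate update and optimizer step for that iteration rather than only guarding the division.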