commit bad88f37df (committed via GitHub)
Author: Jingu Kang
Date:   2025-11-16 17:51:00 -08:00


@@ -212,18 +212,30 @@ for step in range(num_iterations):
         break
     # evaluate the gradient
+    total_loss_sum = torch.tensor(0.0, device=device) # sum of losses
     num_tokens = torch.tensor(0, device=device) # the number of "active" tokens of supervision seen
     for micro_step in range(grad_accum_steps):
         train_inputs, train_targets = next(train_iter)
         with autocast_ctx:
-            loss = model(train_inputs, train_targets)
-        train_loss = loss.detach() # for logging
-        loss = loss / grad_accum_steps # each .backward() is a grad sum => normalize loss here
+            loss = model(train_inputs, train_targets, loss_reduction='sum')
+        total_loss_sum += loss.detach() # for logging
         loss.backward() # accumulate the gradient
         num_tokens += (train_targets >= 0).sum()
     if ddp:
+        dist.all_reduce(total_loss_sum, op=dist.ReduceOp.SUM)
         dist.all_reduce(num_tokens, op=dist.ReduceOp.SUM) # sum over ranks
+    # scale gradients by total number of tokens
+    num_tokens_item = num_tokens.item()
+    if num_tokens_item == 0:
+        print0(f"Warning: the number of valid tokens in train targets is 0 at step {step}, skipping model update")
+        model.zero_grad(set_to_none=True)
+        continue
+    for param in model.parameters():
+        if param.grad is not None:
+            param.grad.div_(num_tokens_item)
     # learning rate scheduler
     lrm = get_lr_multiplier(step)
     for opt in optimizers:
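
Why this is equivalent to a mean: each micro-step backward now adds the gradient of a sum of per-token losses, so after the loop the accumulated gradient is the gradient of the grand total, and a single division by the global token count turns it into the gradient of the exact per-token mean, no matter how many valid tokens each micro-batch held. The old `loss / grad_accum_steps` instead weighted micro-batches equally even when their token counts differed. Below is a minimal sketch checking this numerically, with a toy linear model and made-up data rather than the repo's model, and assuming -1 marks ignored targets to match the `(train_targets >= 0)` count:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
vocab, dim = 7, 5
w = torch.randn(dim, vocab, requires_grad=True)  # stand-in for model params

# two micro-batches with different numbers of valid tokens (-1 = ignored)
xs = [torch.randn(3, dim), torch.randn(6, dim)]
ts = [torch.tensor([2, -1, 4]), torch.tensor([0, 1, -1, 3, 5, 6])]

# path A: sum per-token losses, accumulate grads, divide grads once at the end
num_tokens = 0
for x, t in zip(xs, ts):
    loss = F.cross_entropy(x @ w, t, ignore_index=-1, reduction="sum")
    loss.backward()  # grads accumulate into w.grad across micro-steps
    num_tokens += (t >= 0).sum().item()
w.grad.div_(num_tokens)  # same role as the param.grad.div_(num_tokens_item) loop
grad_a = w.grad.clone()
w.grad = None

# path B: one big batch, mean over all valid tokens in a single backward
x_all, t_all = torch.cat(xs), torch.cat(ts)
F.cross_entropy(x_all @ w, t_all, ignore_index=-1, reduction="mean").backward()
print(torch.allclose(grad_a, w.grad, atol=1e-6))  # True: identical gradients

The zero-token guard in the diff matters for exactly this scheme: with num_tokens at 0 the division is undefined, so the step is skipped and the stale gradients are cleared.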
@@ -236,8 +248,7 @@ for step in range(num_iterations):
     model.zero_grad(set_to_none=True)
     # logging
-    train_loss_item = train_loss.item()
-    num_tokens_item = num_tokens.item()
+    train_loss_item = total_loss_sum.item() / num_tokens_item
     print0(f"Step {step:05d}/{num_iterations:05d} | Training loss: {train_loss_item:.6f}| lrm: {lrm:.6f}| num_tokens: {num_tokens_item:,}")
     wandb_run.log({
         "step": step,