Compare commits

5 Commits

Author           SHA1        Message                                                               Date
Jingu Kang       bad88f37df  Merge 42b05eea7e into bc1fca39f3                                      2025-11-16 17:51:00 -08:00
Andrej Karpathy  bc1fca39f3  mqa -> gqa to reduce confusion                                        2025-11-15 15:43:37 +00:00
kibitzing        42b05eea7e  Add guard against division by zero in chat_sft when num_tokens is 0  2025-10-15 13:24:00 +00:00
kibitzing        f5001141ec  Revert model source to mid                                            2025-10-15 10:29:49 +00:00
kibitzing        b48d210795  Fix gradient accumulation for variable length sequences              2025-10-15 08:56:58 +00:00
2 changed files with 18 additions and 7 deletions
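
Taken together, the kibitzing commits change how chat_sft accumulates gradients when micro-batches carry different numbers of supervised tokens: instead of averaging each micro-batch's mean loss (dividing by grad_accum_steps), the loss is summed per token and the accumulated gradient is divided once by the global token count. A minimal sketch of why the old scheme mis-weights tokens, using hypothetical token counts (plain Python, not code from the diff):

# Sketch: averaging per-micro-batch means is not a true per-token mean
# when micro-batches have different numbers of supervised tokens.
losses_a = [2.0] * 10      # micro-batch A: 10 active tokens, per-token loss 2.0
losses_b = [1.0] * 1000    # micro-batch B: 1000 active tokens, per-token loss 1.0

# Old scheme: mean within each micro-batch, then average across micro-batches.
old = (sum(losses_a) / len(losses_a) + sum(losses_b) / len(losses_b)) / 2
# New scheme: sum everything, divide once by the total token count.
new = (sum(losses_a) + sum(losses_b)) / (len(losses_a) + len(losses_b))

print(old)  # 1.5    -> the 10-token batch weighs as much as the 1000-token batch
print(new)  # ~1.0099 -> every token weighted equally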

@@ -8,7 +8,7 @@ Notable features:
 - norm after token embedding
 - no learnable params in rmsnorm
 - no bias in linear layers
-- Multi-Query Attention (MQA) support for more efficient inference
+- Group-Query Attention (GQA) support for more efficient inference
 """
 import math
@@ -29,7 +29,7 @@ class GPTConfig:
     vocab_size: int = 50304
     n_layer: int = 12
     n_head: int = 6 # number of query heads
-    n_kv_head: int = 6 # number of key/value heads (MQA)
+    n_kv_head: int = 6 # number of key/value heads (GQA)
     n_embd: int = 768

@@ -212,18 +212,30 @@ for step in range(num_iterations):
             break
     # evaluate the gradient
+    total_loss_sum = torch.tensor(0.0, device=device) # sum of losses
     num_tokens = torch.tensor(0, device=device) # the number of "active" tokens of supervision seen
     for micro_step in range(grad_accum_steps):
         train_inputs, train_targets = next(train_iter)
         with autocast_ctx:
-            loss = model(train_inputs, train_targets)
-        train_loss = loss.detach() # for logging
-        loss = loss / grad_accum_steps # each .backward() is a grad sum => normalize loss here
+            loss = model(train_inputs, train_targets, loss_reduction='sum')
+        total_loss_sum += loss.detach() # for logging
         loss.backward() # accumulate the gradient
         num_tokens += (train_targets >= 0).sum()
     if ddp:
+        dist.all_reduce(total_loss_sum, op=dist.ReduceOp.SUM)
         dist.all_reduce(num_tokens, op=dist.ReduceOp.SUM) # sum over ranks
+    # scale gradients by total number of tokens
+    num_tokens_item = num_tokens.item()
+    if num_tokens_item == 0:
+        print0(f"Warning: the number of valid tokens in train targets is 0 at step {step}, skipping model update")
+        model.zero_grad(set_to_none=True)
+        continue
+    for param in model.parameters():
+        if param.grad is not None:
+            param.grad.div_(num_tokens_item)
     # learning rate scheduler
     lrm = get_lr_multiplier(step)
     for opt in optimizers:
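
Why dividing the gradients (rather than the loss) works: each .backward() on a sum-reduced loss adds per-token gradient contributions into .grad, so one division by the global token count at the end is exactly equivalent to backpropagating a single mean loss over every supervised token in the step. The num_tokens == 0 guard then covers the degenerate case where every target is masked (all targets < 0), where that division would otherwise blow up the gradients. A small self-contained equivalence check, using a toy linear model and hypothetical data rather than the training script:

import torch

torch.manual_seed(0)
w = torch.randn(4, requires_grad=True)
# Two "micro-batches" with very different numbers of tokens.
chunks = [torch.randn(10, 4), torch.randn(1000, 4)]
total_tokens = sum(len(c) for c in chunks)

# Scheme from the diff: accumulate sum-reduced losses, divide grads once.
for c in chunks:
    (c @ w).pow(2).sum().backward()
g_accum = w.grad / total_tokens

# Reference: a single mean-reduced loss over all tokens at once.
w.grad = None
all_tokens = torch.cat(chunks)
(all_tokens @ w).pow(2).mean().backward()

print(torch.allclose(g_accum, w.grad, rtol=1e-4))  # True

Note also that because num_tokens is all_reduced before the check, every rank sees the same global count, so when the guard fires all ranks take the skip branch together and DDP stays in sync.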
@@ -236,8 +248,7 @@ for step in range(num_iterations):
     model.zero_grad(set_to_none=True)
     # logging
-    train_loss_item = train_loss.item()
-    num_tokens_item = num_tokens.item()
+    train_loss_item = total_loss_sum.item() / num_tokens_item
     print0(f"Step {step:05d}/{num_iterations:05d} | Training loss: {train_loss_item:.6f}| lrm: {lrm:.6f}| num_tokens: {num_tokens_item:,}")
     wandb_run.log({
         "step": step,