Mirror of https://github.com/karpathy/nanochat.git (synced 2025-12-06 04:12:13 +00:00)

Compare commits: da2a053ce0 ... bad88f37df (5 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | bad88f37df |  |
|  | bc1fca39f3 |  |
|  | 42b05eea7e |  |
|  | f5001141ec |  |
|  | b48d210795 |  |
```diff
@@ -8,7 +8,7 @@ Notable features:
 - norm after token embedding
 - no learnable params in rmsnorm
 - no bias in linear layers
-- Multi-Query Attention (MQA) support for more efficient inference
+- Group-Query Attention (GQA) support for more efficient inference
 """
 
 import math
```
```diff
@@ -29,7 +29,7 @@ class GPTConfig:
     vocab_size: int = 50304
     n_layer: int = 12
     n_head: int = 6 # number of query heads
-    n_kv_head: int = 6 # number of key/value heads (MQA)
+    n_kv_head: int = 6 # number of key/value heads (GQA)
     n_embd: int = 768
 
 
```
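The rename from MQA to GQA reflects that `n_kv_head` may be any divisor of `n_head` rather than exactly 1: each group of `n_head // n_kv_head` query heads shares a single key/value head. Below is a minimal sketch of that grouping, not the repository's attention code; the function name `gqa_attention` and the random tensors are illustrative assumptions.

```python
import torch
import torch.nn.functional as F

def gqa_attention(q, k, v):
    # q: (B, n_head, T, head_dim); k, v: (B, n_kv_head, T, head_dim)
    n_head, n_kv_head = q.size(1), k.size(1)
    assert n_head % n_kv_head == 0
    rep = n_head // n_kv_head
    # expand each key/value head so that `rep` query heads attend to it
    k = k.repeat_interleave(rep, dim=1)  # (B, n_head, T, head_dim)
    v = v.repeat_interleave(rep, dim=1)
    return F.scaled_dot_product_attention(q, k, v, is_causal=True)

B, T, head_dim = 2, 8, 64
n_head, n_kv_head = 6, 2  # n_kv_head == n_head is plain MHA, n_kv_head == 1 is MQA
q = torch.randn(B, n_head, T, head_dim)
k = torch.randn(B, n_kv_head, T, head_dim)
v = torch.randn(B, n_kv_head, T, head_dim)
out = gqa_attention(q, k, v)  # (B, n_head, T, head_dim)
```

The inference benefit is that the KV cache only needs to store `n_kv_head` heads per layer, cutting cache memory and bandwidth by a factor of `n_head / n_kv_head` relative to full multi-head attention.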
```diff
@@ -212,18 +212,30 @@ for step in range(num_iterations):
         break
 
     # evaluate the gradient
+    total_loss_sum = torch.tensor(0.0, device=device) # sum of losses
     num_tokens = torch.tensor(0, device=device) # the number of "active" tokens of supervision seen
     for micro_step in range(grad_accum_steps):
         train_inputs, train_targets = next(train_iter)
         with autocast_ctx:
-            loss = model(train_inputs, train_targets)
-        train_loss = loss.detach() # for logging
-        loss = loss / grad_accum_steps # each .backward() is a grad sum => normalize loss here
+            loss = model(train_inputs, train_targets, loss_reduction='sum')
+        total_loss_sum += loss.detach() # for logging
         loss.backward() # accumulate the gradient
         num_tokens += (train_targets >= 0).sum()
     if ddp:
+        dist.all_reduce(total_loss_sum, op=dist.ReduceOp.SUM)
         dist.all_reduce(num_tokens, op=dist.ReduceOp.SUM) # sum over ranks
 
+    # scale gradients by total number of tokens
+    num_tokens_item = num_tokens.item()
+    if num_tokens_item == 0:
+        print0(f"Warning: the number of valid tokens in train targets is 0 at step {step}, skipping model update")
+        model.zero_grad(set_to_none=True)
+        continue
+
+    for param in model.parameters():
+        if param.grad is not None:
+            param.grad.div_(num_tokens_item)
+
     # learning rate scheduler
     lrm = get_lr_multiplier(step)
     for opt in optimizers:
```
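The hunk above replaces per-micro-batch mean losses (each scaled by `1 / grad_accum_steps`) with sum-reduced losses whose accumulated gradients are divided once by the global count of supervised tokens, so every valid token carries equal weight even when micro-batches differ in how much supervision they contain. A single-process sketch of that pattern, assuming a model that accepts `loss_reduction='sum'` and a data iterator that marks ignored positions with negative targets (no DDP all_reduce here):

```python
import torch

def accumulate_and_scale(model, train_iter, grad_accum_steps, device):
    total_loss_sum = torch.tensor(0.0, device=device)  # running sum of token losses
    num_tokens = torch.tensor(0, device=device)         # running count of supervised tokens
    for _ in range(grad_accum_steps):
        inputs, targets = next(train_iter)
        # loss is a sum over valid target positions (targets >= 0), not a mean
        loss = model(inputs, targets, loss_reduction='sum')
        total_loss_sum += loss.detach()
        loss.backward()                     # grads accumulate d(sum of losses)/d(theta)
        num_tokens += (targets >= 0).sum()  # ignored positions have negative targets
    # under DDP, total_loss_sum and num_tokens would also be all_reduce'd across ranks here
    n = num_tokens.item()
    if n == 0:
        model.zero_grad(set_to_none=True)   # no supervision this step; skip the update
        return None
    for p in model.parameters():
        if p.grad is not None:
            p.grad.div_(n)                  # grads now correspond to the mean per-token loss
    return total_loss_sum.item() / n        # per-token loss, convenient for logging
```

Dividing the accumulated gradients once at the end is mathematically the same as scaling each micro-batch's loss before `backward()`, but it lets the normalizer be the true global token count rather than the fixed `grad_accum_steps`.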
```diff
@@ -236,8 +248,7 @@ for step in range(num_iterations):
     model.zero_grad(set_to_none=True)
 
     # logging
-    train_loss_item = train_loss.item()
-    num_tokens_item = num_tokens.item()
+    train_loss_item = total_loss_sum.item() / num_tokens_item
     print0(f"Step {step:05d}/{num_iterations:05d} | Training loss: {train_loss_item:.6f}| lrm: {lrm:.6f}| num_tokens: {num_tokens_item:,}")
     wandb_run.log({
         "step": step,
```
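The final hunk logs `total_loss_sum / num_tokens`, a per-token average over the whole step (and, under DDP, over all ranks), instead of the detached mean loss of a single micro-batch. When micro-batches carry unequal numbers of supervised tokens, per-batch means and the per-token average disagree; a toy illustration with made-up numbers:

```python
# per-token losses for two micro-batches with unequal supervision
micro_a = [2.0, 2.0, 2.0]  # 3 valid tokens
micro_b = [8.0]            # 1 valid token

mean_of_means = (sum(micro_a) / len(micro_a) + sum(micro_b) / len(micro_b)) / 2
per_token = (sum(micro_a) + sum(micro_b)) / (len(micro_a) + len(micro_b))
print(mean_of_means, per_token)  # 5.0 vs 3.5: the per-token average weights every token equally
```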