mirror of https://github.com/karpathy/nanochat.git
Small fixes to typos
This commit is contained in: commit 4763ce612a
@@ -184,6 +184,7 @@ python -m pytest tests/test_rustbpe.py -v -s
 │   ├── smoltalk.py     # Conglomerate dataset of SmolTalk from HF
 │   └── spellingbee.py  # Task teaching model to spell/count letters
 ├── tests
+│   └── test_engine.py
 │   └── test_rustbpe.py
 └── uv.lock
 ```
@@ -9,9 +9,9 @@ import torch.distributed as dist
 def evaluate_bpb(model, batches, steps, token_bytes):
     """
     Instead of the naive 'mean loss', this function returns the bits per byte (bpb),
-    which is a tokenization vocab size-indepedent metric, meaning you are still comparing
+    which is a tokenization vocab size-independent metric, meaning you are still comparing
     apples:apples if you change the vocab size. The way this works is that instead of just
-    calculating the average loss as usual, you calculate the sum loss, and indepependently
+    calculating the average loss as usual, you calculate the sum loss, and independently
     also the sum bytes (of all the target tokens), and divide. This normalizes the loss by
     the number of bytes that the target tokens represent.
 
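For context, the computation this docstring describes can be sketched as follows. This is a minimal illustration, not nanochat's actual evaluate_bpb (which, per the hunk above, also works over batches and imports torch.distributed, presumably to reduce across ranks); the tensor shapes and the token_bytes lookup table here are assumptions:

```python
import math
import torch
import torch.nn.functional as F

def bits_per_byte(logits, targets, token_bytes):
    # logits: (B, T, V), targets: (B, T) token ids -- assumed shapes.
    # token_bytes: (V,) tensor mapping each token id to its UTF-8 byte length.
    # Sum (not mean) of cross-entropy over all target positions, in nats.
    sum_loss = F.cross_entropy(
        logits.view(-1, logits.size(-1)), targets.view(-1), reduction="sum"
    )
    # Total number of bytes that the target tokens decode to.
    sum_bytes = token_bytes[targets.view(-1)].sum()
    # Convert nats to bits, then normalize by bytes rather than by token count,
    # which keeps the metric comparable across tokenizers with different vocab sizes.
    return (sum_loss / math.log(2)) / sum_bytes
```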
@@ -1,6 +1,6 @@
 """
 Evaluate the Chat model.
-All the generic code lives here, and all the evlauation-specific
+All the generic code lives here, and all the evaluation-specific
 code lives in nanochat directory and is imported from here.
 
 Example runs:
@@ -192,7 +192,7 @@ for step in range(num_iterations):
         })
         model.train()
 
-    # evlauate accuracy of the multiple choice tasks (which are quick to run)
+    # evaluate accuracy of the multiple choice tasks (which are quick to run)
     if last_step or (step > 0 and step % eval_metrics_every == 0):
         model.eval()
         metrics = {}
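For reference, the scheduling idiom visible in this hunk: periodic work inside a training loop is gated on the step counter so it runs every eval_metrics_every steps and always on the final step. A minimal standalone sketch (all values hypothetical, not taken from the script):

```python
num_iterations = 1000    # hypothetical values, for illustration only
eval_metrics_every = 200

for step in range(num_iterations):
    last_step = (step == num_iterations - 1)
    # ... one training step would go here ...
    if last_step or (step > 0 and step % eval_metrics_every == 0):
        # switch to eval mode, compute the quick metrics, then back to training
        print(f"evaluating at step {step}")
```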