Compare commits

...

10 Commits

Author                 SHA1        Message  (Date)
Dipesh Babu            551fe1dc9e  Merge 2dc85662c3 into f66a780f68  (2025-11-14 09:31:50 -06:00)
Andrej                 f66a780f68  Fix torch.dtype mismatching when running engine inline test.  (2025-11-14 07:28:29 -08:00)
Andrej                 4763ce612a  Small fixes to typos  (2025-11-14 07:25:59 -08:00)
Sofie Van Landeghem    c6f5bd67db  revert change of base to sft for quick inline test  (2025-11-14 12:20:03 +01:00)
svlandeg               a2fb3c83a6  fix typos  (2025-11-14 11:20:25 +01:00)
svlandeg               e5efb4b471  add test_engine.py to file structure  (2025-11-14 11:13:42 +01:00)
Dipesh Babu            2dc85662c3  fix: safe DDP cleanup (check initialized PG, not just env)  (2025-11-05 21:22:35 -05:00)
howardgao@outlook.com  b399e43168  fix engine test bug  (2025-11-06 08:56:45 +08:00)
svlandeg               52e85aaf80  Merge branch 'master' into fix/typo  (2025-11-02 13:41:13 +01:00)
svlandeg               70319851fc  fix typo  (2025-10-29 19:48:34 +01:00)
6 changed files with 41 additions and 22 deletions

View File

@@ -184,6 +184,7 @@ python -m pytest tests/test_rustbpe.py -v -s
 │   ├── smoltalk.py       # Conglomerate dataset of SmolTalk from HF
 │   └── spellingbee.py    # Task teaching model to spell/count letters
 ├── tests
+│   └── test_engine.py
 │   └── test_rustbpe.py
 └── uv.lock
 ```

View File

@@ -113,12 +113,24 @@ def print_banner():
     """
     print0(banner)

-def is_ddp():
-    # TODO is there a proper way
-    return int(os.environ.get('RANK', -1)) != -1
+def is_ddp_requested() -> bool:
+    """
+    True if launched by torchrun (env present), even before init.
+    Used to decide whether we *should* initialize a PG.
+    """
+    return all(k in os.environ for k in ("RANK", "LOCAL_RANK", "WORLD_SIZE"))
+
+def is_ddp_initialized() -> bool:
+    """
+    True if torch.distributed is available and the process group is initialized.
+    Used at cleanup to avoid destroying a non-existent PG.
+    """
+    return dist.is_available() and dist.is_initialized()

 def get_dist_info():
-    if is_ddp():
+    if is_ddp_requested():
+        # We rely on torchrun's env to decide if we SHOULD init.
+        # (Initialization itself happens in compute init.)
         assert all(var in os.environ for var in ['RANK', 'LOCAL_RANK', 'WORLD_SIZE'])
         ddp_rank = int(os.environ['RANK'])
         ddp_local_rank = int(os.environ['LOCAL_RANK'])
@@ -161,8 +173,8 @@ def compute_init(device_type="cuda"): # cuda|cpu|mps
     torch.set_float32_matmul_precision("high") # uses tf32 instead of fp32 for matmuls

     # Distributed setup: Distributed Data Parallel (DDP), optional, and requires CUDA
-    ddp, ddp_rank, ddp_local_rank, ddp_world_size = get_dist_info()
-    if ddp and device_type == "cuda":
+    is_ddp_requested, ddp_rank, ddp_local_rank, ddp_world_size = get_dist_info()
+    if is_ddp_requested and device_type == "cuda":
         device = torch.device("cuda", ddp_local_rank)
         torch.cuda.set_device(device) # make "cuda" default to this device
         dist.init_process_group(backend="nccl", device_id=device)
@@ -173,11 +185,11 @@ def compute_init(device_type="cuda"): # cuda|cpu|mps
         if ddp_rank == 0:
             logger.info(f"Distributed world size: {ddp_world_size}")

-    return ddp, ddp_rank, ddp_local_rank, ddp_world_size, device
+    return is_ddp_requested, ddp_rank, ddp_local_rank, ddp_world_size, device

 def compute_cleanup():
     """Companion function to compute_init, to clean things up before script exit"""
-    if is_ddp():
+    if is_ddp_initialized():
         dist.destroy_process_group()

 class DummyWandb:
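
For context, a minimal standalone sketch of the init/cleanup pattern this change enables (not the repo's exact code; helper names mirror the diff above, and the training body is elided):

import os
import torch
import torch.distributed as dist

def is_ddp_requested() -> bool:
    # torchrun exports these env vars before the script starts
    return all(k in os.environ for k in ("RANK", "LOCAL_RANK", "WORLD_SIZE"))

def is_ddp_initialized() -> bool:
    # only True after init_process_group() has actually run
    return dist.is_available() and dist.is_initialized()

def main():
    if is_ddp_requested() and torch.cuda.is_available():
        device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
        torch.cuda.set_device(device)
        dist.init_process_group(backend="nccl", device_id=device)
    try:
        pass  # training / inference work goes here
    finally:
        # safe even if the process group was never created
        # (plain CPU run, or a crash before init_process_group)
        if is_ddp_initialized():
            dist.destroy_process_group()

if __name__ == "__main__":
    main()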

View File

@@ -17,8 +17,9 @@ import signal
 import warnings
 from contextlib import contextmanager
 from collections import deque
-from nanochat.common import compute_init
+from nanochat.common import compute_init, autodetect_device_type
 from nanochat.checkpoint_manager import load_model
+from contextlib import nullcontext

 # -----------------------------------------------------------------------------
 # Calculator tool helpers
@@ -328,6 +329,9 @@ if __name__ == "__main__":
     import time
     # init compute
     ddp, ddp_rank, ddp_local_rank, ddp_world_size, device = compute_init()
+    device_type = autodetect_device_type()
+    autocast_ctx = torch.amp.autocast(device_type=device_type, dtype=torch.bfloat16) if device_type == "cuda" else nullcontext()
+
     # load the model and tokenizer
     model, tokenizer, meta = load_model("base", device, phase="eval")
     bos_token_id = tokenizer.get_bos_token_id()
@@ -340,10 +344,11 @@ if __name__ == "__main__":
     torch.cuda.synchronize()
     t0 = time.time()
     stream = model.generate(prompt_tokens, **kwargs)
-    for token in stream:
-        generated_tokens.append(token)
-        chunk = tokenizer.decode([token])
-        print(chunk, end="", flush=True)
+    with autocast_ctx:
+        for token in stream:
+            generated_tokens.append(token)
+            chunk = tokenizer.decode([token])
+            print(chunk, end="", flush=True)
     print()
     torch.cuda.synchronize()
     t1 = time.time()
@@ -355,11 +360,12 @@ if __name__ == "__main__":
     stream = engine.generate(prompt_tokens, num_samples=1, **kwargs) # note: runs in fp32
     torch.cuda.synchronize()
     t0 = time.time()
-    for token_column, token_masks in stream:
-        token = token_column[0] # only print out the first row
-        generated_tokens.append(token)
-        chunk = tokenizer.decode([token])
-        print(chunk, end="", flush=True)
+    with autocast_ctx:
+        for token_column, token_masks in stream:
+            token = token_column[0] # only print out the first row
+            generated_tokens.append(token)
+            chunk = tokenizer.decode([token])
+            print(chunk, end="", flush=True)
     print()
     torch.cuda.synchronize()
     t1 = time.time()
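
The same dtype fix, condensed into a helper for illustration (a sketch, not the repo's API; `autodetect_device_type` and the generation loop above are assumed to exist as shown in the diff):

from contextlib import nullcontext
import torch

def generation_autocast(device_type: str):
    """bfloat16 autocast on CUDA, a no-op context elsewhere, so fp32
    engine code paths are not mixed with a bf16 model during sampling."""
    if device_type == "cuda":
        return torch.amp.autocast(device_type="cuda", dtype=torch.bfloat16)
    return nullcontext()

# usage, mirroring the inline test above:
#   with generation_autocast(autodetect_device_type()):
#       for token in model.generate(prompt_tokens, **kwargs):
#           print(tokenizer.decode([token]), end="", flush=True)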

View File

@@ -9,9 +9,9 @@ import torch.distributed as dist

 def evaluate_bpb(model, batches, steps, token_bytes):
     """
     Instead of the naive 'mean loss', this function returns the bits per byte (bpb),
-    which is a tokenization vocab size-indepedent metric, meaning you are still comparing
+    which is a tokenization vocab size-independent metric, meaning you are still comparing
     apples:apples if you change the vocab size. The way this works is that instead of just
-    calculating the average loss as usual, you calculate the sum loss, and indepependently
+    calculating the average loss as usual, you calculate the sum loss, and independently
     also the sum bytes (of all the target tokens), and divide. This normalizes the loss by
     the number of bytes that the target tokens represent.
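
As a rough illustration of the normalization this docstring describes (a sketch only; the actual reduction inside `evaluate_bpb` may differ in detail):

import math
import torch

def bits_per_byte(sum_loss_nats: torch.Tensor, sum_bytes: torch.Tensor) -> float:
    # cross-entropy comes out in nats per token; summing over all target
    # tokens, converting nats -> bits, and dividing by the total number of
    # bytes those tokens represent gives a vocab-size-independent metric
    return (sum_loss_nats / math.log(2) / sum_bytes).item()

# e.g. accumulated per batch (illustrative names):
#   sum_loss_nats += per_token_loss.sum(); sum_bytes += token_bytes[targets].sum()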

View File

@@ -1,6 +1,6 @@
 """
 Evaluate the Chat model.
-All the generic code lives here, and all the evlauation-specific
+All the generic code lives here, and all the evaluation-specific
 code lives in nanochat directory and is imported from here.

 Example runs:

View File

@@ -192,7 +192,7 @@ for step in range(num_iterations):
         })
         model.train()

-    # evlauate accuracy of the multiple choice tasks (which are quick to run)
+    # evaluate accuracy of the multiple choice tasks (which are quick to run)
     if last_step or (step > 0 and step % eval_metrics_every == 0):
         model.eval()
         metrics = {}