From aea2af87ddb2173dde6be61202a34d53b2d33ed1 Mon Sep 17 00:00:00 2001 From: svlandeg Date: Fri, 6 Feb 2026 13:47:35 +0100 Subject: [PATCH 1/2] update comment in speedrun --- runs/speedrun.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runs/speedrun.sh b/runs/speedrun.sh index c423ba6..0f5a22d 100644 --- a/runs/speedrun.sh +++ b/runs/speedrun.sh @@ -69,7 +69,7 @@ python -m scripts.tok_eval echo "Waiting for dataset download to complete..." wait $DATASET_DOWNLOAD_PID -# d24 model (slightly overtrained is enough to beat GPT-2 => increase data:params ratio from compute optimal 10.5 (default) to 12) +# d26 model (slightly overtrained is enough to beat GPT-2 => increase data:params ratio from compute optimal 10.5 (default) to 8.5) torchrun --standalone --nproc_per_node=8 -m scripts.base_train -- --depth=26 --target-param-data-ratio=8.5 --device-batch-size=16 --fp8 --run=$WANDB_RUN # evaluate the model: CORE metric, BPB on train/val, and draw samples torchrun --standalone --nproc_per_node=8 -m scripts.base_eval -- --device-batch-size=16 From 895b937905c5ed0bac6c848fb37bd2d2463a510e Mon Sep 17 00:00:00 2001 From: svlandeg Date: Fri, 6 Feb 2026 13:49:11 +0100 Subject: [PATCH 2/2] fix comment wording in speedrun (decrease, not increase) --- runs/speedrun.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runs/speedrun.sh b/runs/speedrun.sh index 0f5a22d..430b2b8 100644 --- a/runs/speedrun.sh +++ b/runs/speedrun.sh @@ -69,7 +69,7 @@ python -m scripts.tok_eval echo "Waiting for dataset download to complete..." 
wait $DATASET_DOWNLOAD_PID -# d26 model (slightly overtrained is enough to beat GPT-2 => increase data:params ratio from compute optimal 10.5 (default) to 8.5) +# d26 model (slightly undertrained to beat GPT-2 => decrease data:params ratio from compute optimal 10.5 (default) to 8.5) torchrun --standalone --nproc_per_node=8 -m scripts.base_train -- --depth=26 --target-param-data-ratio=8.5 --device-batch-size=16 --fp8 --run=$WANDB_RUN # evaluate the model: CORE metric, BPB on train/val, and draw samples torchrun --standalone --nproc_per_node=8 -m scripts.base_eval -- --device-batch-size=16