From c5e8ce370ce2face6feba26b0be076dca1fe5ae0 Mon Sep 17 00:00:00 2001 From: zolopgh Date: Sat, 25 Apr 2026 12:53:11 -0400 Subject: [PATCH] batch baseline speedrun.sh scripts --- runs/pace_stage1_tokenizer.sh | 42 ++++++++++++++++++ runs/pace_stage2a_pretrain.sh | 43 +++++++++++++++++++ runs/pace_stage2b_pretrain.sh | 68 +++++++++++++++++++++++++++++ runs/pace_stage2c_pretrain.sh | 68 +++++++++++++++++++++++++++++ runs/pace_stage2d_pretrain.sh | 68 +++++++++++++++++++++++++++++ runs/pace_stage3_sft.sh | 55 ++++++++++++++++++++++++ runs/pace_submit.sh | 81 +++++++++++++++++++++++++++++++++++ 7 files changed, 425 insertions(+) create mode 100644 runs/pace_stage1_tokenizer.sh create mode 100644 runs/pace_stage2a_pretrain.sh create mode 100644 runs/pace_stage2b_pretrain.sh create mode 100644 runs/pace_stage2c_pretrain.sh create mode 100644 runs/pace_stage2d_pretrain.sh create mode 100644 runs/pace_stage3_sft.sh create mode 100644 runs/pace_submit.sh diff --git a/runs/pace_stage1_tokenizer.sh b/runs/pace_stage1_tokenizer.sh new file mode 100644 index 00000000..8caa6214 --- /dev/null +++ b/runs/pace_stage1_tokenizer.sh @@ -0,0 +1,42 @@ +#!/bin/bash +#SBATCH -N 1 +#SBATCH --ntasks-per-node=1 +#SBATCH --cpus-per-task=24 +#SBATCH --mem=64G +#SBATCH -t 2:00:00 +#SBATCH -J nanochat-stage1-tokenizer +#SBATCH -o runs/logs/stage1_%j.out +#SBATCH -e runs/logs/stage1_%j.err + +# Stage 1 + +set -e +cd "$HOME/scratch/nanochat" + +export OMP_NUM_THREADS=1 +export NANOCHAT_BASE_DIR="$HOME/scratch/nanochat" +mkdir -p "$NANOCHAT_BASE_DIR" +mkdir -p runs/logs + +echo "=== Stage 1: Tokenizer ===" +echo "Base dir: $NANOCHAT_BASE_DIR" +echo "Started: $(date)" + +command -v uv &> /dev/null || curl -LsSf https://astral.sh/uv/install.sh | sh +export PATH="$HOME/.local/bin:$PATH" +[ -d ".venv" ] || uv venv +uv sync --extra gpu +source .venv/bin/activate +python -m nanochat.report reset +python -m nanochat.dataset -n 8 +python -m nanochat.dataset -n 170 & +DATASET_DOWNLOAD_PID=$! 
+ +python -m scripts.tok_train +python -m scripts.tok_eval + +echo "Waiting for full dataset download..." +wait $DATASET_DOWNLOAD_PID + +echo "=== Stage 1 complete: $(date) ===" +echo "Dataset and tokenizer ready in $NANOCHAT_BASE_DIR" diff --git a/runs/pace_stage2a_pretrain.sh b/runs/pace_stage2a_pretrain.sh new file mode 100644 index 00000000..bee6d11f --- /dev/null +++ b/runs/pace_stage2a_pretrain.sh @@ -0,0 +1,43 @@ +#!/bin/bash +#SBATCH -N 1 +#SBATCH -p ice-gpu +#SBATCH --ntasks-per-node=1 +#SBATCH --cpus-per-task=8 +#SBATCH --gres=gpu:2 +#SBATCH --constraint="gpu-h100|gpu-h200" +#SBATCH --mem-per-gpu=48G +#SBATCH -t 3:55:00 +#SBATCH -J nanochat-stage2a +#SBATCH -o runs/logs/stage2a_%j.out +#SBATCH -e runs/logs/stage2a_%j.err + +# Stage 2a + +set -e +cd "$HOME/scratch/nanochat" + +export OMP_NUM_THREADS=1 +export NANOCHAT_BASE_DIR="$HOME/scratch/nanochat" +mkdir -p runs/logs + +WANDB_RUN="${WANDB_RUN:-dummy}" +CHECKPOINT_DIR="$NANOCHAT_BASE_DIR/base_checkpoints/d24" +DONE_MARKER="$CHECKPOINT_DIR/.training_complete" + +echo "=== Stage 2a: Pretraining (chunk 1) ===" +echo "Base dir: $NANOCHAT_BASE_DIR" +echo "WANDB_RUN: $WANDB_RUN" +echo "Started: $(date)" + +source .venv/bin/activate + +torchrun --standalone --nproc_per_node=2 -m scripts.base_train -- \ + --depth=24 \ + --target-param-data-ratio=8 \ + --device-batch-size=16 \ + --save-every=200 \ + --run=$WANDB_RUN + +mkdir -p "$CHECKPOINT_DIR" +touch "$DONE_MARKER" +echo "=== Stage 2a complete: $(date) ===" diff --git a/runs/pace_stage2b_pretrain.sh b/runs/pace_stage2b_pretrain.sh new file mode 100644 index 00000000..7b8ea806 --- /dev/null +++ b/runs/pace_stage2b_pretrain.sh @@ -0,0 +1,68 @@ +#!/bin/bash +#SBATCH -N 1 +#SBATCH -p ice-gpu +#SBATCH --ntasks-per-node=1 +#SBATCH --cpus-per-task=8 +#SBATCH --gres=gpu:2 +#SBATCH --constraint="gpu-h100|gpu-h200" +#SBATCH --mem-per-gpu=48G +#SBATCH -t 3:55:00 +#SBATCH -J nanochat-stage2b +#SBATCH -o runs/logs/stage2b_%j.out +#SBATCH -e runs/logs/stage2b_%j.err + +# 
Stage 2b + +set -e +cd "$HOME/scratch/nanochat" + +export OMP_NUM_THREADS=1 +export NANOCHAT_BASE_DIR="$HOME/scratch/nanochat" +mkdir -p runs/logs + +WANDB_RUN="${WANDB_RUN:-dummy}" +CHECKPOINT_DIR="$NANOCHAT_BASE_DIR/base_checkpoints/d24" +DONE_MARKER="$CHECKPOINT_DIR/.training_complete" + +echo "=== Stage 2b: Pretraining (chunk 2 / auto-resume) ===" +echo "Base dir: $NANOCHAT_BASE_DIR" +echo "Started: $(date)" + +if [ -f "$DONE_MARKER" ]; then + echo "Training already complete (marker: $DONE_MARKER). Nothing to do." + echo "=== Stage 2b skipped: $(date) ===" + exit 0 +fi + +source .venv/bin/activate + +LAST_STEP=$(python -c " +import glob, os, sys +files = glob.glob('${CHECKPOINT_DIR}/model_*.pt') +if not files: + print(0); sys.exit(0) +print(max(int(os.path.basename(f).split('_')[-1].split('.')[0]) for f in files)) +") + +if [ "$LAST_STEP" -eq 0 ]; then + echo "No checkpoint found — starting from scratch" + torchrun --standalone --nproc_per_node=2 -m scripts.base_train -- \ + --depth=24 \ + --target-param-data-ratio=8 \ + --device-batch-size=16 \ + --save-every=200 \ + --run=$WANDB_RUN +else + echo "Resuming from step $LAST_STEP" + torchrun --standalone --nproc_per_node=2 -m scripts.base_train -- \ + --depth=24 \ + --target-param-data-ratio=8 \ + --device-batch-size=16 \ + --save-every=200 \ + --resume-from-step=$LAST_STEP \ + --run=$WANDB_RUN +fi + +mkdir -p "$CHECKPOINT_DIR" +touch "$DONE_MARKER" +echo "=== Stage 2b complete: $(date) ===" diff --git a/runs/pace_stage2c_pretrain.sh b/runs/pace_stage2c_pretrain.sh new file mode 100644 index 00000000..92c748e9 --- /dev/null +++ b/runs/pace_stage2c_pretrain.sh @@ -0,0 +1,68 @@ +#!/bin/bash +#SBATCH -N 1 +#SBATCH -p ice-gpu +#SBATCH --ntasks-per-node=1 +#SBATCH --cpus-per-task=8 +#SBATCH --gres=gpu:2 +#SBATCH --constraint="gpu-h100|gpu-h200" +#SBATCH --mem-per-gpu=48G +#SBATCH -t 3:55:00 +#SBATCH -J nanochat-stage2c +#SBATCH -o runs/logs/stage2c_%j.out +#SBATCH -e runs/logs/stage2c_%j.err + +# Stage 2c + +set -e 
+cd "$HOME/scratch/nanochat" + +export OMP_NUM_THREADS=1 +export NANOCHAT_BASE_DIR="$HOME/scratch/nanochat" +mkdir -p runs/logs + +WANDB_RUN="${WANDB_RUN:-dummy}" +CHECKPOINT_DIR="$NANOCHAT_BASE_DIR/base_checkpoints/d24" +DONE_MARKER="$CHECKPOINT_DIR/.training_complete" + +echo "=== Stage 2c: Pretraining (chunk 3 / auto-resume) ===" +echo "Base dir: $NANOCHAT_BASE_DIR" +echo "Started: $(date)" + +if [ -f "$DONE_MARKER" ]; then + echo "Training already complete (marker: $DONE_MARKER). Nothing to do." + echo "=== Stage 2c skipped: $(date) ===" + exit 0 +fi + +source .venv/bin/activate + +LAST_STEP=$(python -c " +import glob, os, sys +files = glob.glob('${CHECKPOINT_DIR}/model_*.pt') +if not files: + print(0); sys.exit(0) +print(max(int(os.path.basename(f).split('_')[-1].split('.')[0]) for f in files)) +") + +if [ "$LAST_STEP" -eq 0 ]; then + echo "No checkpoint found — starting from scratch" + torchrun --standalone --nproc_per_node=2 -m scripts.base_train -- \ + --depth=24 \ + --target-param-data-ratio=8 \ + --device-batch-size=16 \ + --save-every=200 \ + --run=$WANDB_RUN +else + echo "Resuming from step $LAST_STEP" + torchrun --standalone --nproc_per_node=2 -m scripts.base_train -- \ + --depth=24 \ + --target-param-data-ratio=8 \ + --device-batch-size=16 \ + --save-every=200 \ + --resume-from-step=$LAST_STEP \ + --run=$WANDB_RUN +fi + +mkdir -p "$CHECKPOINT_DIR" +touch "$DONE_MARKER" +echo "=== Stage 2c complete: $(date) ===" diff --git a/runs/pace_stage2d_pretrain.sh b/runs/pace_stage2d_pretrain.sh new file mode 100644 index 00000000..70030001 --- /dev/null +++ b/runs/pace_stage2d_pretrain.sh @@ -0,0 +1,68 @@ +#!/bin/bash +#SBATCH -N 1 +#SBATCH -p ice-gpu +#SBATCH --ntasks-per-node=1 +#SBATCH --cpus-per-task=8 +#SBATCH --gres=gpu:2 +#SBATCH --constraint="gpu-h100|gpu-h200" +#SBATCH --mem-per-gpu=48G +#SBATCH -t 3:55:00 +#SBATCH -J nanochat-stage2d +#SBATCH -o runs/logs/stage2d_%j.out +#SBATCH -e runs/logs/stage2d_%j.err + +# Stage 2d + +set -e +cd 
"$HOME/scratch/nanochat" + +export OMP_NUM_THREADS=1 +export NANOCHAT_BASE_DIR="$HOME/scratch/nanochat" +mkdir -p runs/logs + +WANDB_RUN="${WANDB_RUN:-dummy}" +CHECKPOINT_DIR="$NANOCHAT_BASE_DIR/base_checkpoints/d24" +DONE_MARKER="$CHECKPOINT_DIR/.training_complete" + +echo "=== Stage 2d: Pretraining (chunk 4 / auto-resume) ===" +echo "Base dir: $NANOCHAT_BASE_DIR" +echo "Started: $(date)" + +if [ -f "$DONE_MARKER" ]; then + echo "Training already complete (marker: $DONE_MARKER). Nothing to do." + echo "=== Stage 2d skipped: $(date) ===" + exit 0 +fi + +source .venv/bin/activate + +LAST_STEP=$(python -c " +import glob, os, sys +files = glob.glob('${CHECKPOINT_DIR}/model_*.pt') +if not files: + print(0); sys.exit(0) +print(max(int(os.path.basename(f).split('_')[-1].split('.')[0]) for f in files)) +") + +if [ "$LAST_STEP" -eq 0 ]; then + echo "No checkpoint found — starting from scratch" + torchrun --standalone --nproc_per_node=2 -m scripts.base_train -- \ + --depth=24 \ + --target-param-data-ratio=8 \ + --device-batch-size=16 \ + --save-every=200 \ + --run=$WANDB_RUN +else + echo "Resuming from step $LAST_STEP" + torchrun --standalone --nproc_per_node=2 -m scripts.base_train -- \ + --depth=24 \ + --target-param-data-ratio=8 \ + --device-batch-size=16 \ + --save-every=200 \ + --resume-from-step=$LAST_STEP \ + --run=$WANDB_RUN +fi + +mkdir -p "$CHECKPOINT_DIR" +touch "$DONE_MARKER" +echo "=== Stage 2d complete: $(date) ===" diff --git a/runs/pace_stage3_sft.sh b/runs/pace_stage3_sft.sh new file mode 100644 index 00000000..f354f7c4 --- /dev/null +++ b/runs/pace_stage3_sft.sh @@ -0,0 +1,55 @@ +#!/bin/bash +#SBATCH -N 1 +#SBATCH -p ice-gpu +#SBATCH --ntasks-per-node=1 +#SBATCH --cpus-per-task=8 +#SBATCH --gres=gpu:2 +#SBATCH --constraint="gpu-h100|gpu-h200" +#SBATCH --mem-per-gpu=48G +#SBATCH -t 3:55:00 +#SBATCH -J nanochat-stage3-sft +#SBATCH -o runs/logs/stage3_%j.out +#SBATCH -e runs/logs/stage3_%j.err + +# Stage 3 + +set -e +cd "$HOME/scratch/nanochat" + +export 
OMP_NUM_THREADS=1 +export NANOCHAT_BASE_DIR="$HOME/scratch/nanochat" +mkdir -p runs/logs + +WANDB_RUN="${WANDB_RUN:-dummy}" + +echo "=== Stage 3: Eval + SFT ===" +echo "Base dir: $NANOCHAT_BASE_DIR" +echo "WANDB_RUN: $WANDB_RUN" +echo "Started: $(date)" + +CHECKPOINT_DIR="$NANOCHAT_BASE_DIR/base_checkpoints/d24" +DONE_MARKER="$CHECKPOINT_DIR/.training_complete" +if [ ! -f "$DONE_MARKER" ]; then + echo "ERROR: pretraining did not finish — missing $DONE_MARKER" + echo "Re-run pretrain chunks 2a–2d until the marker is created before running stage 3." + exit 1 +fi + +source .venv/bin/activate + +torchrun --standalone --nproc_per_node=2 -m scripts.base_eval -- \ + --device-batch-size=16 + +curl -fL -o "$NANOCHAT_BASE_DIR/identity_conversations.jsonl" \ + https://karpathy-public.s3.us-west-2.amazonaws.com/identity_conversations.jsonl + +torchrun --standalone --nproc_per_node=2 -m scripts.chat_sft -- \ + --device-batch-size=16 \ + --run=$WANDB_RUN + +torchrun --standalone --nproc_per_node=2 -m scripts.chat_eval -- -i sft + +python -m nanochat.report generate + +echo "=== Stage 3 complete: $(date) ===" + diff --git a/runs/pace_submit.sh b/runs/pace_submit.sh new file mode 100644 index 00000000..951d8849 --- /dev/null +++ b/runs/pace_submit.sh @@ -0,0 +1,81 @@ +#!/bin/bash + +# Pipeline: +# Stage 1 — CPU: tokenizer + dataset +# Stage 2a — GPU: pretraining chunk 1 +# Stage 2b — GPU: auto-resume chunk 2 +# Stage 2c — GPU: auto-resume chunk 3 +# Stage 2d — GPU: auto-resume chunk 4 +# Stage 3 — GPU: base eval + SFT + chat eval + report +# +# Usage (from repo root): +# bash runs/pace_submit.sh +# +# Optional W&B logging: +# WANDB_RUN=my-run bash runs/pace_submit.sh + +set -e +cd "$HOME/scratch/nanochat" + +mkdir -p runs/logs + +WANDB_RUN="${WANDB_RUN:-dummy}" +export WANDB_RUN + +echo "Submitting nanochat full pipeline..." 
+echo "WANDB_RUN=$WANDB_RUN" +echo "" + +# Stage 1 +JOB1=$(sbatch --parsable \ + --export=ALL,WANDB_RUN=$WANDB_RUN \ + runs/pace_stage1_tokenizer.sh) +echo "Stage 1 submitted: job $JOB1 (tokenizer + dataset)" + +# Stage 2a +JOB2A=$(sbatch --parsable \ + --dependency=afterok:$JOB1 \ + --export=ALL,WANDB_RUN=$WANDB_RUN \ + runs/pace_stage2a_pretrain.sh) +echo "Stage 2a submitted: job $JOB2A (pretrain chunk 1, depends on $JOB1)" + +# Stage 2b +JOB2B=$(sbatch --parsable \ + --dependency=afterany:$JOB2A \ + --export=ALL,WANDB_RUN=$WANDB_RUN \ + runs/pace_stage2b_pretrain.sh) +echo "Stage 2b submitted: job $JOB2B (pretrain chunk 2, depends on $JOB2A)" + +# Stage 2c +JOB2C=$(sbatch --parsable \ + --dependency=afterany:$JOB2B \ + --export=ALL,WANDB_RUN=$WANDB_RUN \ + runs/pace_stage2c_pretrain.sh) +echo "Stage 2c submitted: job $JOB2C (pretrain chunk 3, depends on $JOB2B)" + +# Stage 2d +JOB2D=$(sbatch --parsable \ + --dependency=afterany:$JOB2C \ + --export=ALL,WANDB_RUN=$WANDB_RUN \ + runs/pace_stage2d_pretrain.sh) +echo "Stage 2d submitted: job $JOB2D (pretrain chunk 4, depends on $JOB2C)" + +# Stage 3 +JOB3=$(sbatch --parsable \ + --dependency=afterok:$JOB2D \ + --export=ALL,WANDB_RUN=$WANDB_RUN \ + runs/pace_stage3_sft.sh) +echo "Stage 3 submitted: job $JOB3 (eval + SFT, depends on $JOB2D)" + +echo "" +echo "All jobs queued. Monitor with:" +echo " squeue -u $USER" +echo " tail -f runs/logs/stage1_${JOB1}.out" +echo " tail -f runs/logs/stage2a_${JOB2A}.out" +echo " tail -f runs/logs/stage2b_${JOB2B}.out" +echo " tail -f runs/logs/stage2c_${JOB2C}.out" +echo " tail -f runs/logs/stage2d_${JOB2D}.out" +echo " tail -f runs/logs/stage3_${JOB3}.out" +echo "" +echo "To cancel everything:" +echo " scancel $JOB1 $JOB2A $JOB2B $JOB2C $JOB2D $JOB3"