#!/bin/bash

# This script is the "Best ChatGPT clone that $100 can buy".
# It is designed to run in ~4 hours on an 8XH100 node at $3/GPU/hour.

# 1) Example launch (simplest):
# bash speedrun.sh
# 2) Example launch in a screen session (because the run takes ~4 hours):
# screen -L -Logfile speedrun.log -S speedrun bash speedrun.sh
# 3) Example launch with wandb logging, but see below for setting up wandb first:
# WANDB_RUN=speedrun screen -L -Logfile speedrun.log -S speedrun bash speedrun.sh

# Intermediate artifacts normally default to ~/.cache/nanochat-moe; this run keeps
# them (and the data) under /thullms/$USER instead.
# NOTE: USER is hardcoded to this cluster account; adjust for your environment.
export USER="limh23"
export OMP_NUM_THREADS=1
export NANOCHAT_BASE_DIR="/thullms/$USER/.cache/nanochat-moe"
export NANOCHAT_DATA_DIR="/thullms/$USER/.cache/nanochat-moe-data"
mkdir -p "$NANOCHAT_BASE_DIR"
mkdir -p "$NANOCHAT_DATA_DIR"

# Use tokenizer from nanochat (not nanochat-moe)
# Create a symlink to nanochat's tokenizer directory if it doesn't exist
NANOCHAT_TOKENIZER_DIR="$HOME/.cache/nanochat/tokenizer"
MOE_TOKENIZER_DIR="$NANOCHAT_BASE_DIR/tokenizer"
if [ -d "$NANOCHAT_TOKENIZER_DIR" ] && [ ! -e "$MOE_TOKENIZER_DIR" ]; then
    echo "Creating symlink to nanochat tokenizer: $MOE_TOKENIZER_DIR -> $NANOCHAT_TOKENIZER_DIR"
    ln -s "$NANOCHAT_TOKENIZER_DIR" "$MOE_TOKENIZER_DIR"
elif [ ! -d "$NANOCHAT_TOKENIZER_DIR" ]; then
    echo "Warning: nanochat tokenizer directory not found at $NANOCHAT_TOKENIZER_DIR"
    echo "You may need to train the tokenizer first using nanochat's tok_train.py"
fi
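
# Not part of the original flow: if the symlink above ever goes stale (e.g. the nanochat
# cache moved), it can be recreated by hand along these lines:
# rm -f "$MOE_TOKENIZER_DIR" && ln -s "$NANOCHAT_TOKENIZER_DIR" "$MOE_TOKENIZER_DIR"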

# # -----------------------------------------------------------------------------
# # China mirror configuration (environment mirror setup)

# # Configure pip mirror
# mkdir -p ~/.pip
# cat > ~/.pip/pip.conf << 'EOF'
# [global]
# index-url = https://pypi.tuna.tsinghua.edu.cn/simple
# trusted-host = pypi.tuna.tsinghua.edu.cn
# timeout = 1000
#
# [install]
# trusted-host = pypi.tuna.tsinghua.edu.cn
# EOF

# # Configure Rust mirror
# export RUSTUP_DIST_SERVER=https://rsproxy.cn
# export RUSTUP_UPDATE_ROOT=https://rsproxy.cn/rustup
# SHELL_RC="$HOME/.bashrc"
# if [[ "$OSTYPE" == "darwin"* ]]; then
#     SHELL_RC="$HOME/.zshrc"
# fi
# if ! grep -q "RUSTUP_DIST_SERVER" "$SHELL_RC" 2>/dev/null; then
#     cat >> "$SHELL_RC" << 'EOF'
#
# # Rust mirror configuration
# export RUSTUP_DIST_SERVER=https://rsproxy.cn
# export RUSTUP_UPDATE_ROOT=https://rsproxy.cn/rustup
# EOF
# fi

# # Configure Cargo mirror
# mkdir -p ~/.cargo
# cat > ~/.cargo/config << 'EOF'
# [source.crates-io]
# replace-with = 'rsproxy-sparse'
#
# [source.rsproxy-sparse]
# registry = "sparse+https://rsproxy.cn/index/"
#
# [net]
# git-fetch-with-cli = true
# EOF

# # Configure HuggingFace mirror
# if ! grep -q "HF_ENDPOINT" "$SHELL_RC" 2>/dev/null; then
#     echo 'export HF_ENDPOINT=https://hf-mirror.com' >> "$SHELL_RC"
# fi
# export HF_ENDPOINT=https://hf-mirror.com
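
# Optional check (not in the original script): if the mirror block above is re-enabled,
# confirm the HuggingFace endpoint is visible to the current shell before downloads start.
# echo "HF_ENDPOINT=${HF_ENDPOINT:-<unset>}"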

# # -----------------------------------------------------------------------------
# # Python venv setup with uv

# # install uv (if not already installed)
# if ! command -v uv &> /dev/null; then
#     pip3 install uv -i https://pypi.tuna.tsinghua.edu.cn/simple
# fi
# # create a .venv local virtual environment (if it doesn't exist)
# [ -d ".venv" ] || uv venv
# # install the repo dependencies with the China mirror
# export UV_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple
# uv sync --extra gpu
# # activate the venv so that `python` uses the project's venv instead of the system python
cd "$HOME/nanochat-MoE"
source .venv/bin/activate
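
# Optional sanity check (not part of the original flow): confirm that `python` now
# resolves inside the project venv rather than the system installation.
# python -c 'import sys; print(sys.prefix)'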

# # -----------------------------------------------------------------------------
# wandb setup
# If you wish to use wandb for logging (it's nice, and recommended):
# 1) Make sure to first log in to wandb, e.g. run:
#    `wandb login`
# 2) Set the WANDB_RUN environment variable when running this script, e.g.:
#    `WANDB_RUN=d26 bash speedrun.sh`
# NOTE: this fork pins the run name below, which overrides any WANDB_RUN passed in
# the environment, so the "dummy" fallback is never reached.
WANDB_RUN=moe
if [ -z "$WANDB_RUN" ]; then
    # by default use "dummy": it's handled as a special case and skips logging to wandb
    WANDB_RUN=dummy
fi
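
# If you want the upstream behavior back (run name taken from the environment, "dummy"
# when unset), one possible tweak (not what this script currently does) is to replace
# the pinned assignment above with:
#   WANDB_RUN=${WANDB_RUN:-moe}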

# -----------------------------------------------------------------------------
# # During the course of the run, we will be writing markdown reports to the report/
# # directory in the base dir. This command clears it out and writes a header section
# # with a bunch of system info and a timestamp that marks the start of the run.
# python -m nanochat.report reset

# # -----------------------------------------------------------------------------
# # Tokenizer

# # Install Rust / Cargo (if not already installed)
# if ! command -v rustc &> /dev/null; then
#     curl --proto '=https' --tlsv1.2 -sSf https://rsproxy.cn/rustup-init.sh | sh -s -- -y
#     source "$HOME/.cargo/env"
# fi

# # Build the rustbpe Tokenizer
# uv run maturin develop --release --manifest-path rustbpe/Cargo.toml

# # Download the first ~2B characters of the pretraining dataset
# # look at dev/repackage_data_reference.py for details on how this data was prepared
# # each data shard is ~250M chars
# # so we download 2e9 / 250e6 = 8 data shards at this point
# # each shard is ~100MB of text (compressed), so this is about ~800MB of data on disk
# python -m nanochat.dataset -n 8
# # Immediately also kick off downloading more shards in the background while the tokenizer trains
# # See the comment below for why 240 is the right number here
# python -m nanochat.dataset -n 240 &
# DATASET_DOWNLOAD_PID=$!
# # train the tokenizer with vocab size 2**16 = 65536 on ~2B characters of data
# python -m scripts.tok_train --max_chars=2000000000
# # evaluate the tokenizer (report compression ratio etc.)
# python -m scripts.tok_eval

# -----------------------------------------------------------------------------
# Base model (pretraining)

# The d20 model is 561M parameters.
# Chinchilla says #tokens = 20x #params, so we need 561e6 * 20 ~= 11.2B tokens.
# Assuming our tokenizer is ~4.8 chars/token, this is 11.2B * 4.8 ~= 54B chars.
# At 250M chars/shard, this is 54B / 250M ~= 216 shards needed for pretraining.
# Round up to 240 for safety. At ~100MB/shard, this downloads ~24GB of data to disk.
# (The total number of shards available in the entire dataset is 1822.)
# echo "Waiting for dataset download to complete..."
# wait $DATASET_DOWNLOAD_PID
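
# Quick numeric check of the estimate above (hypothetical helper, not used by the run):
# python - <<'PYEOF'
# params = 561e6              # d20 parameter count (from the comment above)
# tokens = 20 * params        # Chinchilla: ~20 tokens per parameter
# chars = tokens * 4.8        # ~4.8 chars/token (from the comment above)
# shards = chars / 250e6      # ~250M chars per shard
# print(f"{tokens/1e9:.1f}B tokens, {chars/1e9:.1f}B chars, {shards:.1f} shards (rounded up to 240)")
# PYEOF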

# Learning-rate schedule (overridable from the environment)
MIN_LR=${MIN_LR:-6e-5}
LEARNING_RATE=${LEARNING_RATE:-6e-4}
# Number of processes/GPUs to use
NPROC_PER_NODE=8
# Master port for distributed training (torchrun's default is 29500; this script uses
# 29501 unless overridden). Set it to avoid port conflicts when running multiple
# torchrun jobs at the same time, e.g.: MASTER_PORT=29502 bash speedrun.sh
MASTER_PORT=${MASTER_PORT:-29501}
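
# Example launch with a different LR schedule (values are illustrative only, not tuned):
# MIN_LR=3e-5 LEARNING_RATE=3e-4 bash speedrun.sh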

# pretrain the model; stdout/stderr are appended to a log file named after the LR settings
MASTER_PORT=$MASTER_PORT torchrun --standalone --nproc_per_node=$NPROC_PER_NODE -m scripts.base_train >> "$NANOCHAT_BASE_DIR/d6_min_lr${MIN_LR}_max_lr${LEARNING_RATE}.log" 2>&1
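
# To follow training progress from another shell with the default settings above, e.g.:
# tail -f /thullms/limh23/.cache/nanochat-moe/d6_min_lr6e-5_max_lr6e-4.log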