# Use Google Cloud's Deep Learning Container for PyTorch with GPU support.
# This image is optimized for Vertex AI and includes CUDA, cuDNN, and PyTorch.
FROM gcr.io/deeplearning-platform-release/pytorch-gpu.2-2.py310

# Set the working directory.
WORKDIR /app

# Install uv, Rust, and other system dependencies.
RUN apt-get update && apt-get install -y curl build-essential
RUN curl -LsSf https://astral.sh/uv/install.sh | sh
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Add uv, cargo, and the future venv bin to the PATH.
ENV PATH="/root/.local/bin:/root/.cargo/bin:/app/.venv/bin:${PATH}"

# Copy the entire project into the Docker image.
COPY . .

# Create a virtual environment.
RUN uv venv

# Install Python dependencies using uv.
RUN uv sync --extra gpu

# Install the nanochat package in editable mode.
RUN uv pip install -e .

# Install maturin, which is a build dependency.
RUN uv pip install maturin

# Build the rustbpe tokenizer.
# The maturin executable from the venv should be on the PATH now.
RUN maturin develop --release --manifest-path rustbpe/Cargo.toml

# Set PYTHONPATH to include /app so that the nanochat module can be imported.
ENV PYTHONPATH="/app:${PYTHONPATH}"

# Set the entrypoint to use the virtual environment's Python.
ENTRYPOINT ["/app/.venv/bin/python"]
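
# Example usage (a minimal sketch: assumes Docker with the NVIDIA container
# toolkit on the host, and the image tag "nanochat" is just a placeholder).
# Because the entrypoint is the venv's Python, any arguments after the image
# name are passed straight to that interpreter:
#   docker build -t nanochat .
#   docker run --rm --gpus all nanochat -c "import nanochat; print('nanochat imports OK')"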