nanochat/pyproject.toml

[project]
name = "nanochat"
version = "0.1.0"
description = "the minimal full-stack ChatGPT clone"
readme = "README.md"
requires-python = ">=3.10"
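
# Runtime dependencies, roughly grouped: the Hugging Face data/tokenizer stack
# (datasets, tokenizers, transformers), torch for training, fastapi + uvicorn
# for serving the chat UI, and wandb for experiment logging.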
dependencies = [
"datasets>=4.0.0",
"fastapi>=0.117.1",
"ipykernel>=7.1.0",
"kernels>=0.11.7",
"matplotlib>=3.10.8",
"psutil>=7.1.0",
"python-dotenv>=1.2.1",
"regex>=2025.9.1",
"rustbpe>=0.1.0",
"scipy>=1.15.3",
"setuptools>=80.9.0",
"tabulate>=0.9.0",
"tiktoken>=0.11.0",
"tokenizers>=0.22.0",
"torch>=2.9.0",
"transformers>=4.57.3",
"uvicorn>=0.36.0",
"wandb>=0.21.3",
"zstandard>=0.25.0",
]
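
# Dev-only tools live in a PEP 735 dependency group; uv includes the "dev"
# group by default on `uv sync`, so `uv run pytest` works out of the box.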
[dependency-groups]
dev = [
"pytest>=8.0.0",
]
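
# Pytest discovers test_*.py files under tests/; long-running cases are
# tagged `slow` so they can be skipped with `pytest -m "not slow"`.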
[tool.pytest.ini_options]
markers = [
"slow: marks tests as slow (deselect with '-m \"not slow\"')",
]
testpaths = ["tests"]
python_files = ["test_*.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]

# Route torch to the CUDA 12.8 or CPU-only wheel index, selected via extras.
[tool.uv.sources]
torch = [
    { index = "pytorch-cpu", extra = "cpu" },
    { index = "pytorch-cu128", extra = "gpu" },
]
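
# Both indexes are opt-in (`explicit = true`): uv consults them only for
# packages pinned to them in [tool.uv.sources] above, here just torch.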
[[tool.uv.index]]
name = "pytorch-cpu"
url = "https://download.pytorch.org/whl/cpu"
explicit = true

[[tool.uv.index]]
name = "pytorch-cu128"
url = "https://download.pytorch.org/whl/cu128"
explicit = true
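
# Exactly one torch flavor is chosen at install time via an extra.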
[project.optional-dependencies]
cpu = [
    "torch>=2.9.1",
]
gpu = [
    "torch>=2.9.1",
    "nvidia-nvshmem-cu12>=3.3",
]
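
# Declaring cpu and gpu as conflicting extras makes uv lock them as separate
# resolution forks and refuse to install both into one environment.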
[tool.uv]
conflicts = [
    [
        { extra = "cpu" },
        { extra = "gpu" },
    ],
]
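
# Typical installs (assuming a uv release new enough to support `conflicts`):
#   uv sync --extra cpu   # CPU-only torch wheels
#   uv sync --extra gpu   # CUDA 12.8 torch wheels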