From 94b73ad29aa21da6267e93db6035223f15f692fc Mon Sep 17 00:00:00 2001
From: Marcin Bogdanski
Date: Fri, 3 Apr 2026 20:39:55 +0000
Subject: [PATCH 1/5] fix: initialize smear and backout lambdas in init_weights

---
 nanochat/gpt.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/nanochat/gpt.py b/nanochat/gpt.py
index 0b822e41..b2656508 100644
--- a/nanochat/gpt.py
+++ b/nanochat/gpt.py
@@ -237,6 +237,8 @@ class GPT(nn.Module):
         # Decaying x0 init: earlier layers get more input embedding blending
         for i in range(n_layer):
             self.x0_lambdas.data[i] = 0.20 - (0.15 * i / max(n_layer - 1, 1))
+        self.smear_lambda.fill_(0.0)
+        self.backout_lambda.fill_(0.2)
 
         # Value embeddings (init like c_v: uniform with same std)
         for ve in self.value_embeds.values():

From 8ef90bc154e8ffaa5ce53db4a0aef3d22ea73a6b Mon Sep 17 00:00:00 2001
From: svlandeg
Date: Mon, 13 Apr 2026 10:50:57 +0200
Subject: [PATCH 2/5] add setuptools for CPU run

---
 pyproject.toml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pyproject.toml b/pyproject.toml
index a6e2cca6..0527369f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -54,6 +54,7 @@ explicit = true
 
 [project.optional-dependencies]
 cpu = [
+    "setuptools>=65.0.0",
     "torch==2.9.1",
 ]
 gpu = [

From 12839c11e3cfa4c51ca5687e8406e0de3025ab33 Mon Sep 17 00:00:00 2001
From: svlandeg
Date: Mon, 13 Apr 2026 11:20:38 +0200
Subject: [PATCH 3/5] update uv lock

---
 uv.lock | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/uv.lock b/uv.lock
index 94558149..c81d3303 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1507,6 +1507,7 @@ dependencies = [
 
 [package.optional-dependencies]
 cpu = [
+    { name = "setuptools" },
     { name = "torch", version = "2.9.1", source = { registry = "https://download.pytorch.org/whl/cpu" }, marker = "(sys_platform == 'darwin' and extra == 'extra-8-nanochat-cpu') or (extra == 'extra-8-nanochat-cpu' and extra == 'extra-8-nanochat-gpu')" },
     { name = "torch", version = "2.9.1+cpu", source = { registry = "https://download.pytorch.org/whl/cpu" }, marker = "(sys_platform != 'darwin' and extra == 'extra-8-nanochat-cpu') or (extra == 'extra-8-nanochat-cpu' and extra == 'extra-8-nanochat-gpu')" },
 ]
@@ -1530,6 +1531,7 @@ requires-dist = [
     { name = "kernels", specifier = ">=0.11.7" },
     { name = "psutil", specifier = ">=7.1.0" },
     { name = "rustbpe", specifier = ">=0.1.0" },
+    { name = "setuptools", marker = "extra == 'cpu'", specifier = ">=65.0.0" },
     { name = "tiktoken", specifier = ">=0.11.0" },
     { name = "tokenizers", specifier = ">=0.22.0" },
     { name = "torch", specifier = "==2.9.1" },

From 9822cc7424aabffd0601f4ddfb465dba269f9765 Mon Sep 17 00:00:00 2001
From: Sofie Van Landeghem
Date: Mon, 13 Apr 2026 14:03:18 +0200
Subject: [PATCH 4/5] use nn.init and initialize smear gate's weight as well

---
 nanochat/gpt.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/nanochat/gpt.py b/nanochat/gpt.py
index b2656508..96010419 100644
--- a/nanochat/gpt.py
+++ b/nanochat/gpt.py
@@ -237,8 +237,9 @@ class GPT(nn.Module):
         # Decaying x0 init: earlier layers get more input embedding blending
         for i in range(n_layer):
             self.x0_lambdas.data[i] = 0.20 - (0.15 * i / max(n_layer - 1, 1))
-        self.smear_lambda.fill_(0.0)
-        self.backout_lambda.fill_(0.2)
+        torch.nn.init.zeros_(self.smear_lambda)
+        torch.nn.init.constant_(self.backout_lambda, 0.2)
+        torch.nn.init.uniform_(self.smear_gate.weight, 0.0, 0.02)
 
         # Value embeddings (init like c_v: uniform with same std)
         for ve in self.value_embeds.values():

From a3ca42a678c0090e5d4f6b6d5be5782efdd0a225 Mon Sep 17 00:00:00 2001
From: Sofie Van Landeghem
Date: Mon, 13 Apr 2026 14:17:23 +0200
Subject: [PATCH 5/5] add comment

---
 nanochat/gpt.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/nanochat/gpt.py b/nanochat/gpt.py
index 96010419..07a1eae8 100644
--- a/nanochat/gpt.py
+++ b/nanochat/gpt.py
@@ -237,6 +237,8 @@ class GPT(nn.Module):
         # Decaying x0 init: earlier layers get more input embedding blending
         for i in range(n_layer):
             self.x0_lambdas.data[i] = 0.20 - (0.15 * i / max(n_layer - 1, 1))
+
+        # Smear/backout scalars and smear gate must be explicitly initialized
         torch.nn.init.zeros_(self.smear_lambda)
         torch.nn.init.constant_(self.backout_lambda, 0.2)
         torch.nn.init.uniform_(self.smear_gate.weight, 0.0, 0.02)
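
Editor's note (not part of the patch series): below is a minimal sketch of the init pattern introduced by PATCH 4/5, applied to stand-in tensors rather than the real GPT module. The names mirror the attributes touched in gpt.py, but the shapes and the smear_gate dimensions are illustrative assumptions only.

import torch
import torch.nn as nn

# Stand-ins with the same roles as the parameters initialized above.
# Shapes and the gate's in/out dims are assumptions, not taken from gpt.py.
smear_lambda = nn.Parameter(torch.empty(()))     # scalar blend coefficient
backout_lambda = nn.Parameter(torch.empty(()))   # scalar back-out coefficient
smear_gate = nn.Linear(8, 1, bias=False)         # small gate projection

# Same init calls as PATCH 4/5; nn.init works in-place under no_grad.
torch.nn.init.zeros_(smear_lambda)
torch.nn.init.constant_(backout_lambda, 0.2)
torch.nn.init.uniform_(smear_gate.weight, 0.0, 0.02)

# Quick sanity check of the resulting values.
assert smear_lambda.item() == 0.0
assert abs(backout_lambda.item() - 0.2) < 1e-6
assert 0.0 <= smear_gate.weight.min().item() and smear_gate.weight.max().item() <= 0.02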