Mirror of https://github.com/karpathy/nanochat.git (synced 2025-12-06 04:12:13 +00:00)

Compare commits: b31111f9cf...dae01075f0 (4 commits)
| Author | SHA1 | Date |
|---|---|---|
| | dae01075f0 | |
| | 4a87a0d19f | |
| | 11e68bf442 | |
| | 919ea572b0 | |
```diff
@@ -0,0 +1,33 @@
+# Import all submodules used by scripts
+from . import common
+from . import tokenizer
+from . import checkpoint_manager
+from . import core_eval
+from . import gpt
+from . import dataloader
+from . import loss_eval
+from . import engine
+from . import dataset
+from . import report
+from . import adamw
+from . import muon
+from . import configurator
+from . import execution
+
+# Make submodules available
+__all__ = [
+    "common",
+    "tokenizer",
+    "checkpoint_manager",
+    "core_eval",
+    "gpt",
+    "dataloader",
+    "loss_eval",
+    "engine",
+    "dataset",
+    "report",
+    "adamw",
+    "muon",
+    "configurator",
+    "execution",
+]
```
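This new `__init__.py` eagerly imports every submodule at package import time, so a single top-level import makes all of them reachable as attributes. A minimal sketch of the usage this enables, assuming the hunk above is the package's `nanochat/__init__.py` (the file path is not shown in the diff) and the package is on the Python path:

```python
# Sketch only: assumes the hunk above landed as nanochat/__init__.py.
import nanochat

# Every submodule is already loaded and reachable as a package attribute,
# with no separate `from nanochat import gpt` needed per module:
print(nanochat.__all__)        # the fourteen submodule names declared above
gpt_module = nanochat.gpt      # module defining the GPT class
tok_module = nanochat.tokenizer

# `from nanochat import *` would bind exactly the names listed in __all__.
```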
```diff
@@ -244,7 +244,7 @@ class GPT(nn.Module):
     def forward(self, idx, targets=None, kv_cache=None, loss_reduction='mean'):
         B, T = idx.size()
 
-        # Grab the rotary embeddings for the current sequence length (they are of shape (1, seq_len, 1, head_dim))
+        # Grab the rotary embeddings for the current sequence length (they are of shape (1, seq_len, 1, head_dim/2))
         assert T <= self.cos.size(1), f"Sequence length grew beyond the rotary embeddings cache: {T} > {self.cos.size(1)}"
         assert idx.device == self.cos.device, f"Rotary embeddings and idx are on different devices: {idx.device} != {self.cos.device}"
         assert self.cos.dtype == torch.bfloat16, "Rotary embeddings must be in bfloat16"
```
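The comment fix reflects how rotary position embeddings work: each frequency rotates one (even, odd) pair of head channels, so the cached `cos`/`sin` tensors need only `head_dim/2` entries in their last axis, not `head_dim`. A minimal sketch of building such a cache under common assumptions (the base frequency of 10000 and the helper name are illustrative, not taken from nanochat; the bfloat16 cast matches the dtype assert in `forward`):

```python
import torch

def precompute_rotary(seq_len: int, head_dim: int, base: float = 10000.0):
    """Sketch of a rotary embedding cache of shape (1, seq_len, 1, head_dim/2).

    Each of the head_dim/2 frequencies rotates one (even, odd) channel pair,
    which is why the last axis is head_dim // 2 rather than head_dim.
    """
    assert head_dim % 2 == 0, "head_dim must be even to form rotation pairs"
    # One inverse frequency per channel pair
    inv_freq = 1.0 / (base ** (torch.arange(0, head_dim, 2).float() / head_dim))
    t = torch.arange(seq_len).float()
    angles = torch.outer(t, inv_freq)  # (seq_len, head_dim/2)
    # Add broadcast dims for (B, T, n_heads, head_dim/2); cast to bfloat16
    # to satisfy the dtype assert shown in forward()
    cos = angles.cos()[None, :, None, :].to(torch.bfloat16)
    sin = angles.sin()[None, :, None, :].to(torch.bfloat16)
    return cos, sin

cos, sin = precompute_rotary(seq_len=8, head_dim=64)
print(cos.shape)  # torch.Size([1, 8, 1, 32]), i.e. (1, seq_len, 1, head_dim/2)
```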