Update nanochat/flash_attention.py

Code review suggestion

Co-authored-by: Sofie Van Landeghem <svlandeg@users.noreply.github.com>
Franci Penov 2026-01-31 13:07:45 -08:00 committed by GitHub
parent 2e45b7800a
commit 4e70a2b678


@@ -25,7 +25,7 @@ def _load_flash_attention_3():
     if not torch.cuda.is_available():
         return None
     try:
-        major, minor = torch.cuda.get_device_capability()
+        major, _ = torch.cuda.get_device_capability()
         # FA3 kernels are compiled for Hopper (sm90) only
         # Ada (sm89), Blackwell (sm100) need SDPA fallback until FA3 is recompiled
         if major != 9:
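
For reference, a guarded loader built around this capability check might look roughly like the sketch below. This is a minimal illustration, not nanochat's actual implementation: the flash_attn_interface import name, the return values, and the broad try/except are assumptions; only the major-version check itself comes from the hunk above.

import torch

def _load_flash_attention_3():
    # Return the FA3 module if it can run on this GPU, otherwise None so the
    # caller can fall back to torch.nn.functional.scaled_dot_product_attention.
    if not torch.cuda.is_available():
        return None
    try:
        major, _ = torch.cuda.get_device_capability()
        # FA3 kernels are compiled for Hopper (sm90) only; Ada (sm89) and
        # Blackwell (sm100) take the SDPA fallback until FA3 is recompiled.
        if major != 9:
            return None
        import flash_attn_interface  # assumed FA3 entry point; may differ
        return flash_attn_interface
    except Exception:
        # Missing or incompatible FA3 install: signal the caller to use SDPA.
        return None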