From 4e70a2b678ddd7d027f304475d271b8f568508bd Mon Sep 17 00:00:00 2001
From: Franci Penov
Date: Sat, 31 Jan 2026 13:07:45 -0800
Subject: [PATCH] Update nanochat/flash_attention.py

Code review suggestion

Co-authored-by: Sofie Van Landeghem
---
 nanochat/flash_attention.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nanochat/flash_attention.py b/nanochat/flash_attention.py
index 02d163e..89ca42b 100644
--- a/nanochat/flash_attention.py
+++ b/nanochat/flash_attention.py
@@ -25,7 +25,7 @@ def _load_flash_attention_3():
     if not torch.cuda.is_available():
         return None
     try:
-        major, minor = torch.cuda.get_device_capability()
+        major, _ = torch.cuda.get_device_capability()
         # FA3 kernels are compiled for Hopper (sm90) only
         # Ada (sm89), Blackwell (sm100) need SDPA fallback until FA3 is recompiled
         if major != 9:
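
Note (not part of the patch): for context, below is a minimal sketch of what the full _load_flash_attention_3 helper might look like with this suggestion applied. The hunk only shows the first lines of the function, so the import name flash_attn_interface, the returned flash_attn_func, and the except/fallback path are assumptions for illustration, not the repository's actual code.

    # Hedged sketch, assuming the rest of the function imports the FA3 bindings
    # and returns None whenever the caller should fall back to PyTorch SDPA.
    import torch

    def _load_flash_attention_3():
        """Return an FA3 attention callable if supported, else None (SDPA fallback)."""
        if not torch.cuda.is_available():
            return None
        try:
            major, _ = torch.cuda.get_device_capability()
            # FA3 kernels are compiled for Hopper (sm90) only
            # Ada (sm89), Blackwell (sm100) need SDPA fallback until FA3 is recompiled
            if major != 9:
                return None
            import flash_attn_interface  # assumed module name for the FA3 package
            return flash_attn_interface.flash_attn_func
        except Exception:
            # Import or driver errors also fall back to SDPA
            return None

The review suggestion only drops the unused minor capability digit; discarding it with _ makes clear that the gate depends solely on the major compute capability (9 for Hopper).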