mirror of
https://github.com/deepseek-ai/FlashMLA
synced 2025-06-26 18:15:54 +00:00
update .gitignore
This commit is contained in:
parent
9c5dfab6d1
commit
9edee0c022
1
.gitignore
vendored
1
.gitignore
vendored
@ -7,3 +7,4 @@ dist/
|
||||
*.png
|
||||
/.vscode
|
||||
compile_commands.json
|
||||
.cache
|
||||
|
2
setup.py
2
setup.py
@ -11,10 +11,12 @@ from torch.utils.cpp_extension import (
|
||||
IS_WINDOWS,
|
||||
)
|
||||
|
||||
|
||||
def append_nvcc_threads(nvcc_extra_args):
    """Return a copy of *nvcc_extra_args* with a ``--threads`` option appended.

    The thread count is taken from the ``NVCC_THREADS`` environment
    variable; when it is unset or empty, ``"32"`` is used instead.
    The input list is not modified.
    """
    # `or "32"` covers both a missing variable (None) and an empty string.
    thread_count = os.getenv("NVCC_THREADS") or "32"
    return [*nvcc_extra_args, "--threads", thread_count]
|
||||
|
||||
|
||||
def get_features_args():
|
||||
features_args = []
|
||||
DISABLE_FP16 = os.getenv("FLASH_MLA_DISABLE_FP16", "FALSE") in ["TRUE", "1"]
|
||||
|
Loading…
Reference in New Issue
Block a user